diff --git a/.gitlab/issue_templates/Release Checklist.md b/.gitlab/issue_templates/Release Checklist.md
index a151dab78418aa735b99561f27c159f60ebabce4..b4c725dbd8edda2a517870b43914a43b46ef2823 100644
--- a/.gitlab/issue_templates/Release Checklist.md
+++ b/.gitlab/issue_templates/Release Checklist.md
@@ -4,13 +4,22 @@ GitLab Runner Release manager: **MENTION_HERE**
Release blog post MR: **LINK_HERE**
-Runner entries need to be added to blog post until: **BLOG_POST_DEADLINE_HERE**
+Runner entries need to be added to the blog post by: **DEADLINE_FOR_RUNNER_ITEMS_ADDITION_INTO_BLOG_POST_HERE**
+
+A technical description of the release, with command examples, can be found at:
+https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/release_process/how_to_release_runner.md
## Before 7th
- [ ] chose a release manager
- [ ] link release blog post's MR
- [ ] set deadline for _add entries to release blog post_
+
+ Please check what deadline is set for the `General Contributions` section in the release blog post
+ Merge Request. It should be the 6th working day before the 22nd. In that case we can set our
+ deadline to the 7th working day before the 22nd; however, if the deadline from the MR is earlier,
+ use the earlier one.
+
- [ ] Update the `X.Y.` and `X-Y-` to a specific release version
- [ ] Add the ~release label to the issue
- [ ] Add the %X.Y milestone to the issue
@@ -20,14 +29,15 @@ Runner entries need to be added to blog post until: **BLOG_POST_DEADLINE_HERE**
- [ ] check if Pipeline for `master` is passing: [](https://gitlab.com/gitlab-org/gitlab-runner/commits/master)
- [ ] add all required fixes to make `master` Pipeline passing
- [ ] add **vX.Y.0-rc.1** CHANGELOG entries
-- [ ] tag **vX.Y.0-rc.1**
-- [ ] create `X-Y-stable` branch
+- [ ] tag and push **vX.Y.0-rc.1**
+- [ ] create and push `X-Y-stable` branch
+- [ ] check out `master`, update the `VERSION` file to `X.Y+1.0` and push `master`
- [ ] deploy **vX.Y.0-rc.1** (https://gitlab.com/gitlab-com/runbooks/blob/master/howto/update-gitlab-runner-on-managers.md)
_New features_ window is closed - things not merged into `master` up to
this day, will be released with next release.
-## 7 working days before 22th (**BLOG_POST_DEADLINE_HERE**)
+## 7 working days before 22nd (**DEADLINE_FOR_RUNNER_ITEMS_ADDITION_INTO_BLOG_POST_HERE**)
- [ ] add entries to release blog post
- [ ] add release entry:
@@ -40,7 +50,8 @@ this day, will be released with next release.
documentation_link: 'https://docs.gitlab.com/runner'
documentation_text: "Read through the documentation on GitLab Runner"
description: |
- We're also releasing GitLab Runner X.Y today!
+ We're also releasing GitLab Runner X.Y today! GitLab Runner is the open source project
+ that is used to run your CI/CD jobs and send the results back to GitLab.
##### Most interesting changes:
@@ -68,8 +79,6 @@ if the only RC version was the _RC1_ released near 7th day of month.
- [ ] tag stable version
- [ ] Before 15:00 UTC
- [ ] deploy stable version to all production Runners
- - [ ] open next patch release issue: _add link here_
- - [ ] open next stable release issue: _add link here_
**RC release template**
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 94da0db2b085761923d6126a1d3f32e346503ba3..709fbbc09e38f670572afa682bef0fbe1e622b51 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,15 @@
+v 10.2.0-rc.1 (2017-11-09)
+- Update supported platforms !712
+- Fix typo in Kubernetes runner docs !714
+- Add info on upgrading to Runner 10 !709
+- Add some documentation for disable_cache configuration option !713
+- Remove .git/HEAD.lock before git fetch !722
+- Add helper_image option to docker executor config !723
+- Add notes about gitlab-runner inside the VM being used for uploads !719
+- Fix panic when global flags are passed as command flags !726
+- Update minio go library to v3.0.3 !707
+- Label ci_runner_builds metric with runner short token !729
+
v 10.1.0 (2017-10-22)
- Allow customizing go test flags with TESTFLAGS variable !688
- Clarify that cloning a runner could be considered an attack vector !658
diff --git a/Makefile b/Makefile
index a9a47681e551509b726257f9552608e575856fde..0d6ad12b114d856ff5e3c98254b1da4aeb6edb4a 100644
--- a/Makefile
+++ b/Makefile
@@ -212,6 +212,7 @@ mocks: FORCE
mockery -dir=$(GOPATH)/src/github.com/ayufan/golang-kardianos-service -output=./helpers/service/mocks -name=Interface
mockery -dir=./common -all -inpkg
mockery -dir=./helpers/docker -all -inpkg
+ mockery -dir=./shells -output=./shells/mocks -all
test-docker:
make test-docker-image IMAGE=centos:6 TYPE=rpm
diff --git a/VERSION b/VERSION
index 2bd6f7e39277d958d71e245c10304ca8c111c683..0719d810258fae82369ae6a0ee85c322bb8cbd93 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-10.2.0
+10.3.0
diff --git a/commands/builds_helper.go b/commands/builds_helper.go
index cc7a89322c440d5753ce3fe074757eb4edd732b2..92065cf61cf77e8f54b316a54ac5a74ccf1b0184 100644
--- a/commands/builds_helper.go
+++ b/commands/builds_helper.go
@@ -11,9 +11,15 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
-var numBuildsDesc = prometheus.NewDesc("ci_runner_builds", "The current number of running builds.", []string{"state", "stage", "executor_stage"}, nil)
+var numBuildsDesc = prometheus.NewDesc(
+ "ci_runner_builds",
+ "The current number of running builds.",
+ []string{"runner", "state", "stage", "executor_stage"},
+ nil,
+)
type statePermutation struct {
+ runner string
buildState common.BuildRuntimeState
buildStage common.BuildStage
executorStage common.ExecutorStage
@@ -21,6 +27,7 @@ type statePermutation struct {
func newStatePermutationFromBuild(build *common.Build) statePermutation {
return statePermutation{
+ runner: build.Runner.ShortDescription(),
buildState: build.CurrentState,
buildStage: build.CurrentStage,
executorStage: build.CurrentExecutorStage(),
@@ -191,6 +198,7 @@ func (b *buildsHelper) Collect(ch chan<- prometheus.Metric) {
numBuildsDesc,
prometheus.GaugeValue,
float64(count),
+ state.runner,
string(state.buildState),
string(state.buildStage),
string(state.executorStage),
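For readers unfamiliar with the Prometheus client, the label values passed to `MustNewConstMetric` must line up positionally with the label names declared in the `Desc`, which is why `state.runner` is now emitted first in `Collect` above. A minimal, self-contained sketch of the new label set (the label values below are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Label names mirror the updated numBuildsDesc: the runner short token
	// is now the first label, ahead of the existing state labels.
	desc := prometheus.NewDesc(
		"ci_runner_builds",
		"The current number of running builds.",
		[]string{"runner", "state", "stage", "executor_stage"},
		nil,
	)

	// Label values must be passed in the same order as the label names;
	// these values are illustrative only.
	metric := prometheus.MustNewConstMetric(
		desc,
		prometheus.GaugeValue,
		1,
		"a1b2c3d4", "running", "build_script", "docker_run",
	)
	fmt.Println(metric.Desc())
}
```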
diff --git a/commands/builds_helper_test.go b/commands/builds_helper_test.go
index b2cb86ac6ab7e301b8af3791d93b6f833f48a79a..19ef3454329fe36c62606a5ae45526e8bdbb9bfe 100644
--- a/commands/builds_helper_test.go
+++ b/commands/builds_helper_test.go
@@ -12,12 +12,19 @@ import (
"gitlab.com/gitlab-org/gitlab-runner/common"
)
+var fakeRunner = &common.RunnerConfig{
+ RunnerCredentials: common.RunnerCredentials{
+ Token: "a1b2c3d4e5f6",
+ },
+}
+
func TestBuildsHelperCollect(t *testing.T) {
ch := make(chan prometheus.Metric, 50)
b := &buildsHelper{}
b.builds = append(b.builds, &common.Build{
CurrentState: common.BuildRunStatePending,
CurrentStage: common.BuildStagePrepare,
+ Runner: fakeRunner,
})
b.Collect(ch)
assert.Len(t, ch, 1)
diff --git a/common/config.go b/common/config.go
index ad55430a68d563f8ff429f2620a64ac7775ec3ff..c746b5095060c074d6d3e72442c23162df65b3ec 100644
--- a/common/config.go
+++ b/common/config.go
@@ -79,6 +79,7 @@ type DockerConfig struct {
Tmpfs map[string]string `toml:"tmpfs,omitempty" json:"tmpfs" long:"tmpfs" env:"DOCKER_TMPFS" description:"A toml table/json object with the format key=values. When set this will mount the specified path in the key as a tmpfs volume in the main container, using the options specified as key. For the supported options, see the documentation for the unix 'mount' command"`
ServicesTmpfs map[string]string `toml:"services_tmpfs,omitempty" json:"services_tmpfs" long:"services-tmpfs" env:"DOCKER_SERVICES_TMPFS" description:"A toml table/json object with the format key=values. When set this will mount the specified path in the key as a tmpfs volume in all the service containers, using the options specified as key. For the supported options, see the documentation for the unix 'mount' command"`
SysCtls DockerSysCtls `toml:"sysctls,omitempty" json:"sysctls" long:"sysctls" env:"DOCKER_SYSCTLS" description:"Sysctl options, a toml table/json object of key=value. Value is expected to be a string."`
+ HelperImage string `toml:"helper_image,omitempty" json:"helper_image" long:"helper-image" env:"DOCKER_HELPER_IMAGE" description:"[ADVANCED] Override the default helper image used to clone repos and upload artifacts"`
}
type DockerMachine struct {
diff --git a/docs/configuration/advanced-configuration.md b/docs/configuration/advanced-configuration.md
index ad0d9305c8aa47d2a3f03290c0121bc53cc03c50..b9d5b462f83ec808963842b284c90b2259c74f8a 100644
--- a/docs/configuration/advanced-configuration.md
+++ b/docs/configuration/advanced-configuration.md
@@ -127,7 +127,7 @@ This defines the Docker Container parameters.
| `cap_drop` | drop additional Linux capabilities from the container |
| `security_opt` | set security options (--security-opt in docker run), takes a list of ':' separated key/values |
| `devices` | share additional host devices with the container |
-| `disable_cache` | disable automatic |
+| `disable_cache` | disable use of automatically created (i.e., not mapped to a host directory) cache volumes |
| `network_mode` | add container to a custom network |
| `wait_for_services_timeout` | specify how long to wait for docker services, set to 0 to disable, default: 30 |
| `cache_dir` | specify where Docker caches should be stored (this can be absolute or relative to current working directory) |
@@ -142,6 +142,7 @@ This defines the Docker Container parameters.
| `allowed_services` | specify wildcard list of services that can be specified in .gitlab-ci.yml. If not present all images are allowed (equivalent to `["*/*:*"]`) |
| `pull_policy` | specify the image pull policy: `never`, `if-not-present` or `always` (default); read more in the [pull policies documentation](../executors/docker.md#how-pull-policies-work) |
| `sysctls` | specify the sysctl options |
+| `helper_image` | [ADVANCED] Override the default helper image used to clone repos and upload artifacts |
Example:
diff --git a/docs/configuration/index.md b/docs/configuration/index.md
index 8edd7b645ad03670aff170f4ebb004caa42ed8ca..a88716c77fdbae397b941df8a50c7c85467397cd 100644
--- a/docs/configuration/index.md
+++ b/docs/configuration/index.md
@@ -1,3 +1,7 @@
+---
+comments: false
+---
+
# Configuring GitLab Runner
Below you can find some specific documentation on configuring GitLab Runner, the
diff --git a/docs/executors/kubernetes.md b/docs/executors/kubernetes.md
index 71d7c6a58f0bb04fffa9651b37a6f9055b288d0b..b3f718313a78078fa46cb471f43b8461a265110a 100644
--- a/docs/executors/kubernetes.md
+++ b/docs/executors/kubernetes.md
@@ -12,6 +12,15 @@ are as follows:
- The build container is `build`
- The services containers are `svc-X` where `X` is `[0-9]+`
+Note that when services and containers run in the same Kubernetes
+pod, they all share the same localhost address. The following restrictions
+then apply:
+
+- The services are *not* accessible via their DNS names; you need to use localhost
+  instead.
+- You cannot run several services that use the same port (e.g., you cannot have two
+  `mysql` services at the same time).
+
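To make the localhost restriction above concrete, a job script talking to a `mysql` service would dial the pod-local address rather than a service hostname. A minimal sketch (the port and the idea of probing with a raw TCP dial are assumptions for illustration):

```go
package main

import (
	"log"
	"net"
	"time"
)

func main() {
	// With the Kubernetes executor, service containers share the build pod's
	// network namespace, so they are reached via localhost, not a DNS name.
	conn, err := net.DialTimeout("tcp", "localhost:3306", 10*time.Second)
	if err != nil {
		log.Fatalf("service not reachable on localhost: %v", err)
	}
	defer conn.Close()
	log.Println("connected to service on", conn.RemoteAddr())
}
```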
## Workflow
The Kubernetes executor divides the build into multiple steps:
diff --git a/docs/executors/virtualbox.md b/docs/executors/virtualbox.md
index 484662eab8115ba318efef4eaec69931f0966cb1..359427486bb2ed1b722f959662a80564b3b7035e 100644
--- a/docs/executors/virtualbox.md
+++ b/docs/executors/virtualbox.md
@@ -32,6 +32,7 @@ the `[[runners]]` section in
1. If Windows VM, see [Checklist for Windows VMs](#checklist-for-windows-vms)
1. Install the OpenSSH server
1. Install all other dependencies required by your build
+1. If you want to upload job artifacts, install `gitlab-runner` inside the VM
1. Log out and shutdown the virtual machine
It's completely fine to use automation tools like Vagrant to provision the
@@ -59,6 +60,7 @@ When a new build is started:
1. The Runner creates a snapshot of the running virtual machine (this is done
to speed up any next builds)
1. The Runner connects to the virtual machine and executes a build
+1. If enabled, artifacts are uploaded using the `gitlab-runner` binary *inside* the virtual machine.
1. The Runner stops or shutdowns the virtual machine
## Checklist for Windows VMs
diff --git a/docs/faq/README.md b/docs/faq/README.md
index 67e100cb6c05c9d10ae26f54a9efb83c32a0559d..9c4c8adc0f3dfed1970e457969a350780f0362c7 100644
--- a/docs/faq/README.md
+++ b/docs/faq/README.md
@@ -1,4 +1,4 @@
-# FAQ
+# GitLab Runner FAQ
Some Frequently Asked Questions about GitLab Runner.
diff --git a/docs/index.md b/docs/index.md
index 345ff5d808672368095e489591357ef5817e08c5..eea8159438b1db31f458326ee561e943ab0d472d 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,5 +1,6 @@
---
toc: false
+comments: false
last_updated: 2017-10-09
---
diff --git a/docs/install/index.md b/docs/install/index.md
index 342272a339c36271b55037b32dd185f2688df106..2ace14c62ca8cbf9861cdce681f508a632f91ce7 100644
--- a/docs/install/index.md
+++ b/docs/install/index.md
@@ -1,3 +1,7 @@
+---
+comments: false
+---
+
# Install GitLab Runner
GitLab Runner can be installed and used on GNU/Linux, macOS, FreeBSD, and Windows.
diff --git a/docs/install/linux-repository.md b/docs/install/linux-repository.md
index 65ed78ab26fc4a63977ae2cdfff43480cf204bcb..a6037d4475ddc2455c436d786e8dec9bc0a0ff8b 100644
--- a/docs/install/linux-repository.md
+++ b/docs/install/linux-repository.md
@@ -23,8 +23,8 @@ We provide packages for the currently supported versions of Debian, Ubuntu, Mint
| Mint | rafaela | April 2019 |
| Mint | rebecca | April 2019 |
| Mint | qiana | April 2019 |
-| REHL/CentOS | 7 | June 2024 |
-| REHL/CentOS | 6 | November 2020 |
+| RHEL/CentOS | 7 | June 2024 |
+| RHEL/CentOS | 6 | November 2020 |
| Fedora | 25 | |
| Fedora | 26 | |
diff --git a/docs/install/windows.md b/docs/install/windows.md
index f147fa6f0ce398f2b8a9fedd521f561318424b2b..e168530bea636f93db827a0f6e16081c46f89c36 100644
--- a/docs/install/windows.md
+++ b/docs/install/windows.md
@@ -20,7 +20,7 @@ want to install a version prior to GitLab Runner 10, [visit the old docs](old.md
created. Rename the binary to `gitlab-runner.exe`.
You can download a binary for every available version as described in
[Bleeding Edge - download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).
-1. Run an [`Administrator`/elevated command prompt][prompt] (WindowsKey + X then select Command Prompt (Admin)).
+1. Run an [`Administrator`/elevated command prompt][prompt] (Windows key, search for "cmd", right-click the result and run as administrator).
1. [Register the Runner](../register/index.md).
1. Install the Runner as a service and start it. You can either run the service
using the Built-in System Account (recommended) or using a user account.
diff --git a/docs/release_process/README.md b/docs/release_process/README.md
index 00ca4c83216b520c14cc74fdb3dafb3ed9800a92..b7e6700d70fc9ff94fff33572fd9700c38f8c82e 100644
--- a/docs/release_process/README.md
+++ b/docs/release_process/README.md
@@ -1,3 +1,7 @@
+---
+comments: false
+---
+
# GitLab Runner release process
To handle the growth of this project, in `v1.6` we've introduced a release process correlated
@@ -43,7 +47,10 @@ together with GitLab CE and GitLab EE projects.
- deploy stable version to `docker-ci-X.gitlap.com` and `shared-runners-manager-X.gitlab.com`
- announce the new release in _GitLab's release blog post_
- open the _new features_ window for the next release
- - start the `pick-into-stable` strategy for the `X-Y-stable` branch
+ - all MRs that are meant to go into the upcoming release should have the
+ correct milestone assigned _and_ the `Pick into X.Y` label where `X.Y` is
+ equal to the milestone, so that release managers can find and pick them.
+ Merge requests without this label will not be merged into the stable branch.
### Supported releases
@@ -77,10 +84,9 @@ them - they will be planned for one of the upcoming releases.
For release planning we use the [_milestones_][runner-milestones] feature.
-Each issue or merge request planned for a release will be assigned to
-one of `vX.Y` milestones. This rule will be very important after
-releasing the version when the `pick-into-stable` strategy will be used to
-merge changes into the release stable branch.
+If a merge request is to be picked into more than one release, it needs one
+`Pick into X.Y` label for each release to which it should be back-ported.
After releasing a version, the `vX.Y` milestone will be still used to assign
issues and merge requests related to support process (bugs, security fixes, etc.).
@@ -114,7 +120,9 @@ For a particular change:
- assign the MR to a milestone related to the oldest version in which the bug exists
- choose a good, descriptive title for the MR since it will be automatically
inserted in the `CHANGELOG` before doing the release
- - assign the `pick-into-stable` label
+ - if a merge request is to be picked into more than one release, it needs one
+ `Pick into X.Y` label for each release to which it should be back-ported.
- merge the feature branch into `master`
- after the branch is merged into `master`, cherry-pick the merge commit to each
`X-Y-stable` branch starting from the branch related to the assigned
@@ -125,12 +133,14 @@ For a particular change:
- assign the MR to a milestone related to the oldest version in which the bug exists
- choose a good, descriptive title for the MR since it will be automatically
inserted in the `CHANGELOG` before doing the release
- - assign the `pick-into-stable` label
+ - if a merge request is to be picked into more than one release, it needs one
+ `Pick into X.Y` label for each release to which it should be back-ported.
- merge the feature branch into the assigned `X-Y-stable` branch
- after the branch is merged into the assigned `X-Y-stable` branch,
- cherry-pick the merge and commit to each `X-Y-stable` branch starting from
- the branch related to the assigned milestone up to the latest release before
- the MR target
+ cherry-pick the merge commit to each `X-Y-stable` branch corresponding
+ to the assigned `Pick into X.Y` labels. Remove each label once the change is
+ picked into its respective stable branch.
For each `X-Y-stable` branch - if the release should be published:
diff --git a/docs/release_process/how_to_release_runner.md b/docs/release_process/how_to_release_runner.md
index ca1c240048c996ef2560b673d1518967d5d6de3a..6425a1322db36f94e7d9837c67feadedb581d45d 100644
--- a/docs/release_process/how_to_release_runner.md
+++ b/docs/release_process/how_to_release_runner.md
@@ -1,3 +1,7 @@
+---
+comments: false
+---
+
# How to release GitLab Runner
Permission to push to `master` branch at https://gitlab.com/gitlab-org/gitlab-runner.git
diff --git a/executors/docker/executor_docker.go b/executors/docker/executor_docker.go
index 671d568ce4d626c470e75cf33832c60f783d7bd3..a97d6ad620a8ef8c0c33dd1ba7539761da19c753 100644
--- a/executors/docker/executor_docker.go
+++ b/executors/docker/executor_docker.go
@@ -211,6 +211,11 @@ func (s *executor) getArchitecture() string {
}
func (s *executor) getPrebuiltImage() (*types.ImageInspect, error) {
+ if imageNameFromConfig := s.Config.Docker.HelperImage; imageNameFromConfig != "" {
+ s.Debugln("Pull configured helper_image for predefined container instead of import bundled image", imageNameFromConfig, "...")
+ return s.getDockerImage(imageNameFromConfig)
+ }
+
architecture := s.getArchitecture()
if architecture == "" {
return nil, errors.New("unsupported docker architecture")
diff --git a/executors/docker/executor_docker_command_test.go b/executors/docker/executor_docker_command_test.go
index c6ef214f86e9f314976f63fc5bc503430efeee8b..39e5fce6dda0f9d4d45af958508f0a5f11b5b378 100644
--- a/executors/docker/executor_docker_command_test.go
+++ b/executors/docker/executor_docker_command_test.go
@@ -842,3 +842,33 @@ func TestDockerCommandWithGitSSLCAInfo(t *testing.T) {
assert.Contains(t, out, "Cloning repository")
assert.Contains(t, out, "Updating/initializing submodules")
}
+
+func TestDockerCommandWithHelperImageConfig(t *testing.T) {
+ if helpers.SkipIntegrationTests(t, "docker", "info") {
+ return
+ }
+
+ helperImageConfig := "gitlab/gitlab-runner-helper:x86_64-64eea86c"
+
+ successfulBuild, err := common.GetRemoteSuccessfulBuild()
+ assert.NoError(t, err)
+ build := &common.Build{
+ JobResponse: successfulBuild,
+ Runner: &common.RunnerConfig{
+ RunnerSettings: common.RunnerSettings{
+ Executor: "docker",
+ Docker: &common.DockerConfig{
+ Image: "alpine",
+ HelperImage: helperImageConfig,
+ },
+ },
+ },
+ }
+
+ var buffer bytes.Buffer
+ err = build.Run(&common.Config{}, &common.Trace{Writer: &buffer})
+ assert.NoError(t, err)
+ out := buffer.String()
+ assert.Contains(t, out, "Pulling docker image "+helperImageConfig)
+ assert.Contains(t, out, "Using docker image sha256:bbd86c6ba107ae2feb8dbf9024df4b48597c44e1b584a3d901bba91f7fc500e3 for predefined container...")
+}
diff --git a/executors/docker/machine/consts.go b/executors/docker/machine/consts.go
index c152e222da63366e77276ef467797e784bcfac9e..b3d17b8d7343ca9c3d11832715f54e3f94f18f7f 100644
--- a/executors/docker/machine/consts.go
+++ b/executors/docker/machine/consts.go
@@ -6,3 +6,4 @@ var provisionRetryInterval = time.Second
var machineDeadInterval = 20 * time.Minute
var removeRetryInterval = 30 * time.Second
var removeRetryTries = 3
+var machineStopCommandTimeout = 1 * time.Minute
diff --git a/executors/docker/machine/details.go b/executors/docker/machine/details.go
index b757ae64c86fd877baafe32512215ff95130dbd2..c9baff71d95ea8f0b087140095ecddf5d97a4031 100644
--- a/executors/docker/machine/details.go
+++ b/executors/docker/machine/details.go
@@ -63,4 +63,14 @@ func (m *machineDetails) writeDebugInformation() {
ioutil.WriteFile("machines/"+details.Details.Name+".yml", []byte(data), 0600)
}
+func (m *machineDetails) logger() *logrus.Entry {
+ return logrus.WithFields(logrus.Fields{
+ "name": m.Name,
+ "created": time.Since(m.Created),
+ "used": time.Since(m.Used),
+ "usedCount": m.UsedCount,
+ "reason": m.Reason,
+ })
+}
+
type machinesDetails map[string]*machineDetails
diff --git a/executors/docker/machine/provider.go b/executors/docker/machine/provider.go
index 852796218897a90515e3d48575cf19c3f9ddfb34..8366ab9faf508ec1870df4c15e2f4e28f0839f67 100644
--- a/executors/docker/machine/provider.go
+++ b/executors/docker/machine/provider.go
@@ -72,7 +72,8 @@ func (m *machineProvider) create(config *common.RunnerConfig, state machineState
err := m.machine.Create(config.Machine.MachineDriver, details.Name, config.Machine.MachineOptions...)
for i := 0; i < 3 && err != nil; i++ {
details.RetryCount++
- logrus.WithField("name", details.Name).WithError(err).
+ logrus.WithField("name", details.Name).
+ WithError(err).
Warningln("Machine creation failed, trying to provision")
time.Sleep(provisionRetryInterval)
err = m.machine.Provision(details.Name)
@@ -150,10 +151,7 @@ func (m *machineProvider) retryUseMachine(config *common.RunnerConfig) (details
func (m *machineProvider) removeMachine(details *machineDetails) (err error) {
if !m.machine.Exist(details.Name) {
- logrus.WithField("name", details.Name).
- WithField("created", time.Since(details.Created)).
- WithField("used", time.Since(details.Used)).
- WithField("reason", details.Reason).
+ details.logger().
Warningln("Skipping machine removal, because it doesn't exist")
return nil
}
@@ -164,20 +162,25 @@ func (m *machineProvider) removeMachine(details *machineDetails) (err error) {
defer m.stuckRemoveLock.Unlock()
}
- logrus.WithField("name", details.Name).
- WithField("created", time.Since(details.Created)).
- WithField("used", time.Since(details.Used)).
- WithField("reason", details.Reason).
- Warningln("Removing machine")
+ details.logger().
+ Warningln("Stopping machine")
+ err = m.machine.Stop(details.Name, machineStopCommandTimeout)
+ if err != nil {
+ details.logger().
+ WithError(err).
+ Warningln("Error while stopping machine")
+ }
+ details.logger().
+ Warningln("Removing machine")
err = m.machine.Remove(details.Name)
- if err == nil {
- return nil
+ if err != nil {
+ details.RetryCount++
+ time.Sleep(removeRetryInterval)
+ return err
}
- details.RetryCount++
- time.Sleep(removeRetryInterval)
- return err
+ return nil
}
func (m *machineProvider) finalizeRemoval(details *machineDetails) {
@@ -192,10 +195,7 @@ func (m *machineProvider) finalizeRemoval(details *machineDetails) {
defer m.lock.Unlock()
delete(m.details, details.Name)
- logrus.WithField("name", details.Name).
- WithField("created", time.Since(details.Created)).
- WithField("used", time.Since(details.Used)).
- WithField("reason", details.Reason).
+ details.logger().
WithField("now", time.Now()).
WithField("retries", details.RetryCount).
Infoln("Machine removed")
@@ -215,12 +215,11 @@ func (m *machineProvider) remove(machineName string, reason ...interface{}) erro
details.Reason = fmt.Sprint(reason...)
details.State = machineStateRemoving
details.RetryCount = 0
- logrus.WithField("name", machineName).
- WithField("created", time.Since(details.Created)).
- WithField("used", time.Since(details.Used)).
- WithField("reason", details.Reason).
+
+ details.logger().
WithField("now", time.Now()).
- Warningln("Removing machine")
+ Warningln("Requesting machine removal")
+
details.Used = time.Now()
details.writeDebugInformation()
diff --git a/executors/docker/machine/provider_test.go b/executors/docker/machine/provider_test.go
index 42bcd7144e54f46b0f4598679a46ab2f28f77b77..3ed794d891e0b43bd8d46f5ae54824655681b801 100644
--- a/executors/docker/machine/provider_test.go
+++ b/executors/docker/machine/provider_test.go
@@ -90,6 +90,7 @@ type testMachine struct {
Created chan bool
Removed chan bool
+ Stopped chan bool
}
func (m *testMachine) Create(driver, name string, opts ...string) error {
@@ -115,6 +116,12 @@ func (m *testMachine) Provision(name string) error {
return nil
}
+func (m *testMachine) Stop(name string, timeout time.Duration) error {
+ m.Stopped <- true
+
+ return nil
+}
+
func (m *testMachine) Remove(name string) error {
if name == "remove-fail" {
return errors.New("failed to remove")
@@ -221,6 +228,7 @@ func testMachineProvider(machine ...string) (*machineProvider, *testMachine) {
machines: machine,
Created: make(chan bool, 10),
Removed: make(chan bool, 10),
+ Stopped: make(chan bool, 10),
}
p := newMachineProvider("docker_machines", "docker")
p.machine = t
@@ -383,6 +391,7 @@ func TestMachinePreCreateMode(t *testing.T) {
assert.NoError(t, err)
p.Release(config, d)
+ <-m.Stopped
<-m.Removed
assertIdleMachines(t, p, 1, "it should downscale to single machine")
diff --git a/executors/shell/executor_shell_test.go b/executors/shell/executor_shell_test.go
index 50d46c68aacb1483e898759385347e5fa8dc67aa..ace4ed5a7526b9d5944df761e202c4f45fac72e1 100644
--- a/executors/shell/executor_shell_test.go
+++ b/executors/shell/executor_shell_test.go
@@ -4,6 +4,7 @@ import (
"bytes"
"io/ioutil"
"os"
+ "os/exec"
"path/filepath"
"testing"
"time"
@@ -182,6 +183,24 @@ func TestBuildWithShallowLock(t *testing.T) {
})
}
+func TestBuildWithHeadLock(t *testing.T) {
+ onEachShell(t, func(t *testing.T, shell string) {
+ successfulBuild, err := common.GetSuccessfulBuild()
+ assert.NoError(t, err)
+ build, cleanup := newBuild(t, successfulBuild, shell)
+ defer cleanup()
+
+ err = runBuild(t, build)
+ assert.NoError(t, err)
+
+ build.JobResponse.AllowGitFetch = true
+ ioutil.WriteFile(build.BuildDir+"/.git/HEAD.lock", []byte{}, os.ModeSticky)
+
+ err = runBuild(t, build)
+ assert.NoError(t, err)
+ })
+}
+
func TestBuildWithGitLFSHook(t *testing.T) {
onEachShell(t, func(t *testing.T, shell string) {
successfulBuild, err := common.GetSuccessfulBuild()
@@ -425,6 +444,71 @@ func TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone(t *testing.T)
})
}
+func TestBuildWithGitSubmoduleModified(t *testing.T) {
+ onEachShell(t, func(t *testing.T, shell string) {
+ successfulBuild, err := common.GetSuccessfulBuild()
+ assert.NoError(t, err)
+ build, cleanup := newBuild(t, successfulBuild, shell)
+ defer cleanup()
+
+ build.Variables = append(build.Variables, common.JobVariable{Key: "GIT_SUBMODULE_STRATEGY", Value: "normal"})
+
+ out, err := runBuildReturningOutput(t, build)
+ assert.NoError(t, err)
+ assert.Contains(t, out, "Updating/initializing submodules...")
+
+ commitToSubmodule(t, build)
+ // modify submodule without commit before second build
+ modifySubmoduleAfterCommit := "not commited change"
+ ioutil.WriteFile(build.BuildDir+"/gitlab-grack/README.md", []byte(modifySubmoduleAfterCommit), os.ModeSticky)
+
+ build.JobResponse.AllowGitFetch = true
+ out, err = runBuildReturningOutput(t, build)
+ assert.NoError(t, err)
+ assert.NotContains(t, out, "Your local changes to the following files would be overwritten by checkout")
+ assert.NotContains(t, out, "Please commit your changes or stash them before you switch branches")
+ assert.NotContains(t, out, "Aborting")
+ assert.Contains(t, out, "Updating/initializing submodules...")
+ })
+}
+
+func TestBuildWithGitSubmoduleLockFiles(t *testing.T) {
+ onEachShell(t, func(t *testing.T, shell string) {
+ successfulBuild, err := common.GetSuccessfulBuild()
+ assert.NoError(t, err)
+ build, cleanup := newBuild(t, successfulBuild, shell)
+ defer cleanup()
+
+ build.Variables = append(build.Variables, common.JobVariable{Key: "GIT_SUBMODULE_STRATEGY", Value: "normal"})
+
+ err = runBuild(t, build)
+ assert.NoError(t, err)
+
+ commitToSubmodule(t, build)
+
+ ioutil.WriteFile(build.BuildDir+"/.git/modules/gitlab-grack/index.lock", []byte{}, os.ModeSticky)
+ ioutil.WriteFile(build.BuildDir+"/.git/modules/gitlab-grack/shallow.lock", []byte{}, os.ModeSticky)
+ ioutil.WriteFile(build.BuildDir+"/.git/modules/gitlab-grack/HEAD.lock", []byte{}, os.ModeSticky)
+
+ build.JobResponse.AllowGitFetch = true
+ err = runBuild(t, build)
+ assert.NoError(t, err)
+ })
+}
+
+func commitToSubmodule(t *testing.T, build *common.Build) {
+ modifySubmoduleBeforeCommit := "commited change"
+ ioutil.WriteFile(build.BuildDir+"/gitlab-grack/README.md", []byte(modifySubmoduleBeforeCommit), os.ModeSticky)
+ _, err := exec.Command("git", "-C", build.BuildDir+"/gitlab-grack", "add", "README.md").Output()
+ assert.NoError(t, err)
+ _, err = exec.Command("git", "-C", build.BuildDir+"/gitlab-grack", "-c", "user.name='test'", "-c", "user.email='test@example.org'", "commit", "-m", "modify submodule").Output()
+ assert.NoError(t, err)
+ _, err = exec.Command("git", "-C", build.BuildDir, "add", "gitlab-grack").Output()
+ assert.NoError(t, err)
+ _, err = exec.Command("git", "-C", build.BuildDir, "-c", "user.name='test'", "-c", "user.email='test@example.org'", "commit", "-m", "modify submodule").Output()
+ assert.NoError(t, err)
+}
+
func TestBuildWithoutDebugTrace(t *testing.T) {
onEachShell(t, func(t *testing.T, shell string) {
successfulBuild, err := common.GetSuccessfulBuild()
diff --git a/helpers/docker/machine.go b/helpers/docker/machine.go
index a65724146ca2c9c404186a4f907e787efbb6d413..2672211dcd858df118b7a2e190849156388948c2 100644
--- a/helpers/docker/machine.go
+++ b/helpers/docker/machine.go
@@ -1,9 +1,14 @@
package docker_helpers
+import (
+ "time"
+)
+
type Machine interface {
Create(driver, name string, opts ...string) error
Provision(name string) error
Remove(name string) error
+ Stop(name string, timeout time.Duration) error
List() (machines []string, err error)
Exist(name string) bool
diff --git a/helpers/docker/machine_command.go b/helpers/docker/machine_command.go
index d68a81bc1c5d2c865f2a6569e6be363d7a03246d..3b7ffb7747ef009de7c29586a6bad471b763cfa5 100644
--- a/helpers/docker/machine_command.go
+++ b/helpers/docker/machine_command.go
@@ -2,6 +2,7 @@ package docker_helpers
import (
"bufio"
+ "context"
"errors"
"fmt"
"io"
@@ -10,6 +11,7 @@ import (
"os/exec"
"path/filepath"
"strings"
+ "time"
"github.com/Sirupsen/logrus"
"github.com/docker/machine/commands/mcndirs"
@@ -116,6 +118,23 @@ func (m *machineCommand) Provision(name string) error {
return cmd.Run()
}
+func (m *machineCommand) Stop(name string, timeout time.Duration) error {
+ ctx, ctxCancelFn := context.WithTimeout(context.Background(), timeout)
+ defer ctxCancelFn()
+
+ cmd := exec.CommandContext(ctx, "docker-machine", "stop", name)
+ cmd.Env = os.Environ()
+
+ fields := logrus.Fields{
+ "operation": "stop",
+ "name": name,
+ }
+ stdoutLogWriter(cmd, fields)
+ stderrLogWriter(cmd, fields)
+
+ return cmd.Run()
+}
+
func (m *machineCommand) Remove(name string) error {
cmd := exec.Command("docker-machine", "rm", "-y", name)
cmd.Env = os.Environ()
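The new `Stop` implementation above relies on `exec.CommandContext`, which kills the child process once the context deadline passes, so a hung `docker-machine stop` cannot block machine removal for longer than `machineStopCommandTimeout`. A minimal sketch of that timeout behaviour, using `sleep` as a stand-in for a hanging command:

```go
package main

import (
	"context"
	"log"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// The child is killed when the context deadline expires, mirroring how
	// machineCommand.Stop bounds the runtime of "docker-machine stop".
	cmd := exec.CommandContext(ctx, "sleep", "120")
	if err := cmd.Run(); err != nil {
		log.Println("command ended with:", err) // reports "signal: killed" after one minute
	}
}
```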
diff --git a/helpers/docker/mock_Machine.go b/helpers/docker/mock_Machine.go
index d08110d05f0d1c4ee06d143d5a07c6b1b6db6c4c..34e09c4466089f63b9a7ddcd5e2fa888c04ff632 100644
--- a/helpers/docker/mock_Machine.go
+++ b/helpers/docker/mock_Machine.go
@@ -1,6 +1,10 @@
package docker_helpers
-import "github.com/stretchr/testify/mock"
+import (
+ "time"
+
+ "github.com/stretchr/testify/mock"
+)
type MockMachine struct {
mock.Mock
@@ -20,6 +24,13 @@ func (m *MockMachine) Provision(name string) error {
return r0
}
+func (m *MockMachine) Stop(name string, timeout time.Duration) error {
+ ret := m.Called(name, timeout)
+
+ r0 := ret.Error(0)
+
+ return r0
+}
func (m *MockMachine) Remove(name string) error {
ret := m.Called(name)
diff --git a/shells/abstract.go b/shells/abstract.go
index 92a52bb870a779849b7dda3387609784255869dc..2ea314ce2e28efb4b51153589fc884148c47dad9 100644
--- a/shells/abstract.go
+++ b/shells/abstract.go
@@ -16,6 +16,8 @@ import (
type AbstractShell struct {
}
+var gitLockFiles = []string{"index.lock", "shallow.lock", "HEAD.lock"}
+
func (b *AbstractShell) GetFeatures(features *common.FeaturesInfo) {
features.Artifacts = true
features.Cache = true
@@ -98,10 +100,11 @@ func (b *AbstractShell) writeFetchCmd(w ShellWriter, build *common.Build, projec
b.writeGitSSLConfig(w, build, []string{"--local"})
}
- // Remove .git/{index,shallow}.lock files from .git, which can fail the fetch command
+ // Remove .git/{index,shallow,HEAD}.lock files from .git, which can fail the fetch command
// The file can be left if previous build was terminated during git operation
- w.RmFile(".git/index.lock")
- w.RmFile(".git/shallow.lock")
+ for _, lockFile := range gitLockFiles {
+ w.RmFile(".git/" + lockFile)
+ }
w.IfFile(".git/hooks/post-checkout")
w.RmFile(".git/hooks/post-checkout")
@@ -138,6 +141,12 @@ func (b *AbstractShell) writeSubmoduleUpdateCmd(w ShellWriter, build *common.Bui
w.Notice("Updating/initializing submodules...")
}
+ // Remove .git/{index,shallow,HEAD}.lock files from .git, which can fail the fetch command
+ // The file can be left if previous build was terminated during git operation
+ for _, lockFile := range gitLockFiles {
+ w.RmFileWithinDirectory(".git", lockFile)
+ }
+
// Sync .git/config to .gitmodules in case URL changes (e.g. new build token)
args := []string{"submodule", "sync"}
if recursive {
@@ -146,11 +155,17 @@ func (b *AbstractShell) writeSubmoduleUpdateCmd(w ShellWriter, build *common.Bui
w.Command("git", args...)
// Update / initialize submodules
- args = []string{"submodule", "update", "--init"}
+ updateArgs := []string{"submodule", "update", "--init"}
+ foreachArgs := []string{"submodule", "foreach"}
if recursive {
- args = append(args, "--recursive")
+ updateArgs = append(updateArgs, "--recursive")
+ foreachArgs = append(foreachArgs, "--recursive")
}
- w.Command("git", args...)
+
+ // Clean changed files in submodules
+ // "git submodule update --force" option not supported in Git 1.7.1 (shipped with CentOS 6)
+ w.Command("git", append(foreachArgs, "git", "reset", "--hard")...)
+ w.Command("git", updateArgs...)
}
func (b *AbstractShell) cacheFile(build *common.Build, userKey string) (key, file string) {
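The net effect of the rewritten `writeSubmoduleUpdateCmd` above is that a `git submodule foreach git reset --hard` pass now runs before `git submodule update --init`, discarding local modifications inside submodules that would otherwise abort the checkout. A small standalone sketch of the argument construction, outside the ShellWriter abstraction:

```go
package main

import "fmt"

func main() {
	recursive := true

	// Mirrors the two argument lists built in writeSubmoduleUpdateCmd:
	// one "foreach ... reset --hard" pass, then the usual update --init.
	updateArgs := []string{"submodule", "update", "--init"}
	foreachArgs := []string{"submodule", "foreach"}
	if recursive {
		updateArgs = append(updateArgs, "--recursive")
		foreachArgs = append(foreachArgs, "--recursive")
	}

	fmt.Println("git", append(foreachArgs, "git", "reset", "--hard"))
	fmt.Println("git", updateArgs)
}
```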
diff --git a/shells/bash.go b/shells/bash.go
index 7b97b5b44184b2f6e1326a9156b095e9ec1e6590..897a931d4ecd59fa2c06894f175130a3b8676e86 100644
--- a/shells/bash.go
+++ b/shells/bash.go
@@ -149,6 +149,10 @@ func (b *BashWriter) RmFile(path string) {
b.Command("rm", "-f", path)
}
+func (b *BashWriter) RmFileWithinDirectory(path, filename string) {
+ b.Command("find", path, "-type", "f", "-name", filename, "-delete")
+}
+
func (b *BashWriter) Absolute(dir string) string {
if path.IsAbs(dir) {
return dir
diff --git a/shells/cache.go b/shells/cache.go
index 37abff74508f1ecfc07d8b87bdfd7197b9133c0d..64f0dbd67d173bb6e16c24b63fd7da05e9714434 100644
--- a/shells/cache.go
+++ b/shells/cache.go
@@ -52,7 +52,7 @@ func getCacheObjectName(build *common.Build, cache *common.CacheConfig, key stri
}
func getCacheStorageClient(cache *common.CacheConfig) (scl *minio.Client, err error) {
- scl, err = minio.New(cache.ServerAddress, cache.AccessKey, cache.SecretKey, cache.Insecure)
+ scl, err = minio.New(cache.ServerAddress, cache.AccessKey, cache.SecretKey, !cache.Insecure)
if err != nil {
logrus.Warningln(err)
return
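The one-character fix above matters because `minio.New` takes a *secure* flag (use HTTPS) as its fourth argument, which is the logical opposite of the runner's `Insecure` cache setting; passing `cache.Insecure` directly produced the wrong URL scheme. A minimal sketch of the corrected call (endpoint and credentials are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	insecure := false // value of the cache's Insecure option in config.toml

	// The fourth argument means "use HTTPS", so it must be the negation of
	// the Insecure flag for https:// URLs to be generated when Insecure=false.
	client, err := minio.New("s3.example.com", "ACCESS_KEY", "SECRET_KEY", !insecure)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(client.EndpointURL())
}
```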
diff --git a/shells/cache_test.go b/shells/cache_test.go
index 575ab1cd71eea8e8f3f0d0007bd16b8341a8a2cf..0ec8e216ecf7a8831e9bd9657ec1f60e15ef7bc5 100644
--- a/shells/cache_test.go
+++ b/shells/cache_test.go
@@ -20,47 +20,77 @@ func defaultS3CacheFactory() *common.CacheConfig {
}
}
-var s3CacheBuild = &common.Build{
- JobResponse: common.JobResponse{
- JobInfo: common.JobInfo{
- ProjectID: 10,
+func defaults3CacheBuild(cacheConfig *common.CacheConfig) *common.Build {
+ return &common.Build{
+ JobResponse: common.JobResponse{
+ JobInfo: common.JobInfo{
+ ProjectID: 10,
+ },
+ RunnerInfo: common.RunnerInfo{
+ Timeout: 3600,
+ },
},
- RunnerInfo: common.RunnerInfo{
- Timeout: 3600,
+ Runner: &common.RunnerConfig{
+ RunnerCredentials: common.RunnerCredentials{
+ Token: "longtoken",
+ },
+ RunnerSettings: common.RunnerSettings{
+ Cache: cacheConfig,
+ },
},
- },
- Runner: &common.RunnerConfig{
- RunnerCredentials: common.RunnerCredentials{
- Token: "longtoken",
- },
- RunnerSettings: common.RunnerSettings{
- Cache: defaultS3CacheFactory(),
- },
- },
+ }
}
func TestS3CacheUploadURL(t *testing.T) {
s3Cache := defaultS3CacheFactory()
+ s3Cache.Insecure = false
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheUploadURL(s3CacheBuild, "key")
require.NotNil(t, url)
assert.Equal(t, s3Cache.ServerAddress, url.Host)
+ assert.Regexp(t, "^https://", url)
+}
+
+func TestS3CacheUploadInsecureURL(t *testing.T) {
+ s3Cache := defaultS3CacheFactory()
+ s3Cache.Insecure = true
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
+ url := getCacheUploadURL(s3CacheBuild, "key")
+ require.NotNil(t, url)
+ assert.Equal(t, s3Cache.ServerAddress, url.Host)
+ assert.Regexp(t, "^http://", url)
}
func TestS3CacheDownloadURL(t *testing.T) {
s3Cache := defaultS3CacheFactory()
+ s3Cache.Insecure = false
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
+ url := getCacheDownloadURL(s3CacheBuild, "key")
+ require.NotNil(t, url)
+ assert.Equal(t, s3Cache.ServerAddress, url.Host)
+ assert.Regexp(t, "^https://", url)
+}
+
+func TestS3CacheDownloadInsecureURL(t *testing.T) {
+ s3Cache := defaultS3CacheFactory()
+ s3Cache.Insecure = true
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheDownloadURL(s3CacheBuild, "key")
require.NotNil(t, url)
assert.Equal(t, s3Cache.ServerAddress, url.Host)
+ assert.Regexp(t, "^http://", url)
}
func TestGetCacheObjectNameWhenKeyIsEmptyResultIsAlsoEmpty(t *testing.T) {
s3Cache := defaultS3CacheFactory()
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheObjectName(s3CacheBuild, s3Cache, "")
require.Empty(t, url)
}
func TestGetCacheObjectName(t *testing.T) {
s3Cache := defaultS3CacheFactory()
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheObjectName(s3CacheBuild, s3Cache, "key")
require.Equal(t, "runner/longtoke/project/10/key", url)
}
@@ -68,6 +98,7 @@ func TestGetCacheObjectName(t *testing.T) {
func TestGetCacheObjectNameWhenPathIsSetThenUrlContainsIt(t *testing.T) {
s3Cache := defaultS3CacheFactory()
s3Cache.Path = "whatever"
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheObjectName(s3CacheBuild, s3Cache, "key")
require.Equal(t, "whatever/runner/longtoke/project/10/key", url)
}
@@ -75,6 +106,7 @@ func TestGetCacheObjectNameWhenPathIsSetThenUrlContainsIt(t *testing.T) {
func TestGetCacheObjectNameWhenPathHasMultipleSegmentIsSetThenUrlContainsIt(t *testing.T) {
s3Cache := defaultS3CacheFactory()
s3Cache.Path = "some/other/path/goes/here"
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheObjectName(s3CacheBuild, s3Cache, "key")
require.Equal(t, "some/other/path/goes/here/runner/longtoke/project/10/key", url)
}
@@ -82,6 +114,7 @@ func TestGetCacheObjectNameWhenPathHasMultipleSegmentIsSetThenUrlContainsIt(t *t
func TestGetCacheObjectNameWhenPathIsNotSetThenUrlDoesNotContainIt(t *testing.T) {
s3Cache := defaultS3CacheFactory()
s3Cache.Path = ""
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheObjectName(s3CacheBuild, s3Cache, "key")
require.Equal(t, "runner/longtoke/project/10/key", url)
}
@@ -89,6 +122,7 @@ func TestGetCacheObjectNameWhenPathIsNotSetThenUrlDoesNotContainIt(t *testing.T)
func TestGetCacheObjectNameWhenSharedFlagIsFalseThenRunnerSegmentExistsInTheUrl(t *testing.T) {
s3Cache := defaultS3CacheFactory()
s3Cache.Shared = false
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheObjectName(s3CacheBuild, s3Cache, "key")
require.Equal(t, "runner/longtoke/project/10/key", url)
}
@@ -96,6 +130,7 @@ func TestGetCacheObjectNameWhenSharedFlagIsFalseThenRunnerSegmentExistsInTheUrl(
func TestGetCacheObjectNameWhenSharedFlagIsFalseThenRunnerSegmentShouldNotBePresent(t *testing.T) {
s3Cache := defaultS3CacheFactory()
s3Cache.Shared = true
+ s3CacheBuild := defaults3CacheBuild(s3Cache)
url := getCacheObjectName(s3CacheBuild, s3Cache, "key")
require.Equal(t, "project/10/key", url)
}
diff --git a/shells/cmd.go b/shells/cmd.go
index 0427ec7795c3aa5fdba379c3d067b532e5e7d504..e4f4bb9c359211ff90f0e003445db8f1ecc070aa 100644
--- a/shells/cmd.go
+++ b/shells/cmd.go
@@ -177,6 +177,13 @@ func (b *CmdWriter) RmFile(path string) {
b.Line("rd /s /q " + batchQuote(helpers.ToBackslash(path)) + " 2>NUL 1>NUL")
}
+func (b *CmdWriter) RmFileWithinDirectory(path, filename string) {
+ b.Line("set currentDirectory=%CD%")
+ b.Line("cd " + batchQuote(helpers.ToBackslash(path)))
+ b.Line("del " + batchQuote(helpers.ToBackslash(filename)) + " /s /q /f 2>NUL 1>NUL")
+ b.Line("cd %currentDirectory%")
+}
+
func (b *CmdWriter) Print(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_RESET + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET
b.Line("echo " + batchEscapeVariable(coloredText))
diff --git a/shells/mocks/ShellWriter.go b/shells/mocks/ShellWriter.go
index 5b3df718a0753ce103c391477ceba1a2917a650a..a4457a11deb11c11351243f753a5ddc26902d028 100644
--- a/shells/mocks/ShellWriter.go
+++ b/shells/mocks/ShellWriter.go
@@ -153,6 +153,11 @@ func (_m *ShellWriter) RmFile(path string) {
_m.Called(path)
}
+// RmFileWithinDirectory provides a mock function with given fields: path, filename
+func (_m *ShellWriter) RmFileWithinDirectory(path string, filename string) {
+ _m.Called(path, filename)
+}
+
// TmpFile provides a mock function with given fields: name
func (_m *ShellWriter) TmpFile(name string) string {
ret := _m.Called(name)
diff --git a/shells/powershell.go b/shells/powershell.go
index ee86d433e775377e55bc259dcdefa7b709618608..f1bd1c0243d60a6b709284fc5cd690d6f6a47869 100644
--- a/shells/powershell.go
+++ b/shells/powershell.go
@@ -183,6 +183,18 @@ func (b *PsWriter) RmFile(path string) {
b.Line("")
}
+func (b *PsWriter) RmFileWithinDirectory(path, filename string) {
+ path = psQuote(helpers.ToBackslash(path))
+ filename = psQuote(helpers.ToBackslash(filename))
+
+ b.Line("dir -Path " + path + " -Recurse -Filter " + filename + " | ForEach-Object {")
+ b.Indent()
+ b.RmFile("$_.FullName")
+ b.Unindent()
+ b.Line("}")
+ b.Line("")
+}
+
func (b *PsWriter) Print(format string, arguments ...interface{}) {
coloredText := helpers.ANSI_RESET + fmt.Sprintf(format, arguments...)
b.Line("echo " + psQuoteVariable(coloredText))
diff --git a/shells/shell_writer.go b/shells/shell_writer.go
index 6f9323993076ab4b8b695808277502a83a183025..0d5885d2790c23c8f6067ea2caeb7dd31b90aafe 100644
--- a/shells/shell_writer.go
+++ b/shells/shell_writer.go
@@ -19,6 +19,7 @@ type ShellWriter interface {
MkDir(path string)
RmDir(path string)
RmFile(path string)
+ RmFileWithinDirectory(path, filename string)
Absolute(path string) string
TmpFile(name string) string
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..37ec93a14fdcd0d6e525d97c0cfa6b314eaa98d8
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..ac034e5258f442e810ed65478712aaa7bb3cd5b1
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/Makefile
@@ -0,0 +1,12 @@
+.PHONY: build test bench vet
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+ go vet
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e67d51f3204c103f88a8015a07aae1e5bfd2d041
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,746 @@
+INI [](https://travis-ci.org/go-ini/ini) [](https://sourcegraph.com/github.com/go-ini/ini?badge)
+===
+
+
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Feature
+
+- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+ go get gopkg.in/ini.v1
+
+To use with latest changes:
+
+ go get github.com/go-ini/ini
+
+Please add `-u` flag to update in the future.
+
+### Testing
+
+If you want to test on your machine, please apply `-t` flag:
+
+ go get -t gopkg.in/ini.v1
+
+Please add `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+If you cannot decide how many data sources to load at the beginning, you can still **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever a previously missing file becomes available, the next call to the `Reload` method will pick it up and parse it as usual.
+
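+For instance, here is a minimal sketch (the file names are made up for illustration) of loosely loading a file that does not exist yet and picking it up later with `Reload`:
+
+```go
+// "missing.ini" does not exist yet, so LooseLoad skips it without an error.
+cfg, err := ini.LooseLoad("base.ini", "missing.ini")
+// ...
+
+// Later, once "missing.ini" has been created on disk, reloading
+// parses it just like any other data source.
+err = cfg.Reload()
+// ...
+```
+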
+#### Ignore cases of key name
+
+When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := sec1.GetKey("Key")
+key2, err := sec2.GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration allows a key without value as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with such cases, you can assign advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of those keys is always `true`, and when you save to a file, they will be kept in the same format as you read them.
+
+To generate such keys in your program, you could use `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### Comment
+
+Take care that the following formats will be treated as comments:
+
+1. Line begins with `#` or `;`
+2. Words after `#` or `;`
+3. Words after the section name (i.e. words after `[some section name]`)
+
+If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```.
+
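+For example, a quick sketch (the section and key names here are invented) of how quoting keeps a `#` inside a value instead of starting an inline comment:
+
+```go
+cfg, err := ini.Load([]byte("[urls]\n" +
+	"plain  = value # treated as an inline comment\n" +
+	"quoted = `value # kept as part of the value`\n"))
+// ...
+cfg.Section("urls").Key("plain").String()  // value
+cfg.Section("urls").Key("quoted").String() // value # kept as part of the value
+```
+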
+Alternatively, you can use the following `LoadOptions` to completely ignore inline comments:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
+```
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("section name")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+Same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone hash of keys and corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
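+The returned map can then be used like any other `map[string]string`, for example (a small sketch):
+
+```go
+for name, val := range cfg.Section("").KeysHash() {
+	fmt.Printf("%s=%s\n", name, val)
+}
+```
+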
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get value with types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as the default value
+// to use when the key is not found or fails to parse to the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Well, I hate continuation lines, how do I disable that?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+ IgnoreContinuation: true,
+}, "filename")
+```
+
+Holy crap!
+
+Note that single and double quotes around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get value with given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value will be returned if the value of the key is not one of the candidates you give; the default value does not need to be one of the candidates.
+
+To validate value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use zero value of type for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is writing to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" sign between keys and values; to disable that:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For the values of all keys, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section; `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try its parent section, and so on, until there is no parent section left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable Sections
+
+Sometimes you have sections that do not contain key-value pairs but raw content; to handle such cases, you can use `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, []byte(`[COMMENTS]
+<1> This slide has the fuel listed in the wrong units `))
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1> This slide has the fuel listed in the wrong units
+------ end --- */
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as the special syntax for auto-increment key names starting from 1, and every section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+ // Things can be simpler.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+ // Just map a section? Fine.
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value will be kept as it is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't get your file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string `ini:"places,omitempty"`
+ None []int `ini:",omitempty"`
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching sections or keys.
+- `TitleUnderscore`: converts field names to the `title_underscore` format before matching sections or keys.
+
+To use them:
+
+```go
+type Info struct {
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
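+As a brief sketch (reusing the `Info` struct from the example above), reflecting with a mapper might look like this:
+
+```go
+cfg := ini.Empty()
+info := &Info{PackageName: "ini"}
+// With TitleUnderscore, the field PackageName is written out as "package_name".
+err = ini.ReflectFromWithMapper(cfg, info, ini.TitleUnderscore)
+// ...
+```
+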
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+ Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+ cfg.ValueMapper = os.ExpandEnv
+ // ...
+ env := &Env{}
+ err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does `BlockMode` field do?
+
+By default, the library lets you read and write values, so a lock is needed to keep your data safe. But if you are certain that you only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
+
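+A minimal read-only sketch (the file, section and key names are made up):
+
+```go
+cfg, err := ini.Load("app.ini")
+// ...
+
+// From here on we only read values, so the internal lock can be skipped.
+cfg.BlockMode = false
+timeout := cfg.Section("server").Key("timeout").MustInt(30)
+```
+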
+### Why another INI library?
+
+Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+To make those changes I had to break the API, so it was safer to keep the new code in another place and start using `gopkg.in` to version the package. (PS: shorter import path)
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
new file mode 100644
index 0000000000000000000000000000000000000000..0cf4194492ca0fdda2f5eea66e60bebbbcf1b4ac
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -0,0 +1,733 @@
+本包提供了 Go 语言中读写 INI 文件的功能。
+
+## 功能特性
+
+- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`)
+- 支持递归读取键值
+- 支持读取父子分区
+- 支持读取自增键名
+- 支持读取多行的键值
+- 支持大量辅助方法
+- 支持在读取时直接转换为 Go 语言类型
+- 支持读取和 **写入** 分区和键的注释
+- 轻松操作分区、键值和注释
+- 在保存文件时分区和键值会保持原有的顺序
+
+## 下载安装
+
+使用一个特定版本:
+
+ go get gopkg.in/ini.v1
+
+使用最新版:
+
+ go get github.com/go-ini/ini
+
+如需更新请添加 `-u` 选项。
+
+### 测试安装
+
+如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
+
+ go get -t gopkg.in/ini.v1
+
+如需更新请添加 `-u` 选项。
+
+## 开始使用
+
+### 从数据源加载
+
+一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+或者从一个空白的文件开始:
+
+```go
+cfg := ini.Empty()
+```
+
+当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误):
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。
+
+#### 忽略键名的大小写
+
+有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写:
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 和 sec2 指向同一个分区对象
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 和 key2 指向同一个键对象
+key1, err := sec1.GetKey("Key")
+key2, err := sec2.GetKey("KeY")
+```
+
+#### 类似 MySQL 配置中的布尔值键
+
+MySQL 的配置文件中会出现没有具体值的布尔类型的键:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
+
+如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### 关于注释
+
+下述几种情况的内容将被视为注释:
+
+1. 所有以 `#` 或 `;` 开头的行
+2. 所有在 `#` 或 `;` 之后的内容
+3. 分区标签后的文字 (即 `[分区名]` 之后的内容)
+
+如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。
+
+除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
+```
+
+### 操作分区(Section)
+
+获取指定分区:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+如果您想要获取默认分区,则可以用空字符串代替分区名:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+当您非常确定某个分区是存在的,可以使用以下简便方法:
+
+```go
+section := cfg.Section("section name")
+```
+
+如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
+
+创建一个分区:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+获取所有分区对象或名称:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### 操作键(Key)
+
+获取某个分区下的键:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+和分区一样,您也可以直接获取键而忽略错误处理:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+判断某个键是否存在:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+创建一个新的键:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+获取分区下的所有键或键名:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+获取分区下的所有键值对的克隆:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### 操作键值(Value)
+
+获取一个类型为字符串(string)的值:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+获取值的同时通过自定义函数进行处理验证:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+判断某个原值是否存在:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+获取其它类型的值:
+
+```go
+// 布尔值的规则:
+// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
+// 当键不存在或者转换失败时,则会直接返回该默认值。
+// 但是,MustString 方法必须传递一个默认值。
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+如果我的值有好多行怎么办?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+嗯哼?小 case!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+简直是小菜一碟!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+ IgnoreContinuation: true,
+}, "filename")
+```
+
+哇靠给力啊!
+
+需要注意的是,值两侧的单引号会被自动剔除:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+这就是全部了?哈哈,当然不是。
+
+#### 操作键值的辅助方法
+
+获取键值时设定候选值:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
+
+验证获取的值是否在指定范围内:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### 自动分割键值到切片(slice)
+
+当存在无效输入时,使用零值代替:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+从结果切片中剔除无效输入:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+当存在无效输入时,直接返回错误:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### 保存配置
+
+终于到了这个时刻,是时候保存一下配置了。
+
+比较原始的做法是输出配置到某个文件:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能:
+
+```go
+ini.PrettyFormat = false
+```
+
+## 高级用法
+
+### 递归读取键值
+
+在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### 读取父子分区
+
+您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### 获取上级父分区下的所有键名
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### 无法解析的分区
+
+如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, []byte(`[COMMENTS]
+<1> This slide has the fuel listed in the wrong units `))
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1> This slide has the fuel listed in the wrong units
+------ end --- */
+```
+
+### 读取自增键名
+
+如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### 映射到结构
+
+想要使用更加面向对象的方式玩转 INI 吗?好主意。
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+ // 一切竟可以如此的简单。
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+ // 嗯哼?只需要映射一个分区吗?
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
+
+### 从结构反射
+
+可是,我有说不能吗?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string `ini:"places,omitempty"`
+ None []int `ini:",omitempty"`
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err = ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+瞧瞧,奇迹发生了。
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### 名称映射器(Name Mapper)
+
+为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
+
+目前有 2 款内置的映射器:
+
+- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
+- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
+
+使用方法:
+
+```go
+type Info struct{
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
+
+#### 值映射器(Value Mapper)
+
+值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量:
+
+```go
+type Env struct {
+ Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+ cfg.ValueMapper = os.ExpandEnv
+ // ...
+ env := &Env{}
+ err = cfg.Section("env").MapTo(env)
+}
+```
+
+本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。
+
+#### 映射/反射的其它说明
+
+任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+示例配置文件:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+示例配置文件:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## 获取帮助
+
+- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
+- [创建工单](https://github.com/go-ini/ini/issues/new)
+
+## 常见问题
+
+### 字段 `BlockMode` 是什么?
+
+默认情况下,本库会在您进行读写操作时采用锁机制来确保数据安全。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
+
+### 为什么要写另一个 INI 解析库?
+
+许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
+
+为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..80afe7431584aa2eb208a6b99f035a1dcc2b005f
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/error.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f3c4d1ed1fa26c1d419924680b9ee2d3dd28d0e
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,556 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+const (
+ // Name for default section. You can use this constant or the string literal.
+ // In most of cases, an empty string is all you need to access the section.
+ DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99
+ _VERSION = "1.28.2"
+)
+
+// Version returns current package version literal.
+func Version() string {
+ return _VERSION
+}
+
+var (
+ // Delimiter to determine or compose a new line.
+ // This variable will be changed to "\r\n" automatically on Windows
+ // at package init time.
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+ // Indicate whether to align "=" sign with spaces to produce pretty output
+ // or reduce all possible spaces for compact format.
+ PrettyFormat = true
+
+ // Explicitly write DEFAULT section header
+ DefaultHeader = false
+
+ // Indicate whether to put a line between sections
+ PrettySection = true
+)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
+
+// dataSource is an interface that returns object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+ reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+ return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+ return nil
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ // Make sure data is safe in multiple goroutines.
+ lock sync.RWMutex
+
+ // Allow combination of multiple data sources.
+ dataSources []dataSource
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ // To keep data in order.
+ sectionList []string
+
+ options LoadOptions
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ options: opts,
+ }
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+ }
+}
+
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+ // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+ IgnoreInlineComment bool
+ // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
+ // This type of keys are mostly used in my.cnf.
+ AllowBooleanKeys bool
+ // AllowShadows indicates whether to keep track of keys with same name under same section.
+ AllowShadows bool
+ // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
+ // conform to key/value pairs. Specify the names of those blocks here.
+ UnparseableSections []string
+}
+
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly same functionality as Load function
+// except it ignores nonexistent files instead of returning error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly same functionality as Load function
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+ // Ignore error here, we sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ } else if f.options.Insensitive && name != DEFAULT_SECTION {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ } else if f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+ return nil, fmt.Errorf("section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns a list of Sections.
+func (f *File) Sections() []*Section {
+ sections := make([]*Section, len(f.sectionList))
+ for i := range f.sectionList {
+ sections[i] = f.Section(f.sectionList[i])
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+ equalSign := "="
+ if PrettyFormat {
+ equalSign = " = "
+ }
+
+ // Use buffer to make sure target is safe until finish encoding.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+ sec.Comment = "; " + sec.Comment
+ }
+ if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ if i > 0 || DefaultHeader {
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ if sec.isRawSection {
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+		// longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.ContainsAny(kname, "\"=:") {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KEY_LIST:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+ if key.Comment[0] != '#' && key.Comment[0] != ';' {
+ key.Comment = "; " + key.Comment
+ }
+ if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.ContainsAny(kname, "\"=:"):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ for _, val := range key.ValueWithShadows() {
+ if _, err := buf.WriteString(kname); err != nil {
+ return nil, err
+ }
+
+ if key.isBooleanType {
+ if kname != sec.keyList[len(sec.keyList)-1] {
+ buf.WriteString(LineBreak)
+ }
+ continue KEY_LIST
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indention.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return 0, err
+ }
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because we are truncating with os.Create,
+	// it's safer to save to a temporary file location and rename after done.
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 0000000000000000000000000000000000000000..838356af01b2a66dfd7208d2bf1b36b8764cc299
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,699 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ isShadow bool
+ shadows []*Key
+
+ Comment string
+}
+
+// newKey simply returns a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+ if !k.s.f.options.AllowShadows {
+ return errors.New("shadow key is not allowed")
+ }
+ return k.addShadow(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+ if len(k.shadows) == 0 {
+ return []string{k.value}
+ }
+ vals := make([]string, len(k.shadows)+1)
+ vals[0] = k.value
+ for i := range k.shadows {
+ vals[i+1] = k.shadows[i].value
+ }
+ return vals
+}
+
+// transformValue takes a raw value and transforms to its final string.
+func (k *Key) transformValue(val string) string {
+ if k.s.f.ValueMapper != nil {
+ val = k.s.f.ValueMapper(val)
+ }
+
+ // Fail-fast if no indicate char found for recursive value
+ if !strings.Contains(val, "%") {
+ return val
+ }
+ for i := 0; i < _DEPTH_VALUES; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := strings.TrimLeft(vr, "%(")
+ noption = strings.TrimRight(noption, ")s")
+
+ // Search in the same section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil {
+ // Search again in default section.
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ }
+
+ // Substitute by new value and take off leading '%(' and trailing ')s'.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return modified result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 10, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ k.value = defaultVal
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatBool(defaultVal[0])
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].String()
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].Format(format)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ vals := strings.Split(str, delim)
+ for i := range vals {
+ // vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+ return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+ vals := k.ValueWithShadows()
+ results := make([]string, 0, len(vals)*2)
+ for i := range vals {
+ if len(vals) == 0 {
+ continue
+ }
+
+ results = append(results, strings.Split(vals[i], delim)...)
+ }
+
+ for i := range results {
+ results[i] = k.transformValue(strings.TrimSpace(results[i]))
+ }
+ return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ vals := make([]float64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ vals := make([]int, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.Atoi(str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ vals := make([]int64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ vals := make([]uint, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 0)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, uint(val))
+ }
+ }
+ return vals, nil
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ vals := make([]uint64, 0, len(strs))
+ for _, str := range strs {
+ val, err := strconv.ParseUint(str, 10, 64)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ vals := make([]time.Time, 0, len(strs))
+ for _, str := range strs {
+ val, err := time.Parse(format, str)
+ if err != nil && returnOnInvalid {
+ return nil, err
+ }
+ if err == nil || addInvalid {
+ vals = append(vals, val)
+ }
+ }
+ return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..69d5476273b8df374aeefffdc8c3cbfc9450e2b8
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -0,0 +1,361 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+type tokenType int
+
+const (
+ _TOKEN_INVALID tokenType = iota
+ _TOKEN_COMMENT
+ _TOKEN_SECTION
+ _TOKEN_KEY
+)
+
+type parser struct {
+ buf *bufio.Reader
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func newParser(r io.Reader) *parser {
+ return &parser{
+ buf: bufio.NewReader(r),
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
+ p.buf.Read(mask)
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ p.buf.Read(mask)
+ }
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name is surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && string(line[0:3]) == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Get out key name
+ endIdx := -1
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], "=:")
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, "=:")
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote reports whether the first and last characters
+// are the given quote character (\" or ').
+// It returns false if any other part of the string contains the same kind of quote.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && string(line[0:3]) == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+
+ // Check continuation lines when desired
+ if !ignoreContinuation && line[len(line)-1] == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ // Check if ignore inline comment
+ if !ignoreInlineComment {
+ i := strings.IndexAny(line, "#;")
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+ }
+
+ // Trim single quotes
+ if hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"') {
+ line = line[1 : len(line)-1]
+ }
+ return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader)
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ section, _ := f.NewSection(DEFAULT_SECTION)
+
+ var line []byte
+ var inUnparseableSection bool
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+ // Note: we do not care about the trailing line break here,
+ // it is needed when appending the next comment line,
+ // so we just clean it up once at the end when the comment is assigned.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ // TODO(unknwon): use LastIndexByte when stop supporting Go1.4
+ closeIdx := bytes.LastIndex(line, []byte("]"))
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+ // Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(line)
+ if err != nil {
+ // Treat as boolean key when desired, and whole line is key name.
+ if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
+ kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+ if err != nil {
+ return err
+ }
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
new file mode 100644
index 0000000000000000000000000000000000000000..94f7375ed4c427c8322d2d82fe733c9ce96146f1
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -0,0 +1,248 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+
+ isRawSection bool
+ rawBody string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
+// NewKey creates a new key to given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ }
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = newKey(s, name, val)
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key to given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+ key, err := s.NewKey(name, "true")
+ if err != nil {
+ return nil, err
+ }
+
+ key.isBooleanType = true
+ return key, nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ // FIXME: change to section level lock?
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ } else {
+ break
+ }
+ }
+ return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := map[string]string{}
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ return
+ }
+ }
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + "."
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name])
+ }
+ }
+ return children
+}
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100644
index 0000000000000000000000000000000000000000..eeb8dabaaca8d3da142d77cda0ced896d0fc7a35
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -0,0 +1,500 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= ('A' - 'a')
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+ var err error
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals, err = key.parseInts(strs, true, false)
+ case reflect.Int64:
+ vals, err = key.parseInt64s(strs, true, false)
+ case reflect.Uint:
+ vals, err = key.parseUints(strs, true, false)
+ case reflect.Uint64:
+ vals, err = key.parseUint64s(strs, true, false)
+ case reflect.Float64:
+ vals, err = key.parseFloat64s(strs, true, false)
+ case reflectTime:
+ vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ if isStrict {
+ return err
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+ if isStrict {
+ return err
+ }
+ return nil
+}
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return an error for a failed parse,
+// because we want to use the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ switch t.Kind() {
+ case reflect.String:
+ if len(key.String()) == 0 {
+ return nil
+ }
+ field.SetString(key.String())
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetBool(boolVal)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetInt(intVal)
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && int(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetUint(uintVal)
+
+ case reflect.Float32, reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.SetFloat(floatVal)
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ field.Set(reflect.ValueOf(timeVal))
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
+ opts := strings.SplitN(tag, ",", 3)
+ rawName = opts[0]
+ if len(opts) > 1 {
+ omitEmpty = opts[1] == "omitempty"
+ }
+ if len(opts) > 2 {
+ allowShadow = opts[2] == "allowshadow"
+ }
+ return rawName, omitEmpty, allowShadow
+}
+
+func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, _, allowShadow := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ if isAnonymous {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if isAnonymous || isStruct {
+ if sec, err := s.f.GetSection(fieldName); err == nil {
+ if err = sec.mapTo(field, isStrict); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (s *Section) StrictMapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (f *File) StrictMapTo(v interface{}) error {
+ return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+ return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ sliceOf := field.Type().Elem().Kind()
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-1])
+ return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflectTime:
+ t, ok := v.Interface().(time.Time)
+ return ok && t.IsZero()
+ }
+ return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ opts := strings.SplitN(tag, ",", 2)
+ if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, opts[0])
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: The only error here is section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+ continue
+ }
+
+ // Note: Same reason as section.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+ if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot reflect from non-pointer struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/github.com/minio/go-homedir/LICENSE b/vendor/github.com/minio/go-homedir/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f9c841a51e0d11ec20c19ff7600e88da826867fa
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/minio/go-homedir/README.md b/vendor/github.com/minio/go-homedir/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..085f57775c80119564e1b5d7f294e33b3d536d5a
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/README.md
@@ -0,0 +1,16 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
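+A minimal usage sketch, assuming the vendored import path `github.com/minio/go-homedir`
+and an arbitrary example path passed to `Expand`:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/minio/go-homedir"
+)
+
+func main() {
+	// Detect the current user's home directory without cgo.
+	home, err := homedir.Dir()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("home:", home)
+
+	// Expand a leading "~" into the detected home directory.
+	expanded, err := homedir.Expand("~/.config/example.toml")
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("expanded:", expanded)
+}
+```
+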
+**Why not just use `os/user`?** The built-in `os/user` package is not
+available on certain architectures such as i386 or PNaCl. Additionally
+it has a cgo dependency on Darwin systems. This means that any Go code
+that uses that package cannot cross compile. But 99% of the time the
+use for `os/user` is just to retrieve the home directory, which we can
+do for the current user without cgo. This library does that, enabling
+cross-compilation.
diff --git a/vendor/github.com/minio/go-homedir/dir_posix.go b/vendor/github.com/minio/go-homedir/dir_posix.go
new file mode 100644
index 0000000000000000000000000000000000000000..4615fe063119da04eb5728e4b5a82f22fd030857
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/dir_posix.go
@@ -0,0 +1,64 @@
+// +build !windows
+
+// Copyright 2016 (C) Mitchell Hashimoto
+// Distributed under the MIT License.
+
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "os/user"
+ "strconv"
+ "strings"
+)
+
+// dir returns the homedir of current user for all POSIX compatible
+// operating systems.
+func dir() (string, error) {
+ // First prefer the HOME environment variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // user.Current is not implemented for i386 and PNaCl-like environments.
+ if currUser, err := user.Current(); err == nil {
+ return currUser.HomeDir, nil
+ }
+
+ // If that fails, try getent
+ var stdout bytes.Buffer
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If "getent" is missing, ignore it
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd = exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/minio/go-homedir/dir_windows.go b/vendor/github.com/minio/go-homedir/dir_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..85e5218c7fceb47f01c036852e62d1aa8067af7c
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/dir_windows.go
@@ -0,0 +1,28 @@
+// Copyright 2016 (C) Mitchell Hashimoto
+// Distributed under the MIT License.
+
+package homedir
+
+import (
+ "errors"
+ "os"
+)
+
+// dir returns the homedir of current user for MS Windows OS.
+func dir() (string, error) {
+ // First prefer the HOME environment variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ home = os.Getenv("USERPROFILE")
+ }
+ if home == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/github.com/minio/go-homedir/homedir.go b/vendor/github.com/minio/go-homedir/homedir.go
new file mode 100644
index 0000000000000000000000000000000000000000..092373801ff59f2ef9783969c725612cff6e7a81
--- /dev/null
+++ b/vendor/github.com/minio/go-homedir/homedir.go
@@ -0,0 +1,68 @@
+// Copyright 2016 (C) Mitchell Hashimoto
+// Distributed under the MIT License.
+
+// Package homedir implements a portable function to determine current user's homedir.
+package homedir
+
+import (
+ "errors"
+ "path/filepath"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.Mutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ // Return cached homedir if available.
+ if !DisableCache {
+ if homedirCache != "" {
+ return homedirCache, nil
+ }
+ }
+
+ // Determine the OS-specific current homedir.
+ result, err := dir()
+ if err != nil {
+ return "", err
+ }
+
+ // Cache for future lookups.
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
diff --git a/vendor/github.com/minio/minio-go/API.md b/vendor/github.com/minio/minio-go/API.md
deleted file mode 100644
index 848c1e6d81e31a68c21a220b379246bd2a74cac8..0000000000000000000000000000000000000000
--- a/vendor/github.com/minio/minio-go/API.md
+++ /dev/null
@@ -1,535 +0,0 @@
-## API Documentation
-
-### Minio client object creation
-Minio client object is created using minio-go:
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/minio/minio-go"
-)
-
-func main() {
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", false)
- if err !!= nil {
- fmt.Println(err)
- return
- }
-}
-```
-
-s3Client can be used to perform operations on S3 storage. APIs are described below.
-
-### Bucket operations
-
-* [`MakeBucket`](#MakeBucket)
-* [`ListBuckets`](#ListBuckets)
-* [`BucketExists`](#BucketExists)
-* [`RemoveBucket`](#RemoveBucket)
-* [`ListObjects`](#ListObjects)
-* [`ListIncompleteUploads`](#ListIncompleteUploads)
-
-### Object operations
-
-* [`GetObject`](#GetObject)
-* [`PutObject`](#PutObject)
-* [`CopyObject`](#CopyObject)
-* [`StatObject`](#StatObject)
-* [`RemoveObject`](#RemoveObject)
-* [`RemoveIncompleteUpload`](#RemoveIncompleteUpload)
-
-### File operations.
-
-* [`FPutObject`](#FPutObject)
-* [`FGetObject`](#FPutObject)
-
-### Bucket policy operations.
-
-* [`SetBucketPolicy`](#SetBucketPolicy)
-* [`GetBucketPolicy`](#GetBucketPolicy)
-* [`RemoveBucketPolicy`](#RemoveBucketPolicy)
-
-### Presigned operations
-
-* [`PresignedGetObject`](#PresignedGetObject)
-* [`PresignedPutObject`](#PresignedPutObject)
-* [`PresignedPostPolicy`](#PresignedPostPolicy)
-
-### Bucket operations
----------------------------------------
-
-#### MakeBucket(bucketName string, location string) error
-Create a new bucket.
-
-__Parameters__
-* `bucketName` _string_ - Name of the bucket.
-* `location` _string_ - region valid values are _us-west-1_, _us-west-2_, _eu-west-1_, _eu-central-1_, _ap-southeast-1_, _ap-northeast-1_, _ap-southeast-2_, _sa-east-1_
-
-__Example__
-```go
-err := s3Client.MakeBucket("mybucket", "us-west-1")
-if err != nil {
- fmt.Println(err)
- return
-}
-fmt.Println("Successfully created mybucket.")
-```
----------------------------------------
-
-#### ListBuckets() ([]BucketInfo, error)
-Lists all buckets.
-
-`bucketList` lists bucket in the format:
-* `bucket.Name` _string_: bucket name
-* `bucket.CreationDate` time.Time : date when bucket was created
-
-__Example__
-```go
-buckets, err := s3Client.ListBuckets()
-if err != nil {
- fmt.Println(err)
- return
-}
-for _, bucket := range buckets {
- fmt.Println(bucket)
-}
-```
----------------------------------------
-
-#### BucketExists(bucketName string) error
-Check if bucket exists.
-
-__Parameters__
-* `bucketName` _string_ : name of the bucket
-
-__Example__
-```go
-err := s3Client.BucketExists("mybucket")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### RemoveBucket(bucketName string) error
-Remove a bucket.
-
-__Parameters__
-* `bucketName` _string_ : name of the bucket
-
-__Example__
-```go
-err := s3Client.RemoveBucket("mybucket")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### GetBucketPolicy(bucketName string, objectPrefix string) error
-Get access permissions on a bucket or a prefix.
-
-__Parameters__
-* `bucketName` _string_ : name of the bucket
-* `objectPrefix` _string_ : name of the object prefix
-
-__Example__
-```go
-bucketPolicy, err := s3Client.GetBucketPolicy("mybucket", "")
-if err != nil {
- fmt.Println(err)
- return
-}
-fmt.Println("Access permissions for mybucket is", bucketPolicy)
-```
----------------------------------------
-
-#### SetBucketPolicy(bucketname string, objectPrefix string, policy BucketPolicy) error
-Set access permissions on bucket or an object prefix.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectPrefix` _string_ : name of the object prefix
-* `policy` _BucketPolicy_: policy can be _BucketPolicyNone_, _BucketPolicyReadOnly_, _BucketPolicyReadWrite_, _BucketPolicyWriteOnly_
-
-__Example__
-```go
-err := s3Client.SetBucketPolicy("mybucket", "myprefix", BucketPolicyReadWrite)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### RemoveBucketPolicy(bucketname string, objectPrefix string) error
-Remove existing permissions on bucket or an object prefix.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectPrefix` _string_ : name of the object prefix
-
-__Example__
-```go
-err := s3Client.RemoveBucketPolicy("mybucket", "myprefix")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
-List objects in a bucket.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectPrefix` _string_: the prefix of the objects that should be listed
-* `recursive` _bool_: `true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'
-* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine
-
-__Return Value__
-* `<-chan ObjectInfo` _chan ObjectInfo_: Read channel for all the objects in the bucket, the object is of the format:
- * `objectInfo.Key` _string_: name of the object
- * `objectInfo.Size` _int64_: size of the object
- * `objectInfo.ETag` _string_: etag of the object
- * `objectInfo.LastModified` _time.Time_: modified time stamp
-
-__Example__
-```go
-// Create a done channel to control 'ListObjects' go routine.
-doneCh := make(chan struct{})
-
-// Indicate to our routine to exit cleanly upon return.
-defer close(doneCh)
-
-isRecursive := true
-objectCh := s3Client.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
-for object := range objectCh {
- if object.Err != nil {
- fmt.Println(object.Err)
- return
- }
- fmt.Println(object)
-}
-
-```
-
----------------------------------------
-
-#### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectMultipartInfo
-List partially uploaded objects in a bucket.
-
-__Parameters__
-* `bucketname` _string_: name of the bucket
-* `prefix` _string_: prefix of the object names that are partially uploaded
-* `recursive` bool: directory style listing when false, recursive listing when true
-* `doneCh` chan struct{} : channel for pro-actively closing the internal go routine
-
-__Return Value__
-* `<-chan ObjectMultipartInfo` _chan ObjectMultipartInfo_ : emits multipart objects of the format:
- * `multiPartObjInfo.Key` _string_: name of the incomplete object
- * `multiPartObjInfo.UploadID` _string_: upload ID of the incomplete object
- * `multiPartObjInfo.Size` _int64_: size of the incompletely uploaded object
-
-__Example__
-```go
-// Create a done channel to control 'ListObjects' go routine.
-doneCh := make(chan struct{})
-
-// Indicate to our routine to exit cleanly upon return.
-defer close(doneCh)
-
-isRecursive := true
-multiPartObjectCh := s3Client.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
-for multiPartObject := range multiPartObjectCh {
- if multiPartObject.Err != nil {
- fmt.Println(multiPartObject.Err)
- return
- }
- fmt.Println(multiPartObject)
-}
-```
-
----------------------------------------
-### Object operations
-
-#### GetObject(bucketName string, objectName string) *Object
-Download an object.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Return Value__
-* `object` _*Object_ : _Object_ represents object reader.
-
-__Example__
-```go
-object, err := s3Client.GetObject("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-localFile, err := os.Create("/tmp/local-file")
-if err != nil {
-    fmt.Println(err)
-    return
-}
-if _, err := io.Copy(localFile, object); err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### FGetObject(bucketName string, objectName string, filePath string) error
-Download and save an object to a file in the local filesystem.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `filePath` _string_: path to which the object data will be written to
-
-__Example__
-```go
-err := s3Client.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int64, err error)
-Upload contents from `io.Reader` to objectName.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `reader` _io.Reader_: Any golang object implementing io.Reader
-* `contentType` _string_: content type of the object.
-
-__Example__
-```go
-file, err := os.Open("my-testfile")
-if err != nil {
- fmt.Println(err)
- return
-}
-defer file.Close()
-
-n, err := s3Client.PutObject("my-bucketname", "my-objectname", file, "application/octet-stream")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
-Copy a source object into a new object with the provided name in the provided bucket.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `objectSource` _string_: name of the object source.
-* `conditions` _CopyConditions_: Collection of supported CopyObject conditions. ['x-amz-copy-source', 'x-amz-copy-source-if-match', 'x-amz-copy-source-if-none-match', 'x-amz-copy-source-if-unmodified-since', 'x-amz-copy-source-if-modified-since']
-
-__Example__
-```go
-// All following conditions are allowed and can be combined together.
-
-// Set copy conditions.
-var copyConds = minio.NewCopyConditions()
-// Set modified condition, copy object modified since 2014 April.
-copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
-
-// Set unmodified condition, copy object unmodified since 2014 April.
-// copyConds.SetUnmodified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
-
-// Set matching ETag condition, copy object which matches the following ETag.
-// copyConds.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
-
-// Set matching ETag except condition, copy object which does not match the following ETag.
-// copyConds.SetMatchETagExcept("31624deb84149d2f8ef9c385918b653a")
-
-err := s3Client.CopyObject("my-bucketname", "my-objectname", "/my-sourcebucketname/my-sourceobjectname", copyConds)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### FPutObject(bucketName string, objectName string, filePath string, contentType string) (n int64, err error)
-Uploads the object using contents from a file
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `filePath` _string_: file path of the file to be uploaded
-* `contentType` _string_: content type of the object
-
-__Example__
-```go
-n, err := s3Client.FPutObject("my-bucketname", "my-objectname", "/tmp/my-filename.csv", "application/csv")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### StatObject(bucketName string, objectName string) (ObjectInfo, error)
-Get metadata of an object.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Return Value__
-* `objInfo` _ObjectInfo_ : object stat info in the following format:
- * `objInfo.Size` _int64_: size of the object
- * `objInfo.ETag` _string_: etag of the object
- * `objInfo.ContentType` _string_: Content-Type of the object
-  * `objInfo.LastModified` _time.Time_: modified time stamp
-
-__Example__
-```go
-objInfo, err := s3Client.StatObject("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-fmt.Println(objInfo)
-```
----------------------------------------
-
-#### RemoveObject(bucketName string, objectName string) error
-Remove an object.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Example__
-```go
-err := s3Client.RemoveObject("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
----------------------------------------
-
-#### RemoveIncompleteUpload(bucketName string, objectName string) error
-Remove a partially uploaded object.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-
-__Example__
-```go
-err := s3Client.RemoveIncompleteUpload("mybucket", "photo.jpg")
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
-### Presigned operations
----------------------------------------
-
-#### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
-Generate a presigned URL for GET.
-
-__Parameters__
-* `bucketName` _string_: name of the bucket.
-* `objectName` _string_: name of the object.
-* `expiry` _time.Duration_: expiry duration of the presigned URL.
-* `reqParams` _url.Values_ : additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_
-
-__Example__
-```go
-// Set request parameters for content-disposition.
-reqParams := make(url.Values)
-reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
-
-// Generates a presigned url which expires in a day.
-presignedURL, err := s3Client.PresignedGetObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60, reqParams)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
-Generate a presigned URL for PUT.
-
-NOTE: you can only upload to S3 with the object name specified when generating the presigned URL.
-
-
-__Parameters__
-* `bucketName` _string_: name of the bucket
-* `objectName` _string_: name of the object
-* `expiry` _time.Duration_: expiry duration of the presigned URL
-
-__Example__
-```go
-// Generates a url which expires in a day.
-presignedURL, err := s3Client.PresignedPutObject("mybucket", "photo.jpg", time.Second * 24 * 60 * 60)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
----------------------------------------
-
-#### PresignedPostPolicy(policy PostPolicy) (*url.URL, map[string]string, error)
-PresignedPostPolicy generates a presigned URL and form data for uploading objects with a
-POST request. The policy specifies conditions restricting what is allowed in the request,
-such as the bucket name where objects can be uploaded, key name prefixes allowed for the
-object being created, and more.
-
-We need to create our policy first:
-```go
-policy := minio.NewPostPolicy()
-```
-Apply upload policy restrictions:
-```go
-policy.SetBucket("my-bucketname")
-policy.SetKey("my-objectname")
-policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
-
-// Only allow 'png' images.
-policy.SetContentType("image/png")
-
-// Only allow content size in range 1KB to 1MB.
-policy.SetContentLengthRange(1024, 1024*1024)
-```
-Get the POST form key/value object:
-```go
-url, formData, err := s3Client.PresignedPostPolicy(policy)
-if err != nil {
- fmt.Println(err)
- return
-}
-```
-
-POST your content from the command line using `curl`:
-```go
-fmt.Printf("curl ")
-for k, v := range formData {
- fmt.Printf("-F %s=%s ", k, v)
-}
-fmt.Printf("-F file=@/etc/bash.bashrc ")
-fmt.Printf("%s\n", url)
-```
diff --git a/vendor/github.com/minio/minio-go/INSTALLGO.md b/vendor/github.com/minio/minio-go/INSTALLGO.md
deleted file mode 100644
index 81c3d53f53fa775382040a0fa52201cb3970513b..0000000000000000000000000000000000000000
--- a/vendor/github.com/minio/minio-go/INSTALLGO.md
+++ /dev/null
@@ -1,83 +0,0 @@
-## Ubuntu (Kylin) 14.04
-### Build Dependencies
-This installation guide is based on Ubuntu 14.04+ on x86-64 platform.
-
-##### Install Git, GCC
-```sh
-$ sudo apt-get install git build-essential
-```
-
-##### Install Go 1.5+
-
-Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/).
-
-```sh
-$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
-$ mkdir -p ${HOME}/bin/
-$ mkdir -p ${HOME}/go/
-$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz
-```
-##### Setup GOROOT and GOPATH
-
-Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
-and GOPATH specifies the location of your project workspace.
-
-```sh
-export GOROOT=${HOME}/bin/go
-export GOPATH=${HOME}/go
-export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
-```
-```sh
-$ source ~/.bashrc
-```
-
-##### Testing it all
-```sh
-$ go env
-```
-
-## OS X (Yosemite) 10.10
-### Build Dependencies
-This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
-
-##### Install brew
-```sh
-$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-```
-
-##### Install Git, Python
-```sh
-$ brew install git python
-```
-
-##### Install Go 1.5+
-
-Install golang binaries using `brew`
-
-```sh
-$ brew install go
-$ mkdir -p $HOME/go
-```
-
-##### Setup GOROOT and GOPATH
-
-Add the following exports to your ``~/.bash_profile``. Environment variable GOROOT specifies the location of your golang binaries
-and GOPATH specifies the location of your project workspace.
-
-```sh
-export GOPATH=${HOME}/go
-export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
-export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
-export PATH=$PATH:${GOPATH}/bin
-```
-
-##### Source the new environment
-
-```sh
-$ source ~/.bash_profile
-```
-
-##### Testing it all
-```sh
-$ go env
-```
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
index 6dbef6265fbc543968e3ada284811626354f4da9..e2a957137fe7c1ac82bcf7b2656932990289039d 100644
--- a/vendor/github.com/minio/minio-go/MAINTAINERS.md
+++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md
@@ -5,7 +5,6 @@
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
### Making new releases
-
Edit `libraryVersion` constant in `api.go`.
```
@@ -13,7 +12,23 @@ $ grep libraryVersion api.go
libraryVersion = "0.3.0"
```
+Commit your changes
+```
+$ git commit -a -m "Bump to new release 0.3.0" --author "Minio Trusted "
+```
+
+Tag and sign your release commit. Note that this step requires access to Minio's trusted private key.
```
-$ git tag 0.3.0
+$ export GNUPGHOME=/path/to/trusted/key
+$ git tag -s 0.3.0
+$ git push
$ git push --tags
-```
\ No newline at end of file
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@minio.io` account. Release notes require two sections: `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release, and Changelog contains a list of all commits since the last release.
+
+To generate `changelog`
+```sh
+git log --no-color --pretty=format:'-%d %s (%cr) <%an>' ..
+```
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
index f21f2882dcf61766bd1e9e529caf9e061b84759a..5eb6656d57190da729b73aa3703084969a5ca708 100644
--- a/vendor/github.com/minio/minio-go/README.md
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -1,105 +1,237 @@
-# Minio Go Library for Amazon S3 Compatible Cloud Storage [](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [](https://slack.minio.io) [](https://sourcegraph.com/github.com/minio/minio-go?badge)
-## Description
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
-Minio Go library is a simple client library for S3 compatible cloud storage servers. Supports AWS Signature Version 4 and 2. AWS Signature Version 4 is chosen as default.
+**Supported cloud storage providers:**
-List of supported cloud storage providers.
-
- - AWS Signature Version 4
+- AWS Signature Version 4
- Amazon S3
- Minio
- - AWS Signature Version 2
+- AWS Signature Version 2
- Google Cloud Storage (Compatibility Mode)
- Openstack Swift + Swift3 middleware
- Ceph Object Gateway
- Riak CS
-## Install
+This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
-If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md).
+This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
+## Download from Github
```sh
-$ go get github.com/minio/minio-go
+go get -u github.com/minio/minio-go
```
-## Example
+## Initialize Minio Client
+Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
-### ListBuckets()
+| Parameter | Description|
+| :--- | :--- |
+| endpoint | URL to object storage service. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set this value to 'true' to enable secure (HTTPS) access. |
-This example shows how to List your buckets.
```go
package main
import (
- "log"
-
"github.com/minio/minio-go"
+ "log"
)
func main() {
- // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access.
- // This boolean value is the last argument for New().
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
-	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
- // determined based on the Endpoint value.
- s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false)
- if err != nil {
- log.Fatalln(err)
- }
- buckets, err := s3Client.ListBuckets()
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
if err != nil {
log.Fatalln(err)
}
- for _, bucket := range buckets {
- log.Println(bucket)
- }
-}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now setup
+}
```
-## Documentation
+## Quick Start Example - File Uploader
+This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
+
+We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
-[API documentation](./API.md)
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
-## Examples
+ // Make a new bucket called mymusic.
+ bucketName := "mymusic"
+ location := "us-east-1"
-### Bucket Operations.
-* [MakeBucket(bucketName, location) error](examples/s3/makebucket.go)
-* [BucketExists(bucketName) error](examples/s3/bucketexists.go)
-* [RemoveBucket(bucketName) error](examples/s3/removebucket.go)
-* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go)
-* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go)
-* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go)
+ err = minioClient.MakeBucket(bucketName, location)
+ if err != nil {
+ // Check to see if we already own this bucket (which happens if you run this twice)
+ exists, err := minioClient.BucketExists(bucketName)
+ if err == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ }
+ log.Printf("Successfully created %s\n", bucketName)
-### Object Operations.
-* [PutObject(bucketName, objectName, io.Reader, contentType) error](examples/s3/putobject.go)
-* [GetObject(bucketName, objectName) (*Object, error)](examples/s3/getobject.go)
-* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go)
-* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go)
-* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go)
+ // Upload the zip file
+ objectName := "golden-oldies.zip"
+ filePath := "/tmp/golden-oldies.zip"
+ contentType := "application/zip"
-### File Object Operations.
-* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go)
-* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go)
+ // Upload the zip file with FPutObject
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
+ if err != nil {
+ log.Fatalln(err)
+ }
-### Presigned Operations.
-* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (*url.URL, error)](examples/s3/presignedgetobject.go)
-* [PresignedPutObject(bucketName, objectName, time.Duration) (*url.URL, error)](examples/s3/presignedputobject.go)
-* [PresignedPostPolicy(NewPostPolicy()) (*url.URL, map[string]string, error)](examples/s3/presignedpostpolicy.go)
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
-### Bucket Policy Operations.
-* [SetBucketPolicy(bucketName, objectPrefix, BucketPolicy) error](examples/s3/setbucketpolicy.go)
-* [GetBucketPolicy(bucketName, objectPrefix) (BucketPolicy, error)](examples/s3/getbucketpolicy.go)
-* [RemoveBucketPolicy(bucketName, objectPrefix) error](examples/s3/removebucketpolicy.go)
+### Run FileUploader
+```sh
+go run FileUploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
-### API Reference
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+```
-[](http://godoc.org/github.com/minio/minio-go)
+## API Reference
+The full API Reference is available here.
+
+* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
+
+### API Reference : Bucket notification Operations
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API Reference : File Object Operations
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
+
+### API Reference : Object Operations
+* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
+### API Reference: Encrypted Object Operations
+* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
+* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
+
+### API Reference : Presigned Operations
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
+
+### Full Examples : File Object Operations
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.minio.io)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
-[Contributors Guide](./CONTRIBUTING.md)
+[](https://travis-ci.org/minio/minio-go)
+[](https://ci.appveyor.com/project/harshavardhana/minio-go)
-[](https://travis-ci.org/minio/minio-go) [](https://ci.appveyor.com/project/harshavardhana/minio-go)
diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go
new file mode 100644
index 0000000000000000000000000000000000000000..4fa88b81843366903d982bbf62f34c63b265893f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-compose-object.go
@@ -0,0 +1,532 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// SSEInfo - represents Server-Side-Encryption parameters specified by
+// a user.
+type SSEInfo struct {
+ key []byte
+ algo string
+}
+
+// NewSSEInfo - specifies (binary or un-encoded) encryption key and
+// algorithm name. If algo is empty, it defaults to "AES256". Ref:
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
+func NewSSEInfo(key []byte, algo string) SSEInfo {
+ if algo == "" {
+ algo = "AES256"
+ }
+ return SSEInfo{key, algo}
+}
+
+// internal method that computes SSE-C headers
+func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
+ if s == nil {
+ return nil
+ }
+
+ cs := ""
+ if isCopySource {
+ cs = "copy-source-"
+ }
+ return map[string]string{
+ "x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
+ "x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key),
+ "x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)),
+ }
+}
+
+// GetSSEHeaders - computes and returns headers for SSE-C as key-value
+// pairs. They can be set as metadata in PutObject* requests (for
+// encryption) or be set as request headers in `Core.GetObject` (for
+// decryption).
+func (s *SSEInfo) GetSSEHeaders() map[string]string {
+ return s.getSSEHeaders(false)
+}
+
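+// Illustrative sketch (not part of the library API): how the SSE-C headers
+// returned by GetSSEHeaders might be attached to a request. The key below is
+// a placeholder; SSE-C with AES256 expects a 32-byte key.
+//
+//	sse := NewSSEInfo([]byte("32byteslongsecretkeymustprovided"), "")
+//	headers := sse.GetSSEHeaders()
+//	// headers now holds the x-amz-server-side-encryption-customer-* entries
+//	// to set as metadata on an upload or as request headers when reading back.
+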
+// DestinationInfo - type with information about the object to be
+// created via server-side copy requests, using the Compose API.
+type DestinationInfo struct {
+ bucket, object string
+
+ // key for encrypting destination
+ encryption *SSEInfo
+
+ // if no user-metadata is provided, it is copied from source
+	// (when there is only one source object in the compose
+ // request)
+ userMetadata map[string]string
+}
+
+// NewDestinationInfo - creates a compose-object/copy-source
+// destination info object.
+//
+// `encSSEC` is the key info for server-side-encryption with customer
+// provided key. If it is nil, no encryption is performed.
+//
+// `userMeta` is the user-metadata key-value pairs to be set on the
+// destination. The keys are automatically prefixed with `x-amz-meta-`
+// if needed. If nil is passed, and if only a single source (of any
+// size) is provided in the ComposeObject call, then metadata from the
+// source is copied to the destination.
+func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
+ userMeta map[string]string) (d DestinationInfo, err error) {
+
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucket); err != nil {
+ return d, err
+ }
+ if err = s3utils.CheckValidObjectName(object); err != nil {
+ return d, err
+ }
+
+ // Process custom-metadata to remove a `x-amz-meta-` prefix if
+ // present and validate that keys are distinct (after this
+ // prefix removal).
+ m := make(map[string]string)
+ for k, v := range userMeta {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ k = k[len("x-amz-meta-"):]
+ }
+ if _, ok := m[k]; ok {
+ return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
+ }
+ m[k] = v
+ }
+
+ return DestinationInfo{
+ bucket: bucket,
+ object: object,
+ encryption: encryptSSEC,
+ userMetadata: m,
+ }, nil
+}
+
+// getUserMetaHeadersMap - construct appropriate key-value pairs to send
+// as headers from metadata map to pass into copy-object request. For
+// single part copy-object (i.e. non-multipart object), enable the
+// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to
+// `REPLACE`, so that metadata headers from the source are not copied
+// over.
+func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
+ if len(d.userMetadata) == 0 {
+ return nil
+ }
+ r := make(map[string]string)
+ if withCopyDirectiveHeader {
+ r["x-amz-metadata-directive"] = "REPLACE"
+ }
+ for k, v := range d.userMetadata {
+ r["x-amz-meta-"+k] = v
+ }
+ return r
+}
+
+// SourceInfo - represents a source object to be copied, using
+// server-side copying APIs.
+type SourceInfo struct {
+ bucket, object string
+
+ start, end int64
+
+ decryptKey *SSEInfo
+ // Headers to send with the upload-part-copy request involving
+ // this source object.
+ Headers http.Header
+}
+
+// NewSourceInfo - create a compose-object/copy-object source info
+// object.
+//
+// `decryptSSEC` is the decryption key using server-side-encryption
+// with customer provided key. It may be nil if the source is not
+// encrypted.
+func NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo {
+ r := SourceInfo{
+ bucket: bucket,
+ object: object,
+ start: -1, // range is unspecified by default
+ decryptKey: decryptSSEC,
+ Headers: make(http.Header),
+ }
+
+ // Set the source header
+ r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object))
+
+ // Assemble decryption headers for upload-part-copy request
+ for k, v := range decryptSSEC.getSSEHeaders(true) {
+ r.Headers.Set(k, v)
+ }
+
+ return r
+}
+
+// SetRange - Set the start and end offset of the source object to be
+// copied. If this method is not called, the whole source object is
+// copied.
+func (s *SourceInfo) SetRange(start, end int64) error {
+ if start > end || start < 0 {
+ return ErrInvalidArgument("start must be non-negative, and start must be at most end.")
+ }
+ // Note that 0 <= start <= end
+ s.start, s.end = start, end
+ return nil
+}
+
+// SetMatchETagCond - Set ETag match condition. The object is copied
+// only if the etag of the source matches the value given here.
+func (s *SourceInfo) SetMatchETagCond(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-match", etag)
+ return nil
+}
+
+// SetMatchETagExceptCond - Set the ETag match exception
+// condition. The object is copied only if the etag of the source is
+// not the value given here.
+func (s *SourceInfo) SetMatchETagExceptCond(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-none-match", etag)
+ return nil
+}
+
+// SetModifiedSinceCond - Set the modified since condition.
+func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Input time cannot be 0.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetUnmodifiedSinceCond - Set the unmodified since condition.
+func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Input time cannot be 0.")
+ }
+ s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
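+// Illustrative sketch (not part of the library API): restricting a source to
+// a byte range and an ETag match before composing. Bucket, object and ETag
+// values below are placeholders.
+//
+//	src := NewSourceInfo("srcbucket", "srcobject", nil)
+//	if err := src.SetRange(0, 1048575); err != nil { // copy only the first 1 MiB
+//		// handle error
+//	}
+//	if err := src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a"); err != nil {
+//		// handle error
+//	}
+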
+// Helper to fetch size and etag of an object using a StatObject call.
+func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) {
+ // Get object info - need size and etag here. Also, decryption
+ // headers are added to the stat request if given.
+ var objInfo ObjectInfo
+ rh := NewGetReqHeaders()
+ for k, v := range s.decryptKey.getSSEHeaders(false) {
+ rh.Set(k, v)
+ }
+ objInfo, err = c.statObject(s.bucket, s.object, rh)
+ if err != nil {
+ err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
+ } else {
+ size = objInfo.Size
+ etag = objInfo.ETag
+ userMeta = make(map[string]string)
+ for k, v := range objInfo.Metadata {
+ if strings.HasPrefix(k, "x-amz-meta-") {
+ if len(v) > 0 {
+ userMeta[k] = v[0]
+ }
+ }
+ }
+ }
+ return
+}
+
+// uploadPartCopy - helper function to create a part in a multipart
+// upload via an upload-part-copy request
+// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
+func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
+ headers http.Header) (p CompletePart, err error) {
+
+ // Build query parameters
+ urlValues := make(url.Values)
+ urlValues.Set("partNumber", strconv.Itoa(partNumber))
+ urlValues.Set("uploadId", uploadID)
+
+ // Send upload-part-copy request
+ resp, err := c.executeMethod("PUT", requestMetadata{
+ bucketName: bucket,
+ objectName: object,
+ customHeader: headers,
+ queryValues: urlValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return p, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, bucket, object)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partNumber, cpObjRes.ETag
+ return p, nil
+}
+
+// ComposeObject - creates an object using server-side copying of
+// existing objects. It takes a list of source objects (with optional
+// offsets) and concatenates them into a new object using only
+// server-side copying operations.
+func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
+ if len(srcs) < 1 || len(srcs) > maxPartsCount {
+		return ErrInvalidArgument("There must be at least one and up to 10000 source objects.")
+ }
+
+ srcSizes := make([]int64, len(srcs))
+ var totalSize, size, totalParts int64
+ var srcUserMeta map[string]string
+ var etag string
+ var err error
+ for i, src := range srcs {
+ size, etag, srcUserMeta, err = src.getProps(c)
+ if err != nil {
+ return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
+ }
+
+		// Error out if client side encryption is used in this source object when
+		// more than one source object is given.
+ if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" {
+ return ErrInvalidArgument(
+ fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object))
+ }
+
+ // Since we did a HEAD to get size, we use the ETag
+ // value to make sure the object has not changed by
+ // the time we perform the copy. This is done, only if
+ // the user has not set their own ETag match
+ // condition.
+ if src.Headers.Get("x-amz-copy-source-if-match") == "" {
+ src.SetMatchETagCond(etag)
+ }
+
+ // Check if a segment is specified, and if so, is the
+ // segment within object bounds?
+ if src.start != -1 {
+ // Since range is specified,
+ // 0 <= src.start <= src.end
+ // so only invalid case to check is:
+ if src.end >= size {
+ return ErrInvalidArgument(
+ fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)",
+ i, src.start, src.end, size))
+ }
+ size = src.end - src.start + 1
+ }
+
+ // Only the last source may be less than `absMinPartSize`
+ if size < absMinPartSize && i < len(srcs)-1 {
+ return ErrInvalidArgument(
+ fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size))
+ }
+
+ // Is data to copy too large?
+ totalSize += size
+ if totalSize > maxMultipartPutObjectSize {
+ return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize))
+ }
+
+ // record source size
+ srcSizes[i] = size
+
+ // calculate parts needed for current source
+ totalParts += partsRequired(size)
+ // Do we need more parts than we are allowed?
+ if totalParts > maxPartsCount {
+ return ErrInvalidArgument(fmt.Sprintf(
+ "Your proposed compose object requires more than %d parts", maxPartsCount))
+ }
+ }
+
+ // Single source object case (i.e. when only one source is
+ // involved, it is being copied wholly and at most 5GiB in
+ // size).
+ if totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize {
+ h := srcs[0].Headers
+ // Add destination encryption headers
+ for k, v := range dst.encryption.getSSEHeaders(false) {
+ h.Set(k, v)
+ }
+
+ // If no user metadata is specified (and so, the
+ // for-loop below is not entered), metadata from the
+ // source is copied to the destination (due to
+ // single-part copy-object PUT request behaviour).
+ for k, v := range dst.getUserMetaHeadersMap(true) {
+ h.Set(k, v)
+ }
+
+ // Send copy request
+ resp, err := c.executeMethod("PUT", requestMetadata{
+ bucketName: dst.bucket,
+ objectName: dst.object,
+ customHeader: h,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, dst.bucket, dst.object)
+ }
+
+ // Return nil on success.
+ return nil
+ }
+
+ // Now, handle multipart-copy cases.
+
+ // 1. Initiate a new multipart upload.
+
+ // Set user-metadata on the destination object. If no
+ // user-metadata is specified, and there is only one source,
+ // (only) then metadata from source is copied.
+ userMeta := dst.getUserMetaHeadersMap(false)
+ metaMap := userMeta
+ if len(userMeta) == 0 && len(srcs) == 1 {
+ metaMap = srcUserMeta
+ }
+ metaHeaders := make(map[string][]string)
+ for k, v := range metaMap {
+ metaHeaders[k] = append(metaHeaders[k], v)
+ }
+ uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
+ if err != nil {
+ return fmt.Errorf("Error creating new upload: %v", err)
+ }
+
+ // 2. Perform copy part uploads
+ objParts := []CompletePart{}
+ partIndex := 1
+ for i, src := range srcs {
+ h := src.Headers
+ // Add destination encryption headers
+ for k, v := range dst.encryption.getSSEHeaders(false) {
+ h.Set(k, v)
+ }
+
+ // calculate start/end indices of parts after
+ // splitting.
+ startIdx, endIdx := calculateEvenSplits(srcSizes[i], src)
+ for j, start := range startIdx {
+ end := endIdx[j]
+
+ // Add (or reset) source range header for
+ // upload part copy request.
+ h.Set("x-amz-copy-source-range",
+ fmt.Sprintf("bytes=%d-%d", start, end))
+
+ // make upload-part-copy request
+ complPart, err := c.uploadPartCopy(dst.bucket,
+ dst.object, uploadID, partIndex, h)
+ if err != nil {
+ return fmt.Errorf("Error in upload-part-copy - %v", err)
+ }
+ objParts = append(objParts, complPart)
+ partIndex++
+ }
+ }
+
+ // 3. Make final complete-multipart request.
+ _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
+ completeMultipartUpload{Parts: objParts})
+ if err != nil {
+ err = fmt.Errorf("Error in complete-multipart request - %v", err)
+ }
+ return err
+}
+
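+// Illustrative sketch (not part of the library API): concatenating two source
+// objects into one destination via ComposeObject. All bucket/object names are
+// placeholders, `client` is an already-initialized Client, and no encryption or
+// custom metadata is used. Note that every source except the last one must be
+// at least absMinPartSize in size.
+//
+//	srcs := []SourceInfo{
+//		NewSourceInfo("srcbucket", "part-1.bin", nil),
+//		NewSourceInfo("srcbucket", "part-2.bin", nil),
+//	}
+//	dst, err := NewDestinationInfo("dstbucket", "combined.bin", nil, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := client.ComposeObject(dst, srcs); err != nil {
+//		// handle error
+//	}
+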
+// partsRequired is ceiling(size / copyPartSize)
+func partsRequired(size int64) int64 {
+ r := size / copyPartSize
+ if size%copyPartSize > 0 {
+ r++
+ }
+ return r
+}
+
+// calculateEvenSplits - computes splits for a source and returns
+// start and end index slices. Splits happen evenly to be sure that no
+// part is less than 5MiB, as that could fail the multipart request if
+// it is not the last part.
+func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) {
+ if size == 0 {
+ return
+ }
+
+ reqParts := partsRequired(size)
+ startIndex = make([]int64, reqParts)
+ endIndex = make([]int64, reqParts)
+ // Compute number of required parts `k`, as:
+ //
+ // k = ceiling(size / copyPartSize)
+ //
+ // Now, distribute the `size` bytes in the source into
+ // k parts as evenly as possible:
+ //
+ // r parts sized (q+1) bytes, and
+ // (k - r) parts sized q bytes, where
+ //
+ // size = q * k + r (by simple division of size by k,
+ // so that 0 <= r < k)
+ //
+ start := src.start
+ if start == -1 {
+ start = 0
+ }
+ quot, rem := size/reqParts, size%reqParts
+ nextStart := start
+ for j := int64(0); j < reqParts; j++ {
+ curPartSize := quot
+ if j < rem {
+ curPartSize++
+ }
+
+ cStart := nextStart
+ cEnd := cStart + curPartSize - 1
+ nextStart = cEnd + 1
+
+ startIndex[j], endIndex[j] = cStart, cEnd
+ }
+ return
+}
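+
+// Worked example for calculateEvenSplits (comment only): if a source of size
+// 10 required 3 parts, then quot = 3 and rem = 1, giving part sizes 4, 3, 3
+// and index ranges [0,3], [4,6], [7,9] for a source range starting at offset
+// 0. The part sizes always sum back to the original size.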
diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go
index 0871b1cfb20df565c455ff0b5278454f254e24d4..ab2aa4af2c86cfafc5da604577277858b0845542 100644
--- a/vendor/github.com/minio/minio-go/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-datatypes.go
@@ -16,7 +16,10 @@
package minio
-import "time"
+import (
+ "net/http"
+ "time"
+)
// BucketInfo container for bucket metadata.
type BucketInfo struct {
@@ -38,6 +41,10 @@ type ObjectInfo struct {
Size int64 `json:"size"` // Size in bytes of the object.
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
+ // Collection of additional metadata on the object.
+ // eg: x-amz-meta-*, content-encoding etc.
+ Metadata http.Header `json:"metadata"`
+
// Owner name.
Owner struct {
DisplayName string `json:"name"`
diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go
index e6789aff537326794883677f14579df39c91bc50..e0019a3347aa872d893637b627f1c7c75d387673 100644
--- a/vendor/github.com/minio/minio-go/api-error-response.go
+++ b/vendor/github.com/minio/minio-go/api-error-response.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -48,6 +48,9 @@ type ErrorResponse struct {
// Region where the bucket is located. This header is returned
// only in HEAD bucket and ListObjects response.
Region string
+
+ // Headers of the returned S3 XML error
+ Headers http.Header `xml:"-" json:"-"`
}
// ToErrorResponse - Returns parsed ErrorResponse struct from body and
@@ -72,8 +75,15 @@ func ToErrorResponse(err error) ErrorResponse {
}
}
-// Error - Returns HTTP error string
+// Error - Returns S3 error string.
func (e ErrorResponse) Error() string {
+ if e.Message == "" {
+ msg, ok := s3ErrorResponseMap[e.Code]
+ if !ok {
+ msg = fmt.Sprintf("Error response code %s.", e.Code)
+ }
+ return msg
+ }
return e.Message
}
@@ -91,6 +101,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
return ErrInvalidArgument(msg)
}
var errResp ErrorResponse
+
err := xmlDecoder(resp.Body, &errResp)
// Xml decoding failed with no body, fall back to HTTP headers.
if err != nil {
@@ -101,9 +112,6 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
}
} else {
errResp = ErrorResponse{
@@ -111,9 +119,6 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
Message: "The specified key does not exist.",
BucketName: bucketName,
Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
}
}
case http.StatusForbidden:
@@ -122,33 +127,60 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
Message: "Access Denied.",
BucketName: bucketName,
Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
}
case http.StatusConflict:
errResp = ErrorResponse{
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
+ }
+ case http.StatusPreconditionFailed:
+ errResp = ErrorResponse{
+ Code: "PreconditionFailed",
+ Message: s3ErrorResponseMap["PreconditionFailed"],
+ BucketName: bucketName,
+ Key: objectName,
}
default:
errResp = ErrorResponse{
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
}
}
}
+
+	// Save hostID, requestID and region information
+ // from headers if not available through error XML.
+ if errResp.RequestID == "" {
+ errResp.RequestID = resp.Header.Get("x-amz-request-id")
+ }
+ if errResp.HostID == "" {
+ errResp.HostID = resp.Header.Get("x-amz-id-2")
+ }
+ if errResp.Region == "" {
+ errResp.Region = resp.Header.Get("x-amz-bucket-region")
+ }
+ if errResp.Code == "InvalidRegion" && errResp.Region != "" {
+ errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
+ }
+
+ // Save headers returned in the API XML error
+ errResp.Headers = resp.Header
+
return errResp
}
+// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
+func ErrTransferAccelerationBucket(bucketName string) error {
+ msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
+ return ErrorResponse{
+ Code: "InvalidArgument",
+ Message: msg,
+ BucketName: bucketName,
+ }
+}
+
// ErrEntityTooLarge - Input size is larger than supported maximum.
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
@@ -201,16 +233,6 @@ func ErrInvalidObjectName(message string) error {
}
}
-// ErrInvalidParts - Invalid number of parts.
-func ErrInvalidParts(expectedParts, uploadedParts int) error {
- msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts)
- return ErrorResponse{
- Code: "InvalidParts",
- Message: msg,
- RequestID: "minio",
- }
-}
-
// ErrInvalidObjectPrefix - Invalid object prefix response is
// similar to object name response.
var ErrInvalidObjectPrefix = ErrInvalidObjectName
@@ -223,3 +245,23 @@ func ErrInvalidArgument(message string) error {
RequestID: "minio",
}
}
+
+// ErrNoSuchBucketPolicy - No Such Bucket Policy response
+// The specified bucket does not have a bucket policy.
+func ErrNoSuchBucketPolicy(message string) error {
+ return ErrorResponse{
+ Code: "NoSuchBucketPolicy",
+ Message: message,
+ RequestID: "minio",
+ }
+}
+
+// ErrAPINotSupported - API not supported response
+// The specified API call is not supported
+func ErrAPINotSupported(message string) error {
+ return ErrorResponse{
+ Code: "APINotSupported",
+ Message: message,
+ RequestID: "minio",
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go
index 265a58eea76efc260c1f878d19f113f696c3ef07..c4193e934f0747620a1172a7d2301e0fd7a9024f 100644
--- a/vendor/github.com/minio/minio-go/api-get-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-get-object-file.go
@@ -20,15 +20,17 @@ import (
"io"
"os"
"path/filepath"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// FGetObject - download contents of an object to a local file.
func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
@@ -48,7 +50,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
}
}
- // Extract top level direcotry.
+ // Extract top level directory.
objectDir, _ := filepath.Split(filePath)
if objectDir != "" {
// Create any missing top level directories.
@@ -78,8 +80,15 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
return err
}
+ // Initialize get object request headers to set the
+ // appropriate range offsets to read from.
+ reqHeaders := NewGetReqHeaders()
+ if st.Size() > 0 {
+ reqHeaders.SetRange(st.Size(), 0)
+ }
+
// Seek to current position for incoming reader.
- objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0)
+ objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders)
if err != nil {
return err
}
diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go
index b5b74ff72dc5fc77f3e2329603d35e09627482ba..9bd784ffa8acfdebe9d166f61f5b9fbb7042503f 100644
--- a/vendor/github.com/minio/minio-go/api-get-object.go
+++ b/vendor/github.com/minio/minio-go/api-get-object.go
@@ -1,5 +1,5 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,28 +24,55 @@ import (
"strings"
"sync"
"time"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+ "github.com/minio/minio-go/pkg/s3utils"
)
+// GetEncryptedObject deciphers and streams data stored in the server after applying the specified encryption materials;
+// the returned stream should be closed by the caller.
+func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error) {
+ if encryptMaterials == nil {
+ return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
+ }
+
+ // Fetch encrypted object
+ encReader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ return nil, err
+ }
+ // Stat object to get its encryption metadata
+ st, err := encReader.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+	// Setup object for decryption, object is transparently
+ // decrypted as the consumer starts reading.
+ encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey))
+
+ // Success.
+ return encryptMaterials, nil
+}
+
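+// Illustrative sketch (not part of the library API): fetching and decrypting
+// an object, assuming a symmetric key and CBC materials built with the
+// pkg/encrypt helpers. Key, bucket and object names are placeholders.
+//
+//	key := encrypt.NewSymmetricKey([]byte("32byteslongsecretkeymustprovided"))
+//	materials, err := encrypt.NewCBCSecureMaterials(key)
+//	if err != nil {
+//		// handle error
+//	}
+//	reader, err := client.GetEncryptedObject("mybucket", "mysecret.txt", materials)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer reader.Close()
+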
 // GetObject - returns a seekable, readable object.
func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
- // Start the request as soon Get is initiated.
- httpReader, objectInfo, err := c.getObject(bucketName, objectName, 0, 0)
- if err != nil {
- return nil, err
- }
+ var httpReader io.ReadCloser
+ var objectInfo ObjectInfo
+ var err error
// Create request channel.
- reqCh := make(chan readRequest)
+ reqCh := make(chan getRequest)
// Create response channel.
- resCh := make(chan readResponse)
+ resCh := make(chan getResponse)
// Create done channel.
doneCh := make(chan struct{})
@@ -54,83 +81,262 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
defer close(reqCh)
defer close(resCh)
+ // Used to verify if etag of object has changed since last read.
+ var etag string
+
// Loop through the incoming control messages and read data.
for {
select {
// When the done channel is closed exit our routine.
case <-doneCh:
+ // Close the http response body before returning.
+ // This ends the connection with the server.
+ if httpReader != nil {
+ httpReader.Close()
+ }
return
- // Request message.
+
+ // Gather incoming request.
case req := <-reqCh:
- // Offset changes fetch the new object at an Offset.
- if req.DidOffsetChange {
- // Read from offset.
- httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
+ // If this is the first request we may not need to do a getObject request yet.
+ if req.isFirstReq {
+ // First request is a Read/ReadAt.
+ if req.isReadOp {
+ reqHeaders := NewGetReqHeaders()
+ // Differentiate between wanting the whole object and just a range.
+ if req.isReadAt {
+ // If this is a ReadAt request only get the specified range.
+ // Range is set with respect to the offset and length of the buffer requested.
+ // Do not set objectInfo from the first readAt request because it will not get
+ // the whole object.
+ reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
+ } else {
+ if req.Offset > 0 {
+ reqHeaders.SetRange(req.Offset, 0)
+ }
+
+ // First request is a Read request.
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
+ }
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ etag = objectInfo.ETag
+ // Read at least firstReq.Buffer bytes, if not we have
+ // reached our EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ }
+ } else {
+ // First request is a Stat or Seek call.
+ // Only need to run a StatObject until an actual Read or ReadAt request comes through.
+ objectInfo, err = c.StatObject(bucketName, objectName)
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the go-routine.
+ return
+ }
+ etag = objectInfo.ETag
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ }
+ } else if req.settingObjectInfo { // Request is just to get objectInfo.
+ reqHeaders := NewGetReqHeaders()
+ if etag != "" {
+ reqHeaders.SetMatchETag(etag)
+ }
+ objectInfo, err := c.statObject(bucketName, objectName, reqHeaders)
if err != nil {
- resCh <- readResponse{
+ resCh <- getResponse{
Error: err,
}
+ // Exit the goroutine.
return
}
- }
+ // Send back the objectInfo.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ } else {
+ // Offset changes fetch the new object at an Offset.
+					// Because the httpReader may not be set by the first
+					// request (if it was a Stat or Seek), check whether the
+					// object has been read, so a new reader is initialized
+					// only when one does not already exist.
+ // All readAt requests are new requests.
+ if req.DidOffsetChange || !req.beenRead {
+ reqHeaders := NewGetReqHeaders()
+ if etag != "" {
+ reqHeaders.SetMatchETag(etag)
+ }
+ if httpReader != nil {
+ // Close previously opened http reader.
+ httpReader.Close()
+ }
+ // If this request is a readAt only get the specified range.
+ if req.isReadAt {
+ // Range is set with respect to the offset and length of the buffer requested.
+ reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders)
+ } else {
+ // Range is set with respect to the offset.
+ if req.Offset > 0 {
+ reqHeaders.SetRange(req.Offset, 0)
+ }
+
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
+ }
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ }
- // Read at least req.Buffer bytes, if not we have
- // reached our EOF.
- size, err := io.ReadFull(httpReader, req.Buffer)
- if err == io.ErrUnexpectedEOF {
- // If an EOF happens after reading some but not
- // all the bytes ReadFull returns ErrUnexpectedEOF
- err = io.EOF
- }
- // Reply back how much was read.
- resCh <- readResponse{
- Size: int(size),
- Error: err,
+ // Read at least req.Buffer bytes, if not we have
+ // reached our EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Reply back how much was read.
+ resCh <- getResponse{
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ objectInfo: objectInfo,
+ }
}
}
}
}()
- // Return the readerAt backed by routine.
- return newObject(reqCh, resCh, doneCh, objectInfo), nil
-}
-// Read response message container to reply back for the request.
-type readResponse struct {
- Size int
- Error error
+ // Create a newObject through the information sent back by reqCh.
+ return newObject(reqCh, resCh, doneCh), nil
}
-// Read request message container to communicate with internal
+// get request message container to communicate with internal
// go-routine.
-type readRequest struct {
- Buffer []byte
- Offset int64 // readAt offset.
- DidOffsetChange bool
+type getRequest struct {
+ Buffer []byte
+ Offset int64 // readAt offset.
+ DidOffsetChange bool // Tracks the offset changes for Seek requests.
+ beenRead bool // Determines if this is the first time an object is being read.
+ isReadAt bool // Determines if this request is a request to a specific range
+	isReadOp          bool   // Determines if this request is a Read or ReadAt request.
+ isFirstReq bool // Determines if this request is the first time an object is being accessed.
+ settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
}
-// Object represents an open object. It implements Read, ReadAt,
-// Seeker, Close for a HTTP stream.
+// get response message container to reply back for the request.
+type getResponse struct {
+ Size int
+ Error error
+ didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
+ objectInfo ObjectInfo // Used for the first request.
+}
+
+// Object represents an open object. It implements
+// Reader, ReaderAt, Seeker, Closer for a HTTP stream.
type Object struct {
// Mutex.
mutex *sync.Mutex
// User allocated and defined.
- reqCh chan<- readRequest
- resCh <-chan readResponse
+ reqCh chan<- getRequest
+ resCh <-chan getResponse
doneCh chan<- struct{}
- prevOffset int64
currOffset int64
objectInfo ObjectInfo
+ // Ask lower level to initiate data fetching based on currOffset
+ seekData bool
+
// Keeps track of closed call.
isClosed bool
+ // Keeps track of if this is the first call.
+ isStarted bool
+
// Previous error saved for future calls.
prevErr error
+
+ // Keeps track of if this object has been read yet.
+ beenRead bool
+
+ // Keeps track of if objectInfo has been set yet.
+ objectInfoSet bool
}
-// Read reads up to len(p) bytes into p. It returns the number of
-// bytes read (0 <= n <= len(p)) and any error encountered. Returns
+// doGetRequest - sends the request on the object's reqCh and blocks for the reply on resCh.
+// Returns back the size of the buffer read, if anything was read, as well
+// as any error encountered. For all first requests sent on the object
+// it is also responsible for sending back the objectInfo.
+func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
+ o.reqCh <- request
+ response := <-o.resCh
+
+ // Return any error to the top level.
+ if response.Error != nil {
+ return response, response.Error
+ }
+
+ // This was the first request.
+ if !o.isStarted {
+ // The object has been operated on.
+ o.isStarted = true
+ }
+ // Set the objectInfo if the request was not readAt
+ // and it hasn't been set before.
+ if !o.objectInfoSet && !request.isReadAt {
+ o.objectInfo = response.objectInfo
+ o.objectInfoSet = true
+ }
+ // Set beenRead only if it has not been set before.
+ if !o.beenRead {
+ o.beenRead = response.didRead
+ }
+	// Data is ready on the wire, no need to reinitiate the connection at the lower level
+ o.seekData = false
+
+ return response, nil
+}
+
+// setOffset - handles the setting of offsets for
+// Read/ReadAt/Seek requests.
+func (o *Object) setOffset(bytesRead int64) error {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+
+ if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size {
+ return io.EOF
+ }
+ return nil
+}
+
+// Read reads up to len(b) bytes into b. It returns the number of
+// bytes read (0 <= n <= len(b)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
if o == nil {
@@ -145,64 +351,46 @@ func (o *Object) Read(b []byte) (n int, err error) {
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
-
- // If current offset has reached Size limit, return EOF.
- if o.currOffset >= o.objectInfo.Size {
- return 0, io.EOF
+ // Create a new request.
+ readReq := getRequest{
+ isReadOp: true,
+ beenRead: o.beenRead,
+ Buffer: b,
}
- // Send current information over control channel to indicate we are ready.
- reqMsg := readRequest{}
- // Send the pointer to the buffer over the channel.
- reqMsg.Buffer = b
-
- // Verify if offset has changed and currOffset is greater than
- // previous offset. Perhaps due to Seek().
- offsetChange := o.prevOffset - o.currOffset
- if offsetChange < 0 {
- offsetChange = -offsetChange
- }
- if offsetChange > 0 {
- // Fetch the new reader at the current offset again.
- reqMsg.Offset = o.currOffset
- reqMsg.DidOffsetChange = true
- } else {
- // No offset changes no need to fetch new reader, continue
- // reading.
- reqMsg.DidOffsetChange = false
- reqMsg.Offset = 0
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readReq.isFirstReq = true
}
- // Send read request over the control channel.
- o.reqCh <- reqMsg
+ // Ask to establish a new data fetch routine based on seekData flag
+ readReq.DidOffsetChange = o.seekData
+ readReq.Offset = o.currOffset
- // Get data over the response channel.
- dataMsg := <-o.resCh
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readReq)
+ if err != nil && err != io.EOF {
+ // Save the error for future calls.
+ o.prevErr = err
+ return response.Size, err
+ }
// Bytes read.
- bytesRead := int64(dataMsg.Size)
-
- // Update current offset.
- o.currOffset += bytesRead
-
- // Save the current offset as previous offset.
- o.prevOffset = o.currOffset
-
- if dataMsg.Error == nil {
- // If currOffset read is equal to objectSize
- // We have reached end of file, we return io.EOF.
- if o.currOffset >= o.objectInfo.Size {
- return dataMsg.Size, io.EOF
- }
- return dataMsg.Size, nil
+ bytesRead := int64(response.Size)
+
+ // Set the new offset.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ // Save the error for future calls.
+ o.prevErr = oerr
+ return response.Size, oerr
}
- // Save any error.
- o.prevErr = dataMsg.Error
- return dataMsg.Size, dataMsg.Error
+ // Return the response.
+ return response.Size, err
}
-// Stat returns the ObjectInfo structure describing object.
+// Stat returns the ObjectInfo structure describing Object.
func (o *Object) Stat() (ObjectInfo, error) {
if o == nil {
return ObjectInfo{}, ErrInvalidArgument("Object is nil")
@@ -211,10 +399,25 @@ func (o *Object) Stat() (ObjectInfo, error) {
o.mutex.Lock()
defer o.mutex.Unlock()
- if o.prevErr != nil || o.isClosed {
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
return ObjectInfo{}, o.prevErr
}
+ // This is the first request.
+ if !o.isStarted || !o.objectInfoSet {
+ statReq := getRequest{
+ isFirstReq: !o.isStarted,
+ settingObjectInfo: !o.objectInfoSet,
+ }
+
+ // Send the request and get the response.
+ _, err := o.doGetRequest(statReq)
+ if err != nil {
+ o.prevErr = err
+ return ObjectInfo{}, err
+ }
+ }
+
return o.objectInfo, nil
}
@@ -236,56 +439,55 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
return 0, o.prevErr
}
- // If offset is negative and offset is greater than or equal to
- // object size we return EOF.
- if offset < 0 || offset >= o.objectInfo.Size {
- return 0, io.EOF
+ // Can only compare offsets to size when size has been set.
+ if o.objectInfoSet {
+		// If offset is negative then we return io.EOF.
+ // If offset is greater than or equal to object size we return io.EOF.
+ if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 {
+ return 0, io.EOF
+ }
}
- // Send current information over control channel to indicate we
- // are ready.
- reqMsg := readRequest{}
-
- // Send the offset and pointer to the buffer over the channel.
- reqMsg.Buffer = b
-
- // For ReadAt offset always changes, minor optimization where
- // offset same as currOffset we don't change the offset.
- reqMsg.DidOffsetChange = offset != o.currOffset
- if reqMsg.DidOffsetChange {
- // Set new offset.
- reqMsg.Offset = offset
- // Save new offset as current offset.
- o.currOffset = offset
+ // Create the new readAt request.
+ readAtReq := getRequest{
+ isReadOp: true,
+ isReadAt: true,
+ DidOffsetChange: true, // Offset always changes.
+ beenRead: o.beenRead, // Set if this is the first request to try and read.
+ Offset: offset, // Set the offset.
+ Buffer: b,
}
- // Send read request over the control channel.
- o.reqCh <- reqMsg
-
- // Get data over the response channel.
- dataMsg := <-o.resCh
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readAtReq.isFirstReq = true
+ }
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readAtReq)
+ if err != nil && err != io.EOF {
+ // Save the error.
+ o.prevErr = err
+ return response.Size, err
+ }
// Bytes read.
- bytesRead := int64(dataMsg.Size)
-
- // Update current offset.
- o.currOffset += bytesRead
-
- // Save current offset as previous offset before returning.
- o.prevOffset = o.currOffset
-
- if dataMsg.Error == nil {
- // If currentOffset is equal to objectSize
- // we have reached end of file, we return io.EOF.
- if o.currOffset >= o.objectInfo.Size {
- return dataMsg.Size, io.EOF
+ bytesRead := int64(response.Size)
+ // There is no valid objectInfo yet
+ // to compare against for EOF.
+ if !o.objectInfoSet {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ } else {
+ // If this was not the first request update
+ // the offsets and compare against objectInfo
+ // for EOF.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ o.prevErr = oerr
+ return response.Size, oerr
}
- return dataMsg.Size, nil
}
-
- // Save any error.
- o.prevErr = dataMsg.Error
- return dataMsg.Size, dataMsg.Error
+ return response.Size, err
}
// Seek sets the offset for the next Read or Write to offset,
@@ -318,24 +520,43 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
}
- // Save current offset as previous offset.
- o.prevOffset = o.currOffset
+ // This is the first request. So before anything else
+ // get the ObjectInfo.
+ if !o.isStarted || !o.objectInfoSet {
+ // Create the new Seek request.
+ seekReq := getRequest{
+ isReadOp: false,
+ Offset: offset,
+ isFirstReq: true,
+ }
+ // Send and receive from the seek request.
+ _, err := o.doGetRequest(seekReq)
+ if err != nil {
+ // Save the error.
+ o.prevErr = err
+ return 0, err
+ }
+ }
// Switch through whence.
switch whence {
default:
return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence))
case 0:
- if offset > o.objectInfo.Size {
+ if o.objectInfo.Size > -1 && offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset = offset
case 1:
- if o.currOffset+offset > o.objectInfo.Size {
+ if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size {
return 0, io.EOF
}
o.currOffset += offset
case 2:
+ // If we don't know the object size return an error for io.SeekEnd
+ if o.objectInfo.Size < 0 {
+ return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown")
+ }
// Seeking to positive offset is valid for whence '2', but
// since we are backing a Reader we have reached 'EOF' if
// offset is positive.
@@ -346,8 +567,17 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o.objectInfo.Size+offset < 0 {
return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
}
- o.currOffset += offset
+ o.currOffset = o.objectInfo.Size + offset
}
+ // Reset the saved error since we successfully seeked, let the Read
+ // and ReadAt decide.
+ if o.prevErr == io.EOF {
+ o.prevErr = nil
+ }
+
+ // Ask lower level to fetch again from source
+ o.seekData = true
+
// Return the effective offset.
return o.currOffset, nil
}
@@ -379,13 +609,13 @@ func (o *Object) Close() (err error) {
}
// newObject instantiates a new *minio.Object*
-func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object {
+// ObjectInfo will be set by setObjectInfo
+func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
return &Object{
- mutex: &sync.Mutex{},
- reqCh: reqCh,
- resCh: resCh,
- doneCh: doneCh,
- objectInfo: objectInfo,
+ mutex: &sync.Mutex{},
+ reqCh: reqCh,
+ resCh: resCh,
+ doneCh: doneCh,
}
}
@@ -396,30 +626,27 @@ func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<
//
// For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) {
+func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, ObjectInfo{}, err
}
+ // Set all the necessary reqHeaders.
customHeader := make(http.Header)
- // Set ranges if length and offset are valid.
- if length > 0 && offset >= 0 {
- customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
- } else if offset > 0 && length == 0 {
- customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset))
- } else if length < 0 && offset == 0 {
- customHeader.Set("Range", fmt.Sprintf("bytes=%d", length))
+ for key, value := range reqHeaders.Header {
+ customHeader[key] = value
}
// Execute GET on objectName.
resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentSHA256Bytes: emptySHA256,
})
if err != nil {
return nil, ObjectInfo{}, err
@@ -446,17 +673,24 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
Region: resp.Header.Get("x-amz-bucket-region"),
}
}
+
// Get content-type.
contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
if contentType == "" {
contentType = "application/octet-stream"
}
- var objectStat ObjectInfo
- objectStat.ETag = md5sum
- objectStat.Key = objectName
- objectStat.Size = resp.ContentLength
- objectStat.LastModified = date
- objectStat.ContentType = contentType
+
+ objectStat := ObjectInfo{
+ ETag: md5sum,
+ Key: objectName,
+ Size: resp.ContentLength,
+ LastModified: date,
+ ContentType: contentType,
+ // Extract only the relevant header keys describing the object.
+		// The following function filters out a standard set of keys
+ // which are not part of object metadata.
+ Metadata: extractObjMetadata(resp.Header),
+ }
// do not close body here, caller will close
return resp.Body, objectStat, nil
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
index 1004461b2c1073ebaee3f31deb151d7791d7cfac..10ccdc66b1c78ef920c34467d89520efac73c717 100644
--- a/vendor/github.com/minio/minio-go/api-get-policy.go
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -17,31 +17,62 @@
package minio
import (
- "io"
+ "encoding/json"
"io/ioutil"
"net/http"
"net/url"
- "sort"
+
+ "github.com/minio/minio-go/pkg/policy"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketPolicy - get bucket policy at a given path.
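+//
+// A minimal usage sketch, assuming an initialized Client named api:
+//
+//   bucketPolicy, err := api.GetBucketPolicy("mybucket", "downloads")
+//   if err != nil {
+//       log.Fatalln(err)
+//   }
+//   fmt.Println("Policy for prefix:", bucketPolicy)
+//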
-func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy BucketPolicy, err error) {
+func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ policyInfo, err := c.getBucketPolicy(bucketName)
+ if err != nil {
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "NoSuchBucketPolicy" {
+ return policy.BucketPolicyNone, nil
+ }
+ return policy.BucketPolicyNone, err
+ }
+ return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
+}
+
+// ListBucketPolicies - list all policies for a given prefix and all its children.
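+//
+// A minimal usage sketch, assuming an initialized Client named api:
+//
+//   bucketPolicies, err := api.ListBucketPolicies("mybucket", "")
+//   if err != nil {
+//       log.Fatalln(err)
+//   }
+//   for resource, perm := range bucketPolicies {
+//       fmt.Println(resource, "=>", perm)
+//   }
+//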
+func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return BucketPolicyNone, err
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return map[string]policy.BucketPolicy{}, err
}
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- return BucketPolicyNone, err
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return map[string]policy.BucketPolicy{}, err
}
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+ policyInfo, err := c.getBucketPolicy(bucketName)
if err != nil {
- return BucketPolicyNone, err
+ errResponse := ToErrorResponse(err)
+ if errResponse.Code == "NoSuchBucketPolicy" {
+ return map[string]policy.BucketPolicy{}, nil
+ }
+ return map[string]policy.BucketPolicy{}, err
}
- return identifyPolicyType(policy, bucketName, objectPrefix), nil
+ return policy.GetPolicies(policyInfo.Statements, bucketName), nil
}
-// Request server for policy.
-func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketAccessPolicy, error) {
+// Default empty bucket access policy.
+var emptyBucketAccessPolicy = policy.BucketAccessPolicy{
+ Version: "2012-10-17",
+}
+
+// Request server for current bucket policy.
+func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@@ -49,44 +80,28 @@ func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketA
// Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
})
defer closeResponse(resp)
if err != nil {
- return BucketAccessPolicy{}, err
+ return emptyBucketAccessPolicy, err
}
- return processBucketPolicyResponse(bucketName, resp)
-}
-
-// processes the GetPolicy http response from the server.
-func processBucketPolicyResponse(bucketName string, resp *http.Response) (BucketAccessPolicy, error) {
if resp != nil {
if resp.StatusCode != http.StatusOK {
- errResponse := httpRespToErrorResponse(resp, bucketName, "")
- if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
- return BucketAccessPolicy{Version: "2012-10-17"}, nil
- }
- return BucketAccessPolicy{}, errResponse
+ return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "")
}
}
- // Read access policy up to maxAccessPolicySize.
- // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
- // bucket policies are limited to 20KB in size, using a limit reader.
- bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxAccessPolicySize))
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- policy, err := unMarshalBucketPolicy(bucketPolicyBuf)
+
+ bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return BucketAccessPolicy{}, err
+ return emptyBucketAccessPolicy, err
}
- // Sort the policy actions and resources for convenience.
- for _, statement := range policy.Statements {
- sort.Strings(statement.Actions)
- sort.Strings(statement.Resources)
- }
- return policy, nil
+
+ policy := policy.BucketAccessPolicy{}
+ err = json.Unmarshal(bucketPolicyBuf, &policy)
+ return policy, err
}
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
index e61763db9e62a63c6a871d99a95df953d37fe495..6de1fe9b3f8beaaefe3b568e69e4dbd4236bb33e 100644
--- a/vendor/github.com/minio/minio-go/api-list.go
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -17,10 +17,13 @@
package minio
import (
+ "errors"
"fmt"
"net/http"
"net/url"
"strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// ListBuckets list all buckets owned by this authenticated user.
@@ -35,7 +38,7 @@ import (
//
func (c Client) ListBuckets() ([]BucketInfo, error) {
// Execute GET on service.
- resp, err := c.executeMethod("GET", requestMetadata{})
+ resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256})
defer closeResponse(resp)
if err != nil {
return nil, err
@@ -53,6 +56,196 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
return listAllMyBucketsResult.Buckets.Bucket, nil
}
+/// Bucket Read Operations.
+
+// ListObjectsV2 lists all objects matching the objectPrefix from
+// the specified bucket. If recursion is enabled it would list
+// all subdirectories and all its contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel for pro-actively closing the internal go
+// routine. If you enable recursive as 'true' this function will
+// return back all the objects in a given bucket name and object
+// prefix.
+//
+// api := client.New(....)
+// // Create a done channel.
+// doneCh := make(chan struct{})
+// defer close(doneCh)
+// // Recursively list all objects in 'mytestbucket'
+// recursive := true
+// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
+// fmt.Println(message)
+// }
+//
+func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
+ // Allocate new list objects channel.
+ objectStatCh := make(chan ObjectInfo, 1)
+ // Default listing is delimited at "/"
+ delimiter := "/"
+ if recursive {
+ // If recursive we do not delimit.
+ delimiter = ""
+ }
+
+ // Return object owner information by default
+ fetchOwner := true
+
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Validate incoming object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ defer close(objectStatCh)
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return objectStatCh
+ }
+
+ // Initiate list objects goroutine here.
+ go func(objectStatCh chan<- ObjectInfo) {
+ defer close(objectStatCh)
+ // Save continuationToken for next request.
+ var continuationToken string
+ for {
+ // Get list of objects a maximum of 1000 per request.
+ result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
+ if err != nil {
+ objectStatCh <- ObjectInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // If contents are available loop through and send over channel.
+ for _, object := range result.Contents {
+ select {
+ // Send object content.
+ case objectStatCh <- object:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // Send all common prefixes if any.
+ // NOTE: prefixes are only present if the request is delimited.
+ for _, obj := range result.CommonPrefixes {
+ select {
+ // Send object prefixes.
+ case objectStatCh <- ObjectInfo{
+ Key: obj.Prefix,
+ Size: 0,
+ }:
+ // If receives done from the caller, return here.
+ case <-doneCh:
+ return
+ }
+ }
+
+ // If continuation token present, save it for next request.
+ if result.NextContinuationToken != "" {
+ continuationToken = result.NextContinuationToken
+ }
+
+			// Listing ends when the result is not truncated, return right here.
+ if !result.IsTruncated {
+ return
+ }
+ }
+ }(objectStatCh)
+ return objectStatCh
+}
+
+// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?continuation-token - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+ // Validate bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ListBucketV2Result{}, err
+ }
+ // Validate object prefix.
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return ListBucketV2Result{}, err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+
+ // Always set list-type in ListObjects V2
+ urlValues.Set("list-type", "2")
+
+ // Set object prefix.
+ if objectPrefix != "" {
+ urlValues.Set("prefix", objectPrefix)
+ }
+ // Set continuation token
+ if continuationToken != "" {
+ urlValues.Set("continuation-token", continuationToken)
+ }
+ // Set delimiter.
+ if delimiter != "" {
+ urlValues.Set("delimiter", delimiter)
+ }
+
+ // Fetch owner when listing
+ if fetchOwner {
+ urlValues.Set("fetch-owner", "true")
+ }
+
+ // maxkeys should default to 1000 or less.
+ if maxkeys == 0 || maxkeys > 1000 {
+ maxkeys = 1000
+ }
+ // Set max keys.
+ urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ListBucketV2Result{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Decode listBuckets XML.
+ listBucketResult := ListBucketV2Result{}
+ if err = xmlDecoder(resp.Body, &listBucketResult); err != nil {
+ return listBucketResult, err
+ }
+
+ // This is an additional verification check to make
+ // sure proper responses are received.
+ if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" {
+ return listBucketResult, errors.New("Truncated response should have continuation token set")
+ }
+
+ // Success.
+ return listBucketResult, nil
+}
+
// ListObjects - (List Objects) - List some objects or all recursively.
//
// ListObjects lists all objects matching the objectPrefix from
@@ -77,7 +270,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) {
//
func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
// Allocate new list objects channel.
- objectStatCh := make(chan ObjectInfo)
+ objectStatCh := make(chan ObjectInfo, 1)
// Default listing is delimited at "/"
delimiter := "/"
if recursive {
@@ -85,7 +278,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
delimiter = ""
}
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -93,7 +286,7 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
return objectStatCh
}
// Validate incoming object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectStatCh)
objectStatCh <- ObjectInfo{
Err: err,
@@ -158,8 +351,6 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
return objectStatCh
}
-/// Bucket Read Operations.
-
// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
//
// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
@@ -169,14 +360,14 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) {
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
- return listBucketResult{}, err
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ListBucketResult{}, err
}
// Validate object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
- return listBucketResult{}, err
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+ return ListBucketResult{}, err
}
// Get resources properly escaped and lined up before
// using them in http request.
@@ -203,20 +394,21 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
// Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
})
defer closeResponse(resp)
if err != nil {
- return listBucketResult{}, err
+ return ListBucketResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return listBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
}
}
// Decode listBuckets XML.
- listBucketResult := listBucketResult{}
+ listBucketResult := ListBucketResult{}
err = xmlDecoder(resp.Body, &listBucketResult)
if err != nil {
return listBucketResult, err
@@ -254,7 +446,7 @@ func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive
// listIncompleteUploads lists all incomplete uploads.
func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
// Allocate channel for multipart uploads.
- objectMultipartStatCh := make(chan ObjectMultipartInfo)
+ objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
// Delimiter is set to "/" by default.
delimiter := "/"
if recursive {
@@ -262,7 +454,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
delimiter = ""
}
// Validate bucket name.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
@@ -270,7 +462,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
return objectMultipartStatCh
}
// Validate incoming object prefix.
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
defer close(objectMultipartStatCh)
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
@@ -304,6 +496,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
+ continue
}
}
select {
@@ -349,7 +542,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
-func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) {
+func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set uploads.
@@ -380,20 +573,21 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
// Execute GET on bucketName to list multipart uploads.
resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
})
defer closeResponse(resp)
if err != nil {
- return listMultipartUploadsResult{}, err
+ return ListMultipartUploadsResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return listMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+ return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
}
}
// Decode response body.
- listMultipartUploadsResult := listMultipartUploadsResult{}
+ listMultipartUploadsResult := ListMultipartUploadsResult{}
err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
if err != nil {
return listMultipartUploadsResult, err
@@ -402,10 +596,10 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
}
// listObjectParts list all object parts recursively.
-func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) {
+func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
// Part number marker for the next batch of request.
var nextPartNumberMarker int
- partsInfo = make(map[int]objectPart)
+ partsInfo = make(map[int]ObjectPart)
for {
// Get list of uploaded parts a maximum of 1000 per request.
listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
@@ -480,7 +674,7 @@ func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (
// ?part-number-marker - Specifies the part after which listing should
// begin.
// ?max-parts - Maximum parts to be listed per request.
-func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) {
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
// Get resources properly escaped and lined up before using them in http request.
urlValues := make(url.Values)
// Set part number marker.
@@ -497,21 +691,22 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
// Execute GET on objectName to get list of parts.
resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
})
defer closeResponse(resp)
if err != nil {
- return listObjectPartsResult{}, err
+ return ListObjectPartsResult{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return listObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Decode list object parts XML.
- listObjectPartsResult := listObjectPartsResult{}
+ listObjectPartsResult := ListObjectPartsResult{}
err = xmlDecoder(resp.Body, &listObjectPartsResult)
if err != nil {
return listObjectPartsResult, err
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
new file mode 100644
index 0000000000000000000000000000000000000000..25a283af5554104cb0b1d2898961cfa2eb0be600
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -0,0 +1,225 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bufio"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketNotification - get bucket notification at a given path.
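+//
+// A minimal usage sketch, assuming an initialized Client named api:
+//
+//   bucketNotification, err := api.GetBucketNotification("mybucket")
+//   if err != nil {
+//       log.Fatalln(err)
+//   }
+//   fmt.Printf("%+v\n", bucketNotification)
+//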
+func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return BucketNotification{}, err
+ }
+ notification, err := c.getBucketNotification(bucketName)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return notification, nil
+}
+
+// Request server for notification rules.
+func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return processBucketNotificationResponse(bucketName, resp)
+
+}
+
+// processes the GetNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ return BucketNotification{}, errResponse
+ }
+ var bucketNotification BucketNotification
+ err := xmlDecoder(resp.Body, &bucketNotification)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return bucketNotification, nil
+}
+
+// identity represents the user ID. This is a compliance field.
+type identity struct {
+ PrincipalID string `json:"principalId"`
+}
+
+// Notification event bucket metadata.
+type bucketMeta struct {
+ Name string `json:"name"`
+ OwnerIdentity identity `json:"ownerIdentity"`
+ ARN string `json:"arn"`
+}
+
+// Notification event object metadata.
+type objectMeta struct {
+ Key string `json:"key"`
+ Size int64 `json:"size,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ VersionID string `json:"versionId,omitempty"`
+ Sequencer string `json:"sequencer"`
+}
+
+// Notification event server specific metadata.
+type eventMeta struct {
+ SchemaVersion string `json:"s3SchemaVersion"`
+ ConfigurationID string `json:"configurationId"`
+ Bucket bucketMeta `json:"bucket"`
+ Object objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+ Host string `json:"host"`
+ Port string `json:"port"`
+ UserAgent string `json:"userAgent"`
+}
+
+// NotificationEvent represents an Amazon S3 bucket notification event.
+type NotificationEvent struct {
+ EventVersion string `json:"eventVersion"`
+ EventSource string `json:"eventSource"`
+ AwsRegion string `json:"awsRegion"`
+ EventTime string `json:"eventTime"`
+ EventName string `json:"eventName"`
+ UserIdentity identity `json:"userIdentity"`
+ RequestParameters map[string]string `json:"requestParameters"`
+ ResponseElements map[string]string `json:"responseElements"`
+ S3 eventMeta `json:"s3"`
+ Source sourceInfo `json:"source"`
+}
+
+// NotificationInfo - represents the collection of notification events, additionally
+// also reports errors if any while listening on bucket notifications.
+type NotificationInfo struct {
+ Records []NotificationEvent
+ Err error
+}
+
+// ListenBucketNotification - listen on bucket notifications.
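+//
+// A minimal usage sketch, assuming an initialized Client named api and
+// event names that are valid for the server in use:
+//
+//   doneCh := make(chan struct{})
+//   defer close(doneCh)
+//   for info := range api.ListenBucketNotification("mybucket", "photos/", ".jpg", []string{"s3:ObjectCreated:*"}, doneCh) {
+//       if info.Err != nil {
+//           log.Fatalln(info.Err)
+//       }
+//       fmt.Println(info.Records)
+//   }
+//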
+func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
+ notificationInfoCh := make(chan NotificationInfo, 1)
+	// Start a goroutine to read the notification stream line by line.
+ go func(notificationInfoCh chan<- NotificationInfo) {
+ defer close(notificationInfoCh)
+
+ // Validate the bucket name.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // Check ARN partition to verify if listening bucket is supported
+ if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
+ notificationInfoCh <- NotificationInfo{
+ Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
+ }
+ return
+ }
+
+ // Continuously run and listen on bucket notification.
+		// Create a done channel to control the retry go routine.
+ retryDoneCh := make(chan struct{}, 1)
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(retryDoneCh)
+
+ // Wait on the jitter retry loop.
+ for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+ urlValues := make(url.Values)
+ urlValues.Set("prefix", prefix)
+ urlValues.Set("suffix", suffix)
+ urlValues["events"] = events
+
+ // Execute GET on bucket to list objects.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
+ })
+ if err != nil {
+ continue
+ }
+
+ // Validate http response, upon error return quickly.
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ notificationInfoCh <- NotificationInfo{
+ Err: errResponse,
+ }
+ return
+ }
+
+ // Initialize a new bufio scanner, to read line by line.
+ bio := bufio.NewScanner(resp.Body)
+
+ // Close the response body.
+ defer resp.Body.Close()
+
+ // Unmarshal each line, returns marshalled values.
+ for bio.Scan() {
+ var notificationInfo NotificationInfo
+				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+ continue
+ }
+ // Send notifications on channel only if there are events received.
+ if len(notificationInfo.Records) > 0 {
+ select {
+ case notificationInfoCh <- notificationInfo:
+ case <-doneCh:
+ return
+ }
+ }
+ }
+ // Look for any underlying errors.
+ if err = bio.Err(); err != nil {
+ // For an unexpected connection drop from server, we close the body
+ // and re-connect.
+ if err == io.ErrUnexpectedEOF {
+ resp.Body.Close()
+ }
+ }
+ }
+ }(notificationInfoCh)
+
+ // Returns the notification info channel, for caller to start reading from.
+ return notificationInfoCh
+}
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
index 83e9cde823c552ff3f6676e5db417cb5962cb4f5..c645828df9944daf0609c46d4751d4bf3d8e9615 100644
--- a/vendor/github.com/minio/minio-go/api-presigned.go
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -20,14 +20,18 @@ import (
"errors"
"net/url"
"time"
+
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
-// supportedGetReqParams - supported request parameters for GET
-// presigned request.
+// supportedGetReqParams - supported request parameters for GET presigned request.
var supportedGetReqParams = map[string]struct{}{
"response-expires": {},
"response-content-type": {},
"response-cache-control": {},
+ "response-content-language": {},
+ "response-content-encoding": {},
"response-content-disposition": {},
}
@@ -38,10 +42,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
if method == "" {
return nil, ErrInvalidArgument("method cannot be empty.")
}
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return nil, err
}
if err := isValidExpiry(expires); err != nil {
@@ -66,8 +70,7 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
}
}
- // Save the request parameters to be used in presigning for
- // GET request.
+ // Save the request parameters to be used in presigning for GET request.
reqMetadata.queryValues = reqParams
}
@@ -81,19 +84,35 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
}
// PresignedGetObject - Returns a presigned URL to access an object
-// without credentials. Expires maximum is 7days - ie. 604800 and
-// minimum is 1. Additionally you can override a set of response
-// headers using the query parameters.
+// data without credentials. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
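+//
+// A minimal usage sketch, assuming an initialized Client named api:
+//
+//   reqParams := make(url.Values)
+//   reqParams.Set("response-content-disposition", "attachment; filename=\"photo.jpg\"")
+//   presignedURL, err := api.PresignedGetObject("mybucket", "photo.jpg", time.Hour, reqParams)
+//   if err != nil {
+//       log.Fatalln(err)
+//   }
+//   fmt.Println(presignedURL)
+//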
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}
-// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
-// Expires maximum is 7days - ie. 604800 and minimum is 1.
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. URL can have a maximum expiry of up to 7 days
+// or a minimum of 1 second.
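+//
+// A minimal usage sketch, assuming an initialized Client named api:
+//
+//   presignedURL, err := api.PresignedPutObject("mybucket", "myobject", time.Hour)
+//   if err != nil {
+//       log.Fatalln(err)
+//   }
+//   fmt.Println(presignedURL)
+//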
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
return c.presignURL("PUT", bucketName, objectName, expires, nil)
}
+// Presign - returns a presigned URL for any http method of your choice
+// along with custom request params. URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second.
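+//
+// A minimal usage sketch, assuming an initialized Client named api:
+//
+//   presignedURL, err := api.Presign("PUT", "mybucket", "myobject", 24*time.Hour, nil)
+//   if err != nil {
+//       log.Fatalln(err)
+//   }
+//   fmt.Println(presignedURL)
+//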
+func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL(method, bucketName, objectName, expires, reqParams)
+}
+
// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
// Validate input arguments.
@@ -119,21 +138,38 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
return nil, nil, err
}
+ // Get credentials from the configured credentials provider.
+ credValues, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var (
+ signerType = credValues.SignerType
+ sessionToken = credValues.SessionToken
+ accessKeyID = credValues.AccessKeyID
+ secretAccessKey = credValues.SecretAccessKey
+ )
+
+ if signerType.IsAnonymous() {
+ return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
+ }
+
// Keep time.
t := time.Now().UTC()
// For signature version '2' handle here.
- if c.signature.isV2() {
+ if signerType.IsV2() {
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
- if isGoogleEndpoint(c.endpointURL) {
- p.formData["GoogleAccessId"] = c.accessKeyID
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
+ p.formData["GoogleAccessId"] = accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
- p.formData["AWSAccessKeyId"] = c.accessKeyID
+ p.formData["AWSAccessKeyId"] = accessKeyID
}
// Sign the policy.
- p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
+ p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
return u, p.formData, nil
}
@@ -156,7 +192,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
}
// Add a credential policy.
- credential := getCredential(c.accessKeyID, location, t)
+ credential := s3signer.GetCredential(accessKeyID, location, t)
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
@@ -165,13 +201,27 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
return nil, nil, err
}
+ if sessionToken != "" {
+ if err = p.addNewPolicy(policyCondition{
+ matchType: "eq",
+ condition: "$x-amz-security-token",
+ value: sessionToken,
+ }); err != nil {
+ return nil, nil, err
+ }
+ }
+
// Get base64 encoded policy.
policyBase64 := p.base64()
+
// Fill in the form data.
p.formData["policy"] = policyBase64
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
- p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
+ if sessionToken != "" {
+ p.formData["x-amz-security-token"] = sessionToken
+ }
+ p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
return u, p.formData, nil
}
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
index 357f8b9921ce256c72c3ddf54678bc20755339cf..fd37dc192ad238150df8c60c632efbfc56a9384e 100644
--- a/vendor/github.com/minio/minio-go/api-put-bucket.go
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,14 +19,14 @@ package minio
import (
"bytes"
- "encoding/base64"
- "encoding/hex"
"encoding/json"
"encoding/xml"
"fmt"
- "io/ioutil"
"net/http"
"net/url"
+
+ "github.com/minio/minio-go/pkg/policy"
+ "github.com/minio/minio-go/pkg/s3utils"
)
/// Bucket operations
@@ -37,71 +38,32 @@ import (
//
// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
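+//
+// A minimal usage sketch, assuming an initialized Client named api:
+//
+//   if err := api.MakeBucket("mybucket", "us-east-1"); err != nil {
+//       log.Fatalln(err)
+//   }
+//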
-func (c Client) MakeBucket(bucketName string, location string) error {
+func (c Client) MakeBucket(bucketName string, location string) (err error) {
+ defer func() {
+ // Save the location into cache on a successful makeBucket response.
+ if err == nil {
+ c.bucketLocCache.Set(bucketName, location)
+ }
+ }()
+
// Validate the input arguments.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
return err
}
// If location is empty, treat is a default region 'us-east-1'.
if location == "" {
location = "us-east-1"
- }
-
- // Instantiate the request.
- req, err := c.makeBucketRequest(bucketName, location)
- if err != nil {
- return err
- }
-
- // Execute the request.
- resp, err := c.do(req)
- defer closeResponse(resp)
- if err != nil {
- return err
- }
-
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, bucketName, "")
+ // For custom region clients, default
+ // to custom region instead not 'us-east-1'.
+ if c.region != "" {
+ location = c.region
}
}
-
- // Save the location into cache on a successful makeBucket response.
- c.bucketLocCache.Set(bucketName, location)
-
- // Return.
- return nil
-}
-
-// makeBucketRequest constructs request for makeBucket.
-func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) {
- // Validate input arguments.
- if err := isValidBucketName(bucketName); err != nil {
- return nil, err
- }
-
- // In case of Amazon S3. The make bucket issued on already
- // existing bucket would fail with 'AuthorizationMalformed' error
- // if virtual style is used. So we default to 'path style' as that
- // is the preferred method here. The final location of the
- // 'bucket' is provided through XML LocationConstraint data with
- // the request.
- targetURL := *c.endpointURL
- targetURL.Path = "/" + bucketName + "/"
-
- // get a new HTTP request for the method.
- req, err := http.NewRequest("PUT", targetURL.String(), nil)
- if err != nil {
- return nil, err
- }
-
- // set UserAgent for the request.
- c.setUserAgent(req)
-
- // set sha256 sum for signature calculation only with signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ // PUT bucket request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ bucketLocation: location,
}
// If location is not 'us-east-1' create bucket location config.
@@ -111,30 +73,29 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
var createBucketConfigBytes []byte
createBucketConfigBytes, err = xml.Marshal(createBucketConfig)
if err != nil {
- return nil, err
- }
- createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes)
- req.Body = ioutil.NopCloser(createBucketConfigBuffer)
- req.ContentLength = int64(len(createBucketConfigBytes))
- // Set content-md5.
- req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes)))
- if c.signature.isV4() {
- // Set sha256.
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes)))
+ return err
}
+ reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
+ reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
+ reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
+ reqMetadata.contentLength = int64(len(createBucketConfigBytes))
}
- // Sign the request.
- if c.signature.isV4() {
- // Signature calculated for MakeBucket request should be for 'us-east-1',
- // regardless of the bucket's location constraint.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ // Execute PUT to create a new bucket.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
}
- // Return signed request.
- return req, nil
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+
+ // Success.
+ return nil
}
// SetBucketPolicy set the access permissions on an existing bucket.
@@ -145,48 +106,46 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// readonly - anonymous get access for everyone at a given object prefix.
// readwrite - anonymous list/put/delete access to a given object prefix.
// writeonly - anonymous put/delete access to a given object prefix.
-func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy BucketPolicy) error {
+func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
return err
}
- if !bucketPolicy.isValidBucketPolicy() {
+
+ if !bucketPolicy.IsValidBucketPolicy() {
return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
}
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
- if err != nil {
+
+ policyInfo, err := c.getBucketPolicy(bucketName)
+ errResponse := ToErrorResponse(err)
+ if err != nil && errResponse.Code != "NoSuchBucketPolicy" {
return err
}
- // For bucket policy set to 'none' we need to remove the policy.
- if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
- // No policies to set, return success.
+
+ if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
+ // As the request is for removing policy and the bucket
+ // has empty policy statements, just return success.
return nil
}
- // Remove any previous policies at this path.
- policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
- // generating []Statement for the given bucketPolicy.
- statements, err := generatePolicyStatement(bucketPolicy, bucketName, objectPrefix)
- if err != nil {
- return err
- }
- policy.Statements = append(policy.Statements, statements...)
+ policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)
+
// Save the updated policies.
- return c.putBucketPolicy(bucketName, policy)
+ return c.putBucketPolicy(bucketName, policyInfo)
}
// Saves a new bucket policy.
-func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) error {
+func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// If there are no policy statements, we should remove entire policy.
- if len(policy.Statements) == 0 {
+ if len(policyInfo.Statements) == 0 {
return c.removeBucketPolicy(bucketName)
}
@@ -195,7 +154,7 @@ func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) er
urlValues := make(url.Values)
urlValues.Set("policy", "")
- policyBytes, err := json.Marshal(&policy)
+ policyBytes, err := json.Marshal(&policyInfo)
if err != nil {
return err
}
@@ -223,3 +182,73 @@ func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) er
}
return nil
}
+
+// Removes all policies on a bucket.
+func (c Client) removeBucketPolicy(bucketName string) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("policy", "")
+
+	// Execute DELETE on the bucket policy.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// SetBucketNotification saves a new bucket notification.
+func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ notifBytes, err := xml.Marshal(bucketNotification)
+ if err != nil {
+ return err
+ }
+
+ notifBuffer := bytes.NewReader(notifBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: notifBuffer,
+ contentLength: int64(len(notifBytes)),
+ contentMD5Bytes: sumMD5(notifBytes),
+ contentSHA256Bytes: sum256(notifBytes),
+ }
+
+ // Execute PUT to upload a new bucket notification.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// RemoveAllBucketNotification - Removes the bucket notification, clearing all previously specified config.
+func (c Client) RemoveAllBucketNotification(bucketName string) error {
+ return c.SetBucketNotification(bucketName, BucketNotification{})
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
index 937c74d462ad50fc620b4313be959b2b822f0e4a..0158bc1d84cef3d15fb5e2d90e30cfc8aacddc8e 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-common.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -17,13 +17,11 @@
package minio
import (
- "crypto/md5"
- "crypto/sha256"
- "fmt"
- "hash"
"io"
"math"
"os"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Verify if reader is *os.File
@@ -44,24 +42,6 @@ func isReadAt(reader io.Reader) (ok bool) {
return
}
-// shouldUploadPart - verify if part should be uploaded.
-func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
- // If part not found should upload the part.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
- return true
- }
- // if size mismatches should upload the part.
- if objPart.Size != uploadedPart.Size {
- return true
- }
- // if md5sum mismatches should upload the part.
- if objPart.ETag != uploadedPart.ETag {
- return true
- }
- return false
-}
-
// optimalPartInfo - calculate the optimal part info for a given
// object size.
//
@@ -69,7 +49,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
// object storage it will have the following parameters as constants.
//
// maxPartsCount - 10000
-// minPartSize - 5MiB
+// minPartSize - 64MiB
// maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
@@ -95,181 +75,21 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
return totalPartsCount, partSize, lastPartSize, nil
}
-// hashCopyBuffer is identical to hashCopyN except that it doesn't take
-// any size argument but takes a buffer argument and reader should be
-// of io.ReaderAt interface.
-//
-// Stages reads from offsets into the buffer, if buffer is nil it is
-// initialized to optimalBufferSize.
-func (c Client) hashCopyBuffer(writer io.Writer, reader io.ReaderAt, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(writer, hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
- }
-
- // Buffer is nil, initialize.
- if buf == nil {
- buf = make([]byte, optimalReadBufferSize)
- }
-
- // Offset to start reading from.
- var readAtOffset int64
-
- // Following block reads data at an offset from the input
- // reader and copies data to into local temporary file.
- for {
- readAtSize, rerr := reader.ReadAt(buf, readAtOffset)
- if rerr != nil {
- if rerr != io.EOF {
- return nil, nil, 0, rerr
- }
- }
- writeSize, werr := hashWriter.Write(buf[:readAtSize])
- if werr != nil {
- return nil, nil, 0, werr
- }
- if readAtSize != writeSize {
- return nil, nil, 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue)
- }
- readAtOffset += int64(writeSize)
- size += int64(writeSize)
- if rerr == io.EOF {
- break
- }
- }
-
- // Finalize md5 sum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
- }
- return md5Sum, sha256Sum, size, err
-}
-
-// hashCopy is identical to hashCopyN except that it doesn't take
-// any size argument.
-func (c Client) hashCopy(writer io.Writer, reader io.Reader) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(writer, hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
- }
-
- // Using copyBuffer to copy in large buffers, default buffer
- // for io.Copy of 32KiB is too small.
- size, err = io.Copy(hashWriter, reader)
- if err != nil {
- return nil, nil, 0, err
- }
-
- // Finalize md5 sum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
- }
- return md5Sum, sha256Sum, size, err
-}
-
-// hashCopyN - Calculates Md5sum and SHA256sum for up to partSize amount of bytes.
-func (c Client) hashCopyN(writer io.Writer, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(writer, hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256)
- }
-
- // Copies to input at writer.
- size, err = io.CopyN(hashWriter, reader, partSize)
- if err != nil {
- // If not EOF return error right here.
- if err != io.EOF {
- return nil, nil, 0, err
- }
- }
-
- // Finalize md5shum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
- }
- return md5Sum, sha256Sum, size, err
-}
-
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
-func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
+func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return "", false, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return "", false, err
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
}
-
- // Set content Type to default if empty string.
- if contentType == "" {
- contentType = "application/octet-stream"
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return "", err
}
- // Find upload id for previous upload for an object.
- uploadID, err = c.findUploadID(bucketName, objectName)
+ // Initiate multipart upload for an object.
+ initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
if err != nil {
- return "", false, err
- }
- if uploadID == "" {
- // Initiate multipart upload for an object.
- initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
- if err != nil {
- return "", false, err
- }
- // Save the new upload id.
- uploadID = initMultipartUploadResult.UploadID
- // Indicate that this is a new upload id.
- isNew = true
- }
- return uploadID, isNew, nil
-}
-
-// computeHash - Calculates MD5 and SHA256 for an input read Seeker.
-func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) {
- // MD5 and SHA256 hasher.
- var hashMD5, hashSHA256 hash.Hash
- // MD5 and SHA256 hasher.
- hashMD5 = md5.New()
- hashWriter := io.MultiWriter(hashMD5)
- if c.signature.isV4() {
- hashSHA256 = sha256.New()
- hashWriter = io.MultiWriter(hashMD5, hashSHA256)
- }
-
- // If no buffer is provided, no need to allocate just use io.Copy.
- size, err = io.Copy(hashWriter, reader)
- if err != nil {
- return nil, nil, 0, err
- }
-
- // Seek back reader to the beginning location.
- if _, err := reader.Seek(0, 0); err != nil {
- return nil, nil, 0, err
- }
-
- // Finalize md5shum and sha256 sum.
- md5Sum = hashMD5.Sum(nil)
- if c.signature.isV4() {
- sha256Sum = hashSHA256.Sum(nil)
+ return "", err
}
- return md5Sum, sha256Sum, size, nil
+ return initMultipartUploadResult.UploadID, nil
}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
index 45d5693fcbc9163950af39531a5af8838b2fc737..32fa873d8794d32a79c8c3c7879cef93baa01f22 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-copy.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -16,53 +16,7 @@
package minio
-import "net/http"
-
-// CopyObject - copy a source object into a new object with the provided name in the provided bucket
-func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
- }
- if err := isValidObjectName(objectName); err != nil {
- return err
- }
- if objectSource == "" {
- return ErrInvalidArgument("Object source cannot be empty.")
- }
-
- // customHeaders apply headers.
- customHeaders := make(http.Header)
- for _, cond := range cpCond.conditions {
- customHeaders.Set(cond.key, cond.value)
- }
-
- // Set copy source.
- customHeaders.Set("x-amz-copy-source", objectSource)
-
- // Execute PUT on objectName.
- resp, err := c.executeMethod("PUT", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeaders,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
- }
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, bucketName, objectName)
- }
- }
-
- // Decode copy response on success.
- cpObjRes := copyObjectResult{}
- err = xmlDecoder(resp.Body, &cpObjRes)
- if err != nil {
- return err
- }
-
- // Return nil on success.
- return nil
+// CopyObject - copy a source object into a new object
+func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
+ return c.ComposeObject(dst, []SourceInfo{src})
}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-encrypted.go b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
new file mode 100644
index 0000000000000000000000000000000000000000..534a21ecfa03429fce0c7042d91d6fe1af325b48
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-encrypted.go
@@ -0,0 +1,46 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+)
+
+// PutEncryptedObject - Encrypt and store object.
+func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+
+ if encryptMaterials == nil {
+ return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
+ }
+
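+	// Set up the encryption materials to encrypt the data read from the reader.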
+ if err := encryptMaterials.SetupEncryptMode(reader); err != nil {
+ return 0, err
+ }
+
+ if metadata == nil {
+ metadata = make(map[string][]string)
+ }
+
+ // Set the necessary encryption headers, for future decryption.
+ metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()}
+ metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()}
+ metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
+
+ return c.putObjectMultipartStreamNoLength(bucketName, objectName, encryptMaterials, metadata, progress)
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go
index e7a1a9685bf25476dc5305ea7f69592612d12c0f..81cdf5c2c0bf4913f72d6f1cc9678040333fd73e 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-file.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-file.go
@@ -17,23 +17,20 @@
package minio
import (
- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
"mime"
"os"
"path/filepath"
- "sort"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// FPutObject - Create an object in a bucket, with contents from file at filePath.
func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
@@ -54,10 +51,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
// Save the file size.
fileSize := fileStat.Size()
- // Check for largest object size allowed.
- if fileSize > int64(maxMultipartPutObjectSize) {
- return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
- }
+ objMetadata := make(map[string][]string)
// Set contentType based on filepath extension if not given or default
// value of "binary/octet-stream" if the extension has no associated type.
@@ -67,184 +61,6 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
}
- // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
- // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
- if isGoogleEndpoint(c.endpointURL) {
- if fileSize > int64(maxSinglePutObjectSize) {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: fmt.Sprintf("Invalid Content-Length %d for file uploads to Google Cloud Storage.", fileSize),
- Key: objectName,
- BucketName: bucketName,
- }
- }
- // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
-
- // NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
- if fileSize > int64(maxSinglePutObjectSize) {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize),
- Key: objectName,
- BucketName: bucketName,
- }
- }
- // Do not compute MD5 for anonymous requests to Amazon
- // S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
-
- // Small object upload is initiated for uploads for input data size smaller than 5MiB.
- if fileSize < minPartSize && fileSize >= 0 {
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
- // Upload all large objects as multipart.
- n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil)
- if err != nil {
- errResp := ToErrorResponse(err)
- // Verify if multipart functionality is not available, if not
- // fall back to single PutObject operation.
- if errResp.Code == "NotImplemented" {
- // If size of file is greater than '5GiB' fail.
- if fileSize > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
- }
- return n, err
- }
- return n, nil
-}
-
-// putObjectMultipartFromFile - Creates object from contents of *os.File
-//
-// NOTE: This function is meant to be used for readers with local
-// file as in *os.File. This function resumes by skipping all the
-// necessary parts which were already uploaded by verifying them
-// against MD5SUM of each individual parts. This function also
-// effectively utilizes file system capabilities of reading from
-// specific sections and not having to create temporary files.
-func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
- if err != nil {
- return 0, err
- }
-
- // Total data read and written to server. should be equal to 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Complete multipart upload.
- var completeMultipartUpload completeMultipartUpload
-
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // If this session is a continuation of a previous session fetch all
- // previously uploaded parts info.
- if !isNew {
- // Fetch previously upload parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, _, err := optimalPartInfo(fileSize)
- if err != nil {
- return 0, err
- }
-
- // Part number always starts with '1'.
- partNumber := 1
-
- for partNumber <= totalPartsCount {
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
-
- // Calculates MD5 and SHA256 sum for a section reader.
- var md5Sum, sha256Sum []byte
- var prtSize int64
- md5Sum, sha256Sum, prtSize, err = c.computeHash(sectionReader)
- if err != nil {
- return 0, err
- }
-
- var reader io.Reader
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- reader = newHook(sectionReader, progress)
-
- // Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
- ETag: hex.EncodeToString(md5Sum),
- PartNumber: partNumber,
- Size: prtSize,
- }, partsInfo) {
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
- md5Sum, sha256Sum, prtSize)
- if err != nil {
- return totalUploadedSize, err
- }
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
- } else {
- // Update the progress reader for the skipped part.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
- return totalUploadedSize, err
- }
- }
- }
-
- // Save successfully uploaded size.
- totalUploadedSize += prtSize
-
- // Increment part number.
- partNumber++
- }
-
- // Verify if we uploaded all data.
- if totalUploadedSize != fileSize {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
- }
-
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
- }
-
- // Verify if totalPartsCount is not equal to total list of parts.
- if totalPartsCount != len(completeMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(completeMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Return final size.
- return totalUploadedSize, nil
+ objMetadata["Content-Type"] = []string{contentType}
+ return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
index 1b2b44882f276e69fc40452af1dbc075c04bfcfc..aefeb5f263c417072ca80a0b4c0e13d89587aab6 100644
--- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go
+++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go
@@ -18,181 +18,147 @@ package minio
import (
"bytes"
- "encoding/hex"
"encoding/xml"
+ "fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
- "os"
+ "runtime/debug"
"sort"
"strconv"
"strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
-// Comprehensive put object operation involving multipart resumable uploads.
-//
-// Following code handles these types of readers.
-//
-// - *os.File
-// - *minio.Object
-// - Any reader which has a method 'ReadAt()'
-//
-// If we exhaust all the known types, code proceeds to use stream as
-// is where each part is re-downloaded, checksummed and verified
-// before upload.
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
- if size > 0 && size > minPartSize {
- // Verify if reader is *os.File, then use file system functionalities.
- if isFile(reader) {
- return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
- }
- // Verify if reader is *minio.Object or io.ReaderAt.
- // NOTE: Verification of object is kept for a specific purpose
- // while it is going to be duck typed similar to io.ReaderAt.
- // It is to indicate that *minio.Object implements io.ReaderAt.
- // and such a functionality is used in the subsequent code
- // path.
- if isObject(reader) || isReadAt(reader) {
- return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
+func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64,
+ metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, metadata, progress)
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // Verify if multipart functionality is not available, if not
+ // fall back to single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
}
- // For any other data size and reader type we do generic multipart
- // approach by staging data in temporary files and uploading them.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
+ return n, err
}
-// putObjectStream uploads files bigger than 5MiB, and also supports
-// special case where size is unknown i.e '-1'.
-func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
- // Total data read and written to server. should be equal to 'size' at the end of the call.
+	// Total data read and written to the server; this should equal
+	// 'size' at the end of the call.
var totalUploadedSize int64
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
- // A map of all previously uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // getUploadID for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
if err != nil {
return 0, err
}
- // If This session is a continuation of a previous session fetch all
- // previously uploaded parts info.
- if !isNew {
- // Fetch previously uploaded parts and maximum part size.
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, _, err := optimalPartInfo(size)
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
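+	// Abort the multipart upload if any part upload fails below,
+	// so that already uploaded parts do not keep consuming storage.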
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(bucketName, objectName, uploadID)
+ }
+ }()
+
// Part number always starts with '1'.
partNumber := 1
- // Initialize a temporary buffer.
- tmpBuffer := new(bytes.Buffer)
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
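+	// Release memory (such as this large part buffer) back to the OS once the upload returns.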
+ defer debug.FreeOSMemory()
for partNumber <= totalPartsCount {
- // Calculates MD5 and SHA256 sum while copying partSize bytes
- // into tmpBuffer.
- md5Sum, sha256Sum, prtSize, rErr := c.hashCopyN(tmpBuffer, reader, partSize)
- if rErr != nil {
- if rErr != io.EOF {
- return 0, rErr
- }
+		// Choose the hash algorithms to be calculated for this part,
+		// avoiding sha256 for non-v4 signature requests or
+		// HTTPS connections.
+ hashAlgos, hashSums := c.hashMaterials()
+
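+		// Read up to one part worth of data; a short read (io.ErrUnexpectedEOF) indicates the final part.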
+ length, rErr := io.ReadFull(reader, buf)
+ if rErr == io.EOF {
+ break
+ }
+ if rErr != nil && rErr != io.ErrUnexpectedEOF {
+ return 0, rErr
+ }
+
+		// Calculate hash sums over the part data just read into buf.
+ for k, v := range hashAlgos {
+ v.Write(buf[:length])
+ hashSums[k] = v.Sum(nil)
}
- var reader io.Reader
// Update progress reader appropriately to the latest offset
// as we read from the source.
- reader = newHook(tmpBuffer, progress)
-
- // Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
- ETag: hex.EncodeToString(md5Sum),
- PartNumber: partNumber,
- Size: prtSize,
- }, partsInfo) {
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
- if err != nil {
- // Reset the temporary buffer upon any error.
- tmpBuffer.Reset()
- return totalUploadedSize, err
- }
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
- } else {
- // Update the progress reader for the skipped part.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
- return totalUploadedSize, err
- }
- }
+ rd := newHook(bytes.NewReader(buf[:length]), progress)
+
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
+ hashSums["md5"], hashSums["sha256"], int64(length), metadata)
+ if err != nil {
+ return totalUploadedSize, err
}
- // Reset the temporary buffer.
- tmpBuffer.Reset()
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
// Save successfully uploaded size.
- totalUploadedSize += prtSize
-
- // For unknown size, Read EOF we break away.
- // We do not have to upload till totalPartsCount.
- if size < 0 && rErr == io.EOF {
- break
- }
+ totalUploadedSize += int64(length)
// Increment part number.
partNumber++
- }
- // Verify if we uploaded all the data.
- if size > 0 {
- if totalUploadedSize != size {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+		// The size is unknown, so on Read EOF we break away;
+		// we do not have to upload up to totalPartsCount.
+ if rErr == io.EOF {
+ break
}
}
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
- }
-
- if size > 0 {
- // Verify if totalPartsCount is not equal to total list of parts.
- if totalPartsCount != len(complMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(partNumber, len(complMultipartUpload.Parts))
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
+ if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err
}
@@ -201,12 +167,12 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return initiateMultipartUploadResult{}, err
}
@@ -214,13 +180,18 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
urlValues := make(url.Values)
urlValues.Set("uploads", "")
- if contentType == "" {
- contentType = "application/octet-stream"
- }
-
// Set ContentType header.
customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
+ for k, v := range metadata {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+	// Set a default content-type header if one is not provided
+ if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
reqMetadata := requestMetadata{
bucketName: bucketName,
@@ -249,26 +220,29 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
return initiateMultipartUploadResult, nil
}
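+// serverEncryptionKeyPrefix matches metadata headers that carry server-side
+// encryption parameters; such headers are forwarded on each uploadPart request.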
+const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
+
// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) {
+func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader,
+ partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return objectPart{}, err
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectPart{}, err
}
- if err := isValidObjectName(objectName); err != nil {
- return objectPart{}, err
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectPart{}, err
}
if size > maxPartSize {
- return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
+ return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName)
}
if size <= -1 {
- return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
+ return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName)
}
if partNumber <= 0 {
- return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
+ return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.")
}
if uploadID == "" {
- return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
+ return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.")
}
// Get resources properly escaped and lined up before using them in http request.
@@ -278,10 +252,21 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
// Set upload id.
urlValues.Set("uploadId", uploadID)
+ // Set encryption headers, if any.
+ customHeader := make(http.Header)
+ for k, v := range metadata {
+ if len(v) > 0 {
+ if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
+ customHeader.Set(k, v[0])
+ }
+ }
+ }
+
reqMetadata := requestMetadata{
bucketName: bucketName,
objectName: objectName,
queryValues: urlValues,
+ customHeader: customHeader,
contentBody: reader,
contentLength: size,
contentMD5Bytes: md5Sum,
@@ -292,15 +277,15 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
resp, err := c.executeMethod("PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
- return objectPart{}, err
+ return ObjectPart{}, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// Once successfully uploaded, return completed part.
- objPart := objectPart{}
+ objPart := ObjectPart{}
objPart.Size = size
objPart.PartNumber = partNumber
// Trim off the odd double quotes from ETag in the beginning and end.
@@ -310,12 +295,13 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
-func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, complete completeMultipartUpload) (completeMultipartUploadResult, error) {
+func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
+ complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return completeMultipartUploadResult{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return completeMultipartUploadResult{}, err
}
@@ -351,11 +337,32 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
}
}
+
+	// Read resp.Body into a []byte to parse for a possible Error response inside the body
+ var b []byte
+ b, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return completeMultipartUploadResult{}, err
+ }
// Decode completed multipart upload response on success.
completeMultipartUploadResult := completeMultipartUploadResult{}
- err = xmlDecoder(resp.Body, &completeMultipartUploadResult)
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
if err != nil {
+		// xml parsing failure due to the presence of an ill-formed xml fragment
return completeMultipartUploadResult, err
+ } else if completeMultipartUploadResult.Bucket == "" {
+		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
+ // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
+ // of the members.
+
+ // Decode completed multipart upload response on failure
+ completeMultipartUploadErr := ErrorResponse{}
+ err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
+ if err != nil {
+			// xml parsing failure due to the presence of an ill-formed xml fragment
+ return completeMultipartUploadResult, err
+ }
+ return completeMultipartUploadResult, completeMultipartUploadErr
}
return completeMultipartUploadResult, nil
}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go
deleted file mode 100644
index ebbc380c340a9f2262060921888f2dfe137745c9..0000000000000000000000000000000000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-progress.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "io"
-
-// PutObjectWithProgress - With progress.
-func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
- if reader == nil {
- return 0, ErrInvalidArgument("Input reader is invalid, cannot be nil.")
- }
-
- // Size of the object.
- var size int64
-
- // Get reader size.
- size, err = getReaderSize(reader)
- if err != nil {
- return 0, err
- }
-
- // Check for largest object size allowed.
- if size > int64(maxMultipartPutObjectSize) {
- return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
- }
-
- // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
- // So we fall back to single PUT operation with the maximum limit of 5GiB.
- if isGoogleEndpoint(c.endpointURL) {
- if size <= -1 {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: "Content-Length cannot be negative for file uploads to Google Cloud Storage.",
- Key: objectName,
- BucketName: bucketName,
- }
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
- }
-
- // NOTE: S3 doesn't allow anonymous multipart requests.
- if isAmazonEndpoint(c.endpointURL) && c.anonymous {
- if size <= -1 {
- return 0, ErrorResponse{
- Code: "NotImplemented",
- Message: "Content-Length cannot be negative for anonymous requests.",
- Key: objectName,
- BucketName: bucketName,
- }
- }
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Do not compute MD5 for anonymous requests to Amazon
- // S3. Uploads up to 5GiB in size.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
- }
-
- // putSmall object.
- if size < minPartSize && size >= 0 {
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
- }
- // For all sizes greater than 5MiB do multipart.
- n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress)
- if err != nil {
- errResp := ToErrorResponse(err)
- // Verify if multipart functionality is not available, if not
- // fall back to single PutObject operation.
- if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
- // Verify if size of reader is greater than '5GiB'.
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // Fall back to uploading as single PutObject operation.
- return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
- }
- return n, err
- }
- return n, nil
-}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go
deleted file mode 100644
index 957e3380e4a9d6c374c3bbf162d37d8fb0c21a64..0000000000000000000000000000000000000000
--- a/vendor/github.com/minio/minio-go/api-put-object-readat.go
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "sort"
-)
-
-// shouldUploadPartReadAt - verify if part should be uploaded.
-func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
- // If part not found part should be uploaded.
- uploadedPart, found := objectParts[objPart.PartNumber]
- if !found {
- return true
- }
- // if size mismatches part should be uploaded.
- if uploadedPart.Size != objPart.Size {
- return true
- }
- return false
-}
-
-// putObjectMultipartFromReadAt - Uploads files bigger than 5MiB. Supports reader
-// of type which implements io.ReaderAt interface (ReadAt method).
-//
-// NOTE: This function is meant to be used for all readers which
-// implement io.ReaderAt which allows us for resuming multipart
-// uploads but reading at an offset, which would avoid re-read the
-// data which was already uploaded. Internally this function uses
-// temporary files for staging all the data, these temporary files are
-// cleaned automatically when the caller i.e http client closes the
-// stream after uploading all the contents successfully.
-func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Get upload id for an object, initiates a new multipart request
- // if it cannot find any previously partially uploaded object.
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
- if err != nil {
- return 0, err
- }
-
- // Total data read and written to server. should be equal to 'size' at the end of the call.
- var totalUploadedSize int64
-
- // Complete multipart upload.
- var complMultipartUpload completeMultipartUpload
-
- // A map of all uploaded parts.
- var partsInfo = make(map[int]objectPart)
-
- // Fetch all parts info previously uploaded.
- if !isNew {
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
- if err != nil {
- return 0, err
- }
- }
-
- // Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
- if err != nil {
- return 0, err
- }
-
- // Used for readability, lastPartNumber is always
- // totalPartsCount.
- lastPartNumber := totalPartsCount
-
- // partNumber always starts with '1'.
- partNumber := 1
-
- // Initialize a temporary buffer.
- tmpBuffer := new(bytes.Buffer)
-
- // Read defaults to reading at 5MiB buffer.
- readAtBuffer := make([]byte, optimalReadBufferSize)
-
- // Upload all the missing parts.
- for partNumber <= lastPartNumber {
- // Verify object if its uploaded.
- verifyObjPart := objectPart{
- PartNumber: partNumber,
- Size: partSize,
- }
- // Special case if we see a last part number, save last part
- // size as the proper part size.
- if partNumber == lastPartNumber {
- verifyObjPart = objectPart{
- PartNumber: lastPartNumber,
- Size: lastPartSize,
- }
- }
-
- // Verify if part should be uploaded.
- if !shouldUploadPartReadAt(verifyObjPart, partsInfo) {
- // Increment part number when not uploaded.
- partNumber++
- if progress != nil {
- // Update the progress reader for the skipped part.
- if _, err = io.CopyN(ioutil.Discard, progress, verifyObjPart.Size); err != nil {
- return 0, err
- }
- }
- continue
- }
-
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(partNumber-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if partNumber == lastPartNumber {
- readOffset = (size - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
-
- // Calculates MD5 and SHA256 sum for a section reader.
- var md5Sum, sha256Sum []byte
- var prtSize int64
- md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readAtBuffer)
- if err != nil {
- return 0, err
- }
-
- var reader io.Reader
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- reader = newHook(tmpBuffer, progress)
-
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, md5Sum, sha256Sum, prtSize)
- if err != nil {
- // Reset the buffer upon any error.
- tmpBuffer.Reset()
- return 0, err
- }
-
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
-
- // Increment part number here after successful part upload.
- partNumber++
-
- // Reset the buffer.
- tmpBuffer.Reset()
- }
-
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- totalUploadedSize += part.Size
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
- }
-
- // Verify if we uploaded all the data.
- if totalUploadedSize != size {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
-
- // Verify if totalPartsCount is not equal to total list of parts.
- if totalPartsCount != len(complMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(totalPartsCount, len(complMultipartUpload.Parts))
- }
-
- // Sort all completed parts.
- sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
- if err != nil {
- return totalUploadedSize, err
- }
-
- // Return final size.
- return totalUploadedSize, nil
-}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
new file mode 100644
index 0000000000000000000000000000000000000000..40cd5c2522c9948c0b5f61c5c61782cb805101a9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go
@@ -0,0 +1,436 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// PutObjectStreaming uploads an object using AWS streaming signature V4.
+func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
+ return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
+}
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// Following code handles these types of readers.
+//
+// - *os.File
+// - *minio.Object
+// - Any reader which has a method 'ReadAt()'
+//
+func (c Client) putObjectMultipartStream(bucketName, objectName string,
+ reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+
+ // Verify if reader is *minio.Object, *os.File or io.ReaderAt.
+	// NOTE: Verification of object is kept for a specific purpose
+	// even though it is going to be duck typed similar to io.ReaderAt:
+	// it indicates that *minio.Object implements io.ReaderAt,
+	// and that functionality is used in the subsequent code path.
+ if isFile(reader) || !isObject(reader) && isReadAt(reader) {
+ n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
+ } else {
+ n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ }
+ if err != nil {
+ errResp := ToErrorResponse(err)
+ // Verify if multipart functionality is not available, if not
+ // fall back to single PutObject operation.
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ // Verify if size of reader is greater than '5GiB'.
+ if size > maxSinglePutObjectSize {
+ return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+ }
+ // Fall back to uploading as single PutObject operation.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ }
+ }
+ return n, err
+}
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+ Part *ObjectPart
+}
+
+type uploadPartReq struct {
+ PartNum int // Number of the part uploaded.
+ Part *ObjectPart // Size of the part uploaded.
+}
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB.
+// Supports all readers which implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows us to resume multipart
+// uploads by reading at an offset, avoiding re-reading data that
+// was already uploaded. Internally this function uses temporary
+// files for staging all the data; these temporary files are
+// cleaned up automatically when the caller, i.e. the http client,
+// closes the stream after uploading all the contents successfully.
+func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string,
+ reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ if err != nil {
+ return 0, err
+ }
+
+ // Aborts the multipart upload in progress, if the
+ // function returns any error, since we do not resume
+ // we should purge the parts which have been uploaded
+ // to relinquish storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Total data read and written to server. should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Declare a channel that sends the next part number to be uploaded.
+ // Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadPartsCh := make(chan uploadPartReq, 10000)
+
+ // Declare a channel that sends back the response of a part upload.
+ // Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+ // Used for readability, lastPartNumber is always totalPartsCount.
+ lastPartNumber := totalPartsCount
+
+ // Send each part number to the channel to be processed.
+ for p := 1; p <= totalPartsCount; p++ {
+ uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+ }
+ close(uploadPartsCh)
+
+ // Receive each part number from the channel allowing three parallel uploads.
+ for w := 1; w <= totalWorkers; w++ {
+ go func(partSize int64) {
+ // Each worker will draw from the part channel and upload in parallel.
+ for uploadReq := range uploadPartsCh {
+
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(uploadReq.PartNum-1) * partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if uploadReq.PartNum == lastPartNumber {
+ readOffset = (size - lastPartSize)
+ partSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress)
+
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ sectionReader, uploadReq.PartNum,
+ nil, nil, partSize, metadata)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Save successfully uploaded part metadata.
+ uploadReq.Part = &objPart
+
+ // Send successful part info through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: objPart.Size,
+ PartNum: uploadReq.PartNum,
+ Part: uploadReq.Part,
+ Error: nil,
+ }
+ }
+ }(partSize)
+ }
+
+ // Gather the responses as they occur and update any
+ // progress bar.
+ for u := 1; u <= totalPartsCount; u++ {
+ uploadRes := <-uploadedPartsCh
+ if uploadRes.Error != nil {
+ return totalUploadedSize, uploadRes.Error
+ }
+ // Retrieve each uploaded part and store it to be completed.
+ // part, ok := partsInfo[uploadRes.PartNum]
+ part := uploadRes.Part
+ if part == nil {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+ }
+ // Update the totalUploadedSize.
+ totalUploadedSize += uploadRes.Size
+ // Store the parts to be completed in order.
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Verify if we uploaded all the data.
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
+func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
+ reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+ if err != nil {
+ return 0, err
+ }
+
+ // Initiates a new multipart request
+ uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ if err != nil {
+ return 0, err
+ }
+
+ // Aborts the multipart upload if the function returns
+ // any error, since we do not resume we should purge
+ // the parts which have been uploaded to relinquish
+ // storage space.
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Total data read and written to server. should be equal to 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Part number always starts with '1'.
+ var partNumber int
+ for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ hookReader := newHook(reader, progress)
+
+ // Proceed to upload the part.
+ if partNumber == totalPartsCount {
+ partSize = lastPartSize
+ }
+
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ io.LimitReader(hookReader, partSize),
+ partNumber, nil, nil, partSize, metadata)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += partSize
+ }
+
+ // Verify if we uploaded all the data.
+ if size > 0 {
+ if totalUploadedSize != size {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
+ }
+ }
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
+}
+
+// putObjectNoChecksum is a special function used for Google Cloud Storage,
+// since Google's multipart API is not S3 compatible.
+func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Size -1 is only supported on Google Cloud Storage, we error
+ // out in all other situations.
+ if size < 0 && !s3utils.IsGoogleEndpoint(c.endpointURL) {
+ return 0, ErrEntityTooSmall(size, bucketName, objectName)
+ }
+ if size > 0 {
+ if isReadAt(reader) && !isObject(reader) {
+ reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
+ }
+ }
+
+ // Update progress reader appropriately to the latest offset as we
+ // read from the source.
+ readSeeker := newHook(reader, progress)
+
+ // This function does not calculate sha256 and md5sum for payload.
+ // Execute put object.
+ st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
+ if err != nil {
+ return 0, err
+ }
+ if st.Size != size {
+ return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+ }
+ return size, nil
+}
+
+// putObjectDo - executes the put object http operation.
+// NOTE: You must have WRITE permissions on a bucket to add an object to it.
+func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Set headers.
+ customHeader := make(http.Header)
+
+ // Set metadata to headers
+ for k, v := range metaData {
+ if len(v) > 0 {
+ customHeader.Set(k, v[0])
+ }
+ }
+
+ // If Content-Type is not provided, set the default application/octet-stream one
+ if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
+ customHeader.Set("Content-Type", "application/octet-stream")
+ }
+
+ // Populate request metadata.
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Bytes: md5Sum,
+ contentSHA256Bytes: sha256Sum,
+ }
+
+ // Execute PUT on objectName.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+ }
+ }
+
+ var objInfo ObjectInfo
+ // Trim off the odd double quotes from ETag in the beginning and end.
+ objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+ objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
+ // A success here means data was written to server successfully.
+ objInfo.Size = size
+
+ // Return here.
+ return objInfo, nil
+}
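For reference, here is a minimal usage sketch of the streaming upload API introduced by this file (not part of the patch; the endpoint, credentials, file and bucket names are placeholders):

```go
package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	// New() defaults to signature V4; for uploads larger than 64MiB the
	// client takes the multipart streaming path added in this file.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("large-archive.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// *os.File implements io.ReaderAt, so parts can be read at offsets and
	// uploaded in parallel by putObjectMultipartStreamFromReadAt.
	n, err := client.PutObjectStreaming("my-bucket", "large-archive.tar", f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %d bytes", n)
}
```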
diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go
index 10390c6c5cb3d60b595709b4326ae7aa16693bd1..94db825930c5d01a775c533654eaf6a20a608301 100644
--- a/vendor/github.com/minio/minio-go/api-put-object.go
+++ b/vendor/github.com/minio/minio-go/api-put-object.go
@@ -18,87 +18,115 @@ package minio
import (
"bytes"
+ "fmt"
"io"
- "io/ioutil"
- "net/http"
"os"
"reflect"
"runtime"
+ "runtime/debug"
+ "sort"
"strings"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
+// toInt - converts go value to its integer representation based
+// on the value kind if it is an integer.
+func toInt(value reflect.Value) (size int64) {
+ size = -1
+ if value.IsValid() {
+ switch value.Kind() {
+ case reflect.Int:
+ fallthrough
+ case reflect.Int8:
+ fallthrough
+ case reflect.Int16:
+ fallthrough
+ case reflect.Int32:
+ fallthrough
+ case reflect.Int64:
+ size = value.Int()
+ }
+ }
+ return size
+}
+
// getReaderSize - Determine the size of Reader if available.
func getReaderSize(reader io.Reader) (size int64, err error) {
- var result []reflect.Value
size = -1
- if reader != nil {
- // Verify if there is a method by name 'Size'.
- lenFn := reflect.ValueOf(reader).MethodByName("Size")
- if lenFn.IsValid() {
- if lenFn.Kind() == reflect.Func {
- // Call the 'Size' function and save its return value.
- result = lenFn.Call([]reflect.Value{})
- if len(result) == 1 {
- lenValue := result[0]
- if lenValue.IsValid() {
- switch lenValue.Kind() {
- case reflect.Int:
- fallthrough
- case reflect.Int8:
- fallthrough
- case reflect.Int16:
- fallthrough
- case reflect.Int32:
- fallthrough
- case reflect.Int64:
- size = lenValue.Int()
- }
- }
- }
+ if reader == nil {
+ return -1, nil
+ }
+ // Verify if there is a method by name 'Size'.
+ sizeFn := reflect.ValueOf(reader).MethodByName("Size")
+ // Verify if there is a method by name 'Len'.
+ lenFn := reflect.ValueOf(reader).MethodByName("Len")
+ if sizeFn.IsValid() {
+ if sizeFn.Kind() == reflect.Func {
+ // Call the 'Size' function and save its return value.
+ result := sizeFn.Call([]reflect.Value{})
+ if len(result) == 1 {
+ size = toInt(result[0])
+ }
+ }
+ } else if lenFn.IsValid() {
+ if lenFn.Kind() == reflect.Func {
+ // Call the 'Len' function and save its return value.
+ result := lenFn.Call([]reflect.Value{})
+ if len(result) == 1 {
+ size = toInt(result[0])
}
- } else {
- // Fallback to Stat() method, two possible Stat() structs
- // exist.
- switch v := reader.(type) {
- case *os.File:
- var st os.FileInfo
- st, err = v.Stat()
- if err != nil {
- // Handle this case specially for "windows",
- // certain files for example 'Stdin', 'Stdout' and
- // 'Stderr' it is not allowed to fetch file information.
- if runtime.GOOS == "windows" {
- if strings.Contains(err.Error(), "GetFileInformationByHandle") {
- return -1, nil
- }
+ }
+ } else {
+ // Fallback to Stat() method, two possible Stat() structs exist.
+ switch v := reader.(type) {
+ case *os.File:
+ var st os.FileInfo
+ st, err = v.Stat()
+ if err != nil {
+ // Handle this case specially for "windows",
+ // certain files for example 'Stdin', 'Stdout' and
+ // 'Stderr' it is not allowed to fetch file information.
+ if runtime.GOOS == "windows" {
+ if strings.Contains(err.Error(), "GetFileInformationByHandle") {
+ return -1, nil
}
- return
- }
- // Ignore if input is a directory, throw an error.
- if st.Mode().IsDir() {
- return -1, ErrInvalidArgument("Input file cannot be a directory.")
- }
- // Ignore 'Stdin', 'Stdout' and 'Stderr', since they
- // represent *os.File type but internally do not
- // implement Seekable calls. Ignore them and treat
- // them like a stream with unknown length.
- switch st.Name() {
- case "stdin":
- fallthrough
- case "stdout":
- fallthrough
- case "stderr":
- return
}
- size = st.Size()
- case *Object:
- var st ObjectInfo
- st, err = v.Stat()
- if err != nil {
- return
- }
- size = st.Size
+ return
+ }
+ // Ignore if input is a directory, throw an error.
+ if st.Mode().IsDir() {
+ return -1, ErrInvalidArgument("Input file cannot be a directory.")
+ }
+ // Ignore 'Stdin', 'Stdout' and 'Stderr', since they
+ // represent *os.File type but internally do not
+ // implement Seekable calls. Ignore them and treat
+ // them like a stream with unknown length.
+ switch st.Name() {
+ case "stdin", "stdout", "stderr":
+ return
+ // Ignore read/write stream of os.Pipe() which have unknown length too.
+ case "|0", "|1":
+ return
+ }
+ var pos int64
+ pos, err = v.Seek(0, 1) // SeekCurrent.
+ if err != nil {
+ return -1, err
+ }
+ size = st.Size() - pos
+ case *Object:
+ var st ObjectInfo
+ st, err = v.Stat()
+ if err != nil {
+ return
}
+ var pos int64
+ pos, err = v.Seek(0, 1) // SeekCurrent.
+ if err != nil {
+ return -1, err
+ }
+ size = st.Size - pos
}
}
// Returns the size here.
@@ -107,7 +135,7 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
// completedParts is a collection of parts sortable by their part numbers.
// used for sorting the uploaded parts before completing the multipart request.
-type completedParts []completePart
+type completedParts []CompletePart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
@@ -117,168 +145,178 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
-// - For size smaller than 5MiB PutObject automatically does a single atomic Put operation.
-// - For size larger than 5MiB PutObject automatically does a resumable multipart Put operation.
-// - For size input as -1 PutObject does a multipart Put operation until input stream reaches EOF.
-// Maximum object size that can be uploaded through this operation will be 5TiB.
-//
-// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
-// So we fall back to single PUT operation with the maximum limit of 5GiB.
-//
-// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation.
+// - For size smaller than 64MiB PutObject automatically does a
+// single atomic Put operation.
+// - For size larger than 64MiB PutObject automatically does a
+// multipart Put operation.
+// - For size input as -1 PutObject does a multipart Put operation
+// until input stream reaches EOF. Maximum object size that can
+// be uploaded through this operation will be 5TiB.
func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
- return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil)
+ return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
+ "Content-Type": []string{contentType},
+ }, nil)
}
-// putObjectNoChecksum special function used Google Cloud Storage. This special function
-// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+// PutObjectWithSize - a helper similar in behavior to PutObject(),
+// but taking the size argument explicitly. This avoids doing reflection
+// internally to figure out the size of the input stream. Also, if the
+// input size is less than 0, this function returns an error.
+func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
+}
+
+// PutObjectWithMetadata using AWS streaming signature V4
+func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
+}
+
+// PutObjectWithProgress using AWS streaming signature V4
+func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Size of the object.
+ var size int64
+
+ // Get reader size.
+ size, err = getReaderSize(reader)
+ if err != nil {
return 0, err
}
- if err := isValidObjectName(objectName); err != nil {
- return 0, err
+
+ return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
+}
+
+func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+ // Check for largest object size allowed.
+ if size > int64(maxMultipartPutObjectSize) {
+ return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
}
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+
+ // NOTE: Streaming signature is not supported by GCS.
+ if s3utils.IsGoogleEndpoint(c.endpointURL) {
+ // Do not compute MD5 for Google Cloud Storage.
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
- // Update progress reader appropriately to the latest offset as we
- // read from the source.
- readSeeker := newHook(reader, progress)
+ if c.overrideSignerType.IsV2() {
+ if size >= 0 && size < minPartSize {
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ }
+ return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
+ }
- // This function does not calculate sha256 and md5sum for payload.
- // Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
- if err != nil {
- return 0, err
+ if size < 0 {
+ return c.putObjectMultipartStreamNoLength(bucketName, objectName, reader, metadata, progress)
}
- if st.Size != size {
- return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
+
+ if size < minPartSize {
+ return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
}
- return size, nil
+
+ // For all sizes greater than 64MiB do multipart.
+ return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
}
-// putObjectSingle is a special function for uploading single put object request.
-// This special function is used as a fallback when multipart upload fails.
-func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStreamNoLength(bucketName, objectName string, reader io.Reader, metadata map[string][]string,
+ progress io.Reader) (n int64, err error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
return 0, err
}
- if size > maxSinglePutObjectSize {
- return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
- // If size is a stream, upload up to 5GiB.
- if size <= -1 {
- size = maxSinglePutObjectSize
- }
- var md5Sum, sha256Sum []byte
- if size <= minPartSize {
- // Initialize a new temporary buffer.
- tmpBuffer := new(bytes.Buffer)
- md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size)
- reader = bytes.NewReader(tmpBuffer.Bytes())
- tmpBuffer.Reset()
- } else {
- // Initialize a new temporary file.
- var tmpFile *tempFile
- tmpFile, err = newTempFile("single$-putobject-single")
- if err != nil {
- return 0, err
- }
- defer tmpFile.Close()
- md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size)
- // Seek back to beginning of the temporary file.
- if _, err = tmpFile.Seek(0, 0); err != nil {
- return 0, err
- }
- reader = tmpFile
- }
- // Return error if its not io.EOF.
+
+ // Total data read and written to server. should be equal to
+ // 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
if err != nil {
- if err != io.EOF {
- return 0, err
- }
+ return 0, err
}
- // Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, reader, md5Sum, sha256Sum, size, contentType)
+
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(bucketName, objectName, metadata)
if err != nil {
return 0, err
}
- if st.Size != size {
- return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName)
- }
- // Progress the reader to the size if putObjectDo is successful.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil {
- return size, err
+
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(bucketName, objectName, uploadID)
}
- }
- return size, nil
-}
+ }()
-// putObjectDo - executes the put object http operation.
-// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return ObjectInfo{}, err
- }
- if err := isValidObjectName(objectName); err != nil {
- return ObjectInfo{}, err
- }
+ // Part number always starts with '1'.
+ partNumber := 1
- if size <= -1 {
- return ObjectInfo{}, ErrEntityTooSmall(size, bucketName, objectName)
- }
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
- if size > maxSinglePutObjectSize {
- return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
- }
+ // Create a buffer.
+ buf := make([]byte, partSize)
+ defer debug.FreeOSMemory()
- if strings.TrimSpace(contentType) == "" {
- contentType = "application/octet-stream"
- }
+ for partNumber <= totalPartsCount {
+ length, rErr := io.ReadFull(reader, buf)
+ if rErr == io.EOF {
+ break
+ }
+ if rErr != nil && rErr != io.ErrUnexpectedEOF {
+ return 0, rErr
+ }
- // Set headers.
- customHeader := make(http.Header)
- customHeader.Set("Content-Type", contentType)
-
- // Populate request metadata.
- reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
- }
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ rd := newHook(bytes.NewReader(buf[:length]), progress)
- // Execute PUT an objectName.
- resp, err := c.executeMethod("PUT", reqMetadata)
- defer closeResponse(resp)
- if err != nil {
- return ObjectInfo{}, err
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, rd, partNumber,
+ nil, nil, int64(length), metadata)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+
+ // Increment part number.
+ partNumber++
+
+ // For unknown size, once we read EOF we break away.
+ // We do not have to upload till totalPartsCount.
+ if rErr == io.EOF {
+ break
+ }
}
- if resp != nil {
- if resp.StatusCode != http.StatusOK {
- return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
}
- var metadata ObjectInfo
- // Trim off the odd double quotes from ETag in the beginning and end.
- metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
- metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
- // A success here means data was written to server successfully.
- metadata.Size = size
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
+ return totalUploadedSize, err
+ }
- // Return here.
- return metadata, nil
+ // Return final size.
+ return totalUploadedSize, nil
}
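A short sketch of how the reworked put entry points above dispatch on size (illustrative only; the endpoint, credentials and object names are placeholders):

```go
package main

import (
	"bytes"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	meta := map[string][]string{"Content-Type": {"application/octet-stream"}}

	// Known size below 64MiB: dispatched to putObjectNoChecksum, and the
	// explicit size skips the reflection in getReaderSize.
	payload := []byte("hello")
	if _, err := client.PutObjectWithSize("my-bucket", "small.bin",
		bytes.NewReader(payload), int64(len(payload)), meta, nil); err != nil {
		log.Fatal(err)
	}

	// os.Stdin is treated as a stream of unknown length (size -1), so
	// putObjectMultipartStreamNoLength uploads one buffered part at a time
	// until EOF.
	if _, err := client.PutObjectWithMetadata("my-bucket", "stream.bin",
		os.Stdin, meta, nil); err != nil {
		log.Fatal(err)
	}
}
```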
diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go
index bd5842828e58b77f6f152670923f99831a09a05e..3574cbc1af12a9d113e901a503ca5efc9811c153 100644
--- a/vendor/github.com/minio/minio-go/api-remove.go
+++ b/vendor/github.com/minio/minio-go/api-remove.go
@@ -17,8 +17,13 @@
package minio
import (
+ "bytes"
+ "encoding/xml"
+ "io"
"net/http"
"net/url"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// RemoveBucket deletes the bucket name.
@@ -27,12 +32,13 @@ import (
// in the bucket must be deleted before successfully attempting this request.
func (c Client) RemoveBucket(bucketName string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
// Execute DELETE on bucket.
resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
+ bucketName: bucketName,
+ contentSHA256Bytes: emptySHA256,
})
defer closeResponse(resp)
if err != nil {
@@ -50,86 +56,168 @@ func (c Client) RemoveBucket(bucketName string) error {
return nil
}
-// RemoveBucketPolicy remove a bucket policy on given path.
-func (c Client) RemoveBucketPolicy(bucketName, objectPrefix string) error {
+// RemoveObject remove an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectPrefix(objectPrefix); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+ // Execute DELETE on objectName.
+ resp, err := c.executeMethod("DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Bytes: emptySHA256,
+ })
+ defer closeResponse(resp)
if err != nil {
return err
}
- // No bucket policy found, nothing to remove return success.
- if policy.Statements == nil {
- return nil
+ if resp != nil {
+ // if some unexpected error happened and max retry is reached, we want to let client know
+ if resp.StatusCode != http.StatusNoContent {
+ return httpRespToErrorResponse(resp, bucketName, objectName)
+ }
}
- // Save new statements after removing requested bucket policy.
- policy.Statements = removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
+ // DeleteObject always responds with http '204' even for
+ // objects which do not exist. So no need to handle them
+ // specifically.
+ return nil
+}
- // Commit the update policy.
- return c.putBucketPolicy(bucketName, policy)
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+ ObjectName string
+ Err error
}
-// Removes all policies on a bucket.
-func (c Client) removeBucketPolicy(bucketName string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
+// generateRemoveMultiObjectsRequest - generate the XML request for the multi objects delete request
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+ rmObjects := []deleteObject{}
+ for _, obj := range objects {
+ rmObjects = append(rmObjects, deleteObject{Key: obj})
}
- // Get resources properly escaped and lined up before
- // using them in http request.
- urlValues := make(url.Values)
- urlValues.Set("policy", "")
+ xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+ return xmlBytes
+}
- // Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- })
- defer closeResponse(resp)
+// processRemoveMultiObjectsResponse - parse the response of the multi objects delete
+// web service and return the success/failure result status for each object
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+ // Parse multi delete XML response
+ rmResult := &deleteMultiObjectsResult{}
+ err := xmlDecoder(body, rmResult)
if err != nil {
- return err
+ errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+ return
}
- return nil
-}
-// RemoveObject remove an object from a bucket.
-func (c Client) RemoveObject(bucketName, objectName string) error {
- // Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
+ // Fill in deletions that returned an error.
+ for _, obj := range rmResult.UnDeletedObjects {
+ errorCh <- RemoveObjectError{
+ ObjectName: obj.Key,
+ Err: ErrorResponse{
+ Code: obj.Code,
+ Message: obj.Message,
+ },
+ }
}
- if err := isValidObjectName(objectName); err != nil {
- return err
+}
+
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove is received from objectsCh.
+// Remove failures are sent back via the error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+ errorCh := make(chan RemoveObjectError, 1)
+
+ // Validate if bucket name is valid.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: err,
+ }
+ return errorCh
}
- // Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- })
- defer closeResponse(resp)
- if err != nil {
- return err
+ // Validate objects channel to be properly allocated.
+ if objectsCh == nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: ErrInvalidArgument("Objects channel cannot be nil"),
+ }
+ return errorCh
}
- // DeleteObject always responds with http '204' even for
- // objects which do not exist. So no need to handle them
- // specifically.
- return nil
+
+ // Generate and call MultiDelete S3 requests based on entries received from objectsCh
+ go func(errorCh chan<- RemoveObjectError) {
+ maxEntries := 1000
+ finish := false
+ urlValues := make(url.Values)
+ urlValues.Set("delete", "")
+
+ // Close error channel when Multi delete finishes.
+ defer close(errorCh)
+
+ // Loop over entries by 1000 and call MultiDelete requests
+ for {
+ if finish {
+ break
+ }
+ count := 0
+ var batch []string
+
+ // Try to gather 1000 entries
+ for object := range objectsCh {
+ batch = append(batch, object)
+ if count++; count >= maxEntries {
+ break
+ }
+ }
+ if count == 0 {
+ // Multi Objects Delete API doesn't accept empty object list, quit immediately
+ break
+ }
+ if count < maxEntries {
+ // We didn't have 1000 entries, so this is the last batch
+ finish = true
+ }
+
+ // Generate remove multi objects XML request
+ removeBytes := generateRemoveMultiObjectsRequest(batch)
+ // Execute POST on bucket to delete the batch of objects.
+ resp, err := c.executeMethod("POST", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Bytes: sumMD5(removeBytes),
+ contentSHA256Bytes: sum256(removeBytes),
+ })
+ if err != nil {
+ for _, b := range batch {
+ errorCh <- RemoveObjectError{ObjectName: b, Err: err}
+ }
+ continue
+ }
+
+ // Process multiobjects remove xml response
+ processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+
+ closeResponse(resp)
+ }
+ }(errorCh)
+ return errorCh
}
// RemoveIncompleteUpload aborts an partially uploaded object.
-// Requires explicit authentication, no anonymous requests are allowed for multipart API.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
// Find multipart upload id of the object to be aborted.
@@ -151,10 +239,10 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// uploadID, all previously uploaded parts are deleted.
func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return err
}
@@ -164,9 +252,10 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
// Execute DELETE on multipart upload.
resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Bytes: emptySHA256,
})
defer closeResponse(resp)
if err != nil {
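A minimal sketch of the new RemoveObjects API added above (illustrative only; endpoint, credentials, bucket and keys are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Feed the keys to delete into the channel; RemoveObjects batches them
	// into Multi-Object Delete requests of up to 1000 entries each.
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, key := range []string{"logs/a.log", "logs/b.log", "logs/c.log"} {
			objectsCh <- key
		}
	}()

	// Drain the error channel; it is closed once all batches are processed.
	for rErr := range client.RemoveObjects("my-bucket", objectsCh) {
		log.Printf("failed to remove %q: %v", rErr.ObjectName, rErr.Err)
	}
}
```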
diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
index ca81e302d21a7ad97b12076556c20330bad7d59c..4b297407bf80028111c61eed5cbd76cacb39f25d 100644
--- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go
@@ -36,16 +36,45 @@ type owner struct {
ID string
}
-// commonPrefix container for prefix response.
-type commonPrefix struct {
+// CommonPrefix container for prefix response.
+type CommonPrefix struct {
Prefix string
}
-// listBucketResult container for listObjects response.
-type listBucketResult struct {
+// ListBucketV2Result container for listObjects response version 2.
+type ListBucketV2Result struct {
// A response can contain CommonPrefixes only if you have
// specified a delimiter.
- CommonPrefixes []commonPrefix
+ CommonPrefixes []CommonPrefix
+ // Metadata about each object returned.
+ Contents []ObjectInfo
+ Delimiter string
+
+ // Encoding type used to encode object keys in the response.
+ EncodingType string
+
+ // A flag that indicates whether or not ListObjects returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated bool
+ MaxKeys int64
+ Name string
+
+ // Hold the token that will be sent in the next request to fetch the next group of keys
+ NextContinuationToken string
+
+ ContinuationToken string
+ Prefix string
+
+ // FetchOwner and StartAfter are currently not used
+ FetchOwner string
+ StartAfter string
+}
+
+// ListBucketResult container for listObjects response.
+type ListBucketResult struct {
+ // A response can contain CommonPrefixes only if you have
+ // specified a delimiter.
+ CommonPrefixes []CommonPrefix
// Metadata about each object returned.
Contents []ObjectInfo
Delimiter string
@@ -73,8 +102,8 @@ type listBucketResult struct {
Prefix string
}
-// listMultipartUploadsResult container for ListMultipartUploads response
-type listMultipartUploadsResult struct {
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
Bucket string
KeyMarker string
UploadIDMarker string `xml:"UploadIdMarker"`
@@ -87,7 +116,7 @@ type listMultipartUploadsResult struct {
Prefix string
Delimiter string
// A response can contain CommonPrefixes only if you specify a delimiter.
- CommonPrefixes []commonPrefix
+ CommonPrefixes []CommonPrefix
}
// initiator container for who initiated multipart upload.
@@ -102,8 +131,8 @@ type copyObjectResult struct {
LastModified string // time string format "2006-01-02T15:04:05.000Z"
}
-// objectPart container for particular part of an object.
-type objectPart struct {
+// ObjectPart container for particular part of an object.
+type ObjectPart struct {
// Part number identifies the part.
PartNumber int
@@ -118,8 +147,8 @@ type objectPart struct {
Size int64
}
-// listObjectPartsResult container for ListObjectParts response.
-type listObjectPartsResult struct {
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
Bucket string
Key string
UploadID string `xml:"UploadId"`
@@ -134,7 +163,7 @@ type listObjectPartsResult struct {
// Indicates whether the returned list of parts is truncated.
IsTruncated bool
- ObjectParts []objectPart `xml:"Part"`
+ ObjectParts []ObjectPart `xml:"Part"`
EncodingType string
}
@@ -156,9 +185,9 @@ type completeMultipartUploadResult struct {
ETag string
}
-// completePart sub container lists individual part numbers and their
+// CompletePart sub container lists individual part numbers and their
// md5sum, part of completeMultipartUpload.
-type completePart struct {
+type CompletePart struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
// Part number identifies the part.
@@ -169,7 +198,7 @@ type completePart struct {
// completeMultipartUpload container for completing multipart upload.
type completeMultipartUpload struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"`
- Parts []completePart `xml:"Part"`
+ Parts []CompletePart `xml:"Part"`
}
// createBucketConfiguration container for bucket configuration.
@@ -177,3 +206,39 @@ type createBucketConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+ Key string
+ VersionID string `xml:"VersionId,omitempty"`
+ // These fields are ignored.
+ DeleteMarker bool
+ DeleteMarkerVersionID string
+}
+
+// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
+type nonDeletedObject struct {
+ Key string
+ Code string
+ Message string
+}
+
+// deleteMultiObjects container for MultiObjects Delete XML request
+type deleteMultiObjects struct {
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool
+ Objects []deleteObject `xml:"Object"`
+}
+
+// deleteMultiObjectsResult container for MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjects []deletedObject `xml:"Deleted"`
+ UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
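As a rough illustration of what the new multi-delete containers marshal to, a hypothetical package-internal test (sitting next to api-remove.go, which defines generateRemoveMultiObjectsRequest) might look like this:

```go
package minio

import (
	"strings"
	"testing"
)

// TestRemoveMultiObjectsRequest sketches the XML body built from the
// deleteMultiObjects / deleteObject containers declared above.
func TestRemoveMultiObjectsRequest(t *testing.T) {
	body := generateRemoveMultiObjectsRequest([]string{"a.txt", "b.txt"})
	// Expected shape, roughly:
	// <Delete><Quiet>true</Quiet><Object><Key>a.txt</Key></Object><Object><Key>b.txt</Key></Object></Delete>
	for _, want := range []string{"<Delete>", "<Quiet>true</Quiet>", "<Key>a.txt</Key>", "<Key>b.txt</Key>"} {
		if !strings.Contains(string(body), want) {
			t.Errorf("request body %q is missing %q", body, want)
		}
	}
}
```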
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
index b5db7fedc7be417a673dc72e8bbca68b2757cdd0..5f06bfc9e05b53ac5856e0ed48397c70c496859c 100644
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -21,45 +21,98 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// BucketExists verify if bucket exists and you have permission to access it.
-func (c Client) BucketExists(bucketName string) error {
+func (c Client) BucketExists(bucketName string) (bool, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
- return err
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return false, err
}
// Execute HEAD on bucketName.
resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
+ bucketName: bucketName,
+ contentSHA256Bytes: emptySHA256,
})
defer closeResponse(resp)
if err != nil {
- return err
+ if ToErrorResponse(err).Code == "NoSuchBucket" {
+ return false, nil
+ }
+ return false, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, bucketName, "")
+ return false, httpRespToErrorResponse(resp, bucketName, "")
}
}
- return nil
+ return true, nil
+}
+
+// List of header keys to be filtered, usually
+// from all S3 API http responses.
+var defaultFilterKeys = []string{
+ "Connection",
+ "Transfer-Encoding",
+ "Accept-Ranges",
+ "Date",
+ "Server",
+ "Vary",
+ "x-amz-bucket-region",
+ "x-amz-request-id",
+ "x-amz-id-2",
+ // Add new headers to be ignored.
+}
+
+// Extract only necessary metadata header key/values by
+// filtering them out with a list of custom header keys.
+func extractObjMetadata(header http.Header) http.Header {
+ filterKeys := append([]string{
+ "ETag",
+ "Content-Length",
+ "Last-Modified",
+ "Content-Type",
+ }, defaultFilterKeys...)
+ return filterHeader(header, filterKeys)
}
// StatObject verifies if object exists and you have permission to access.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
- if err := isValidBucketName(bucketName); err != nil {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return ObjectInfo{}, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return ObjectInfo{}, err
+ }
+ reqHeaders := NewHeadReqHeaders()
+ return c.statObject(bucketName, objectName, reqHeaders)
+}
+
+// Lower level API for statObject supporting pre-conditions and range headers.
+func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
}
- if err := isValidObjectName(objectName); err != nil {
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
+ customHeader := make(http.Header)
+ for k, v := range reqHeaders.Header {
+ customHeader[k] = v
+ }
+
// Execute HEAD on objectName.
resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Bytes: emptySHA256,
+ customHeader: customHeader,
})
defer closeResponse(resp)
if err != nil {
@@ -75,19 +128,25 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
- // Parse content length.
- size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
- if err != nil {
- return ObjectInfo{}, ErrorResponse{
- Code: "InternalError",
- Message: "Content-Length is invalid. " + reportIssue,
- BucketName: bucketName,
- Key: objectName,
- RequestID: resp.Header.Get("x-amz-request-id"),
- HostID: resp.Header.Get("x-amz-id-2"),
- Region: resp.Header.Get("x-amz-bucket-region"),
+ // Parse content length if it exists
+ var size int64 = -1
+ contentLengthStr := resp.Header.Get("Content-Length")
+ if contentLengthStr != "" {
+ size, err = strconv.ParseInt(contentLengthStr, 10, 64)
+ if err != nil {
+ // Content-Length is not valid
+ return ObjectInfo{}, ErrorResponse{
+ Code: "InternalError",
+ Message: "Content-Length is invalid. " + reportIssue,
+ BucketName: bucketName,
+ Key: objectName,
+ RequestID: resp.Header.Get("x-amz-request-id"),
+ HostID: resp.Header.Get("x-amz-id-2"),
+ Region: resp.Header.Get("x-amz-bucket-region"),
+ }
}
}
+
// Parse Last-Modified has http time format.
date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
if err != nil {
@@ -101,17 +160,23 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
Region: resp.Header.Get("x-amz-bucket-region"),
}
}
+
// Fetch content type if any present.
contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
if contentType == "" {
contentType = "application/octet-stream"
}
+
// Save object metadata info.
- var objectStat ObjectInfo
- objectStat.ETag = md5sum
- objectStat.Key = objectName
- objectStat.Size = size
- objectStat.LastModified = date
- objectStat.ContentType = contentType
- return objectStat, nil
+ return ObjectInfo{
+ ETag: md5sum,
+ Key: objectName,
+ Size: size,
+ LastModified: date,
+ ContentType: contentType,
+ // Extract only the relevant header keys describing the object.
+ // following function filters out a list of standard set of keys
+ // which are not part of object metadata.
+ Metadata: extractObjMetadata(resp.Header),
+ }, nil
}
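A small sketch of the changed BucketExists and StatObject behavior above (illustrative only; endpoint, credentials and object names are placeholders):

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// BucketExists now returns (bool, error): a "NoSuchBucket" response is
	// reported as (false, nil) rather than an error.
	found, err := client.BucketExists("my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	if !found {
		log.Fatal("bucket does not exist")
	}

	// StatObject tolerates a missing Content-Length (Size == -1) and exposes
	// the filtered response headers via ObjectInfo.Metadata.
	info, err := client.StatObject("my-bucket", "report.csv")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s: %d bytes, %s, metadata: %v", info.Key, info.Size, info.ContentType, info.Metadata)
}
```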
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
index 93c0a6d235c1483fc0eb0c2d2931904778959167..946a58869fc7ed990a81142d2f8c612924b00661 100644
--- a/vendor/github.com/minio/minio-go/api.go
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,42 +19,52 @@ package minio
import (
"bytes"
+ "crypto/md5"
+ "crypto/sha256"
"encoding/base64"
"encoding/hex"
+ "errors"
"fmt"
+ "hash"
"io"
"io/ioutil"
"math/rand"
+ "net"
"net/http"
"net/http/httputil"
"net/url"
"os"
- "regexp"
"runtime"
"strings"
"sync"
"time"
+
+ "github.com/minio/minio-go/pkg/credentials"
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
/// Standard options.
- // AccessKeyID required for authorized requests.
- accessKeyID string
- // SecretAccessKey required for authorized requests.
- secretAccessKey string
- // Choose a signature type if necessary.
- signature SignatureType
- // Set to 'true' if Client has no access and secret keys.
- anonymous bool
+ // Parsed endpoint url provided by the user.
+ endpointURL url.URL
+
+ // Holds various credential providers.
+ credsProvider *credentials.Credentials
+
+ // Custom signerType value overrides all credentials.
+ overrideSignerType credentials.SignatureType
// User supplied.
appInfo struct {
appName string
appVersion string
}
- endpointURL *url.URL
+
+ // Indicate whether we are using https or not
+ secure bool
// Needs allocation.
httpClient *http.Client
@@ -63,6 +74,12 @@ type Client struct {
isTraceEnabled bool
traceOutput io.Writer
+ // S3 specific accelerated endpoint.
+ s3AccelerateEndpoint string
+
+ // Region endpoint
+ region string
+
// Random seed.
random *rand.Rand
}
@@ -70,7 +87,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "1.0.1"
+ libraryVersion = "3.0.3"
)
// User Agent should always following the below style.
@@ -84,55 +101,68 @@ const (
// NewV2 - instantiate minio client with Amazon S3 signature version
// '2' compatibility.
-func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
- // Set to use signature version '2'.
- clnt.signature = SignatureV2
+ clnt.overrideSignerType = credentials.SignatureV2
return clnt, nil
}
// NewV4 - instantiate minio client with Amazon S3 signature version
// '4' compatibility.
-func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
- // Set to use signature version '4'.
- clnt.signature = SignatureV4
+ clnt.overrideSignerType = credentials.SignatureV4
return clnt, nil
}
-// New - instantiate minio client Client, adds automatic verification
-// of signature.
-func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
- clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure)
+// New - instantiate minio client, adds automatic verification of signature.
+func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ clnt, err := privateNew(endpoint, creds, secure, "")
if err != nil {
return nil, err
}
- // Google cloud storage should be set to signature V2, force it if
- // not.
- if isGoogleEndpoint(clnt.endpointURL) {
- clnt.signature = SignatureV2
+ // Google cloud storage should be set to signature V2, force it if not.
+ if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
+ clnt.overrideSignerType = credentials.SignatureV2
}
- // If Amazon S3 set to signature v2.n
- if isAmazonEndpoint(clnt.endpointURL) {
- clnt.signature = SignatureV4
+ // If Amazon S3 set to signature v4.
+ if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
+ clnt.overrideSignerType = credentials.SignatureV4
}
return clnt, nil
}
+// NewWithCredentials - instantiate minio client with credentials provider
+// for retrieving credentials from various credentials provider such as
+// IAM, File, Env etc.
+func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
+ return privateNew(endpoint, creds, secure, region)
+}
+
+// NewWithRegion - instantiate minio client, with region configured. Unlike New(),
+// NewWithRegion avoids bucket-location lookup operations and it is slightly faster.
+// Use this function if your application deals with a single region.
+func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
+ creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+ return privateNew(endpoint, creds, secure, region)
+}
+
// lockedRandSource provides protected rand source, implements rand.Source interface.
type lockedRandSource struct {
lk sync.Mutex
src rand.Source
}
-// Int63 returns a non-negative pseudo-random 63-bit integer as an
-// int64.
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
func (r *lockedRandSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
@@ -148,34 +178,75 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock()
}
-func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) {
+// redirectHeaders copies all headers when following a redirect URL.
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
+func redirectHeaders(req *http.Request, via []*http.Request) error {
+ if len(via) == 0 {
+ return nil
+ }
+ for key, val := range via[0].Header {
+ req.Header[key] = val
+ }
+ return nil
+}
+
+// getRegionFromURL - parse region from URL if present.
+func getRegionFromURL(u url.URL) (region string) {
+ region = ""
+ if s3utils.IsGoogleEndpoint(u) {
+ return
+ } else if s3utils.IsAmazonChinaEndpoint(u) {
+ // For china specifically we need to set everything to
+ // cn-north-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // China region.
+ return "cn-north-1"
+ } else if s3utils.IsAmazonGovCloudEndpoint(u) {
+ // For us-gov specifically we need to set everything to
+ // us-gov-west-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // Gov cloud region.
+ return "us-gov-west-1"
+ }
+ parts := s3utils.AmazonS3Host.FindStringSubmatch(u.Host)
+ if len(parts) > 1 {
+ region = parts[1]
+ }
+ return region
+}
+
+func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
// construct endpoint.
- endpointURL, err := getEndpointURL(endpoint, insecure)
+ endpointURL, err := getEndpointURL(endpoint, secure)
if err != nil {
return nil, err
}
// instantiate new Client.
clnt := new(Client)
- clnt.accessKeyID = accessKeyID
- clnt.secretAccessKey = secretAccessKey
- if clnt.accessKeyID == "" || clnt.secretAccessKey == "" {
- clnt.anonymous = true
- }
+
+ // Save the credentials.
+ clnt.credsProvider = creds
+
+ // Remember whether we are using https or not
+ clnt.secure = secure
// Save endpoint URL, user agent for future uses.
- clnt.endpointURL = endpointURL
+ clnt.endpointURL = *endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
- // Setting a sensible time out of 2minutes to wait for response
- // headers. Request is pro-actively cancelled after 2minutes
- // if no response was received from server.
- Timeout: 2 * time.Minute,
- Transport: http.DefaultTransport,
+ Transport: defaultMinioTransport,
+ CheckRedirect: redirectHeaders,
}
- // Instantiae bucket location cache.
+ // Set custom region; if region is empty, the bucket location cache is used automatically.
+ if region == "" {
+ region = getRegionFromURL(clnt.endpointURL)
+ }
+ clnt.region = region
+
+ // Instantiate bucket location cache.
clnt.bucketLocCache = newBucketLocationCache()
// Introduce a new locked random seed.
@@ -187,8 +258,7 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
- // if app name and version is not set, we do not a new user
- // agent.
+ // If app name and version are not set, we do not set a new user agent.
if appName != "" && appVersion != "" {
c.appInfo = struct {
appName string
@@ -220,13 +290,6 @@ func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) {
}
}
-// SetClientTimeout - set http client timeout.
-func (c *Client) SetClientTimeout(timeout time.Duration) {
- if c.httpClient != nil {
- c.httpClient.Timeout = timeout
- }
-}
-
// TraceOn - enable HTTP tracing.
func (c *Client) TraceOn(outputStream io.Writer) {
// if outputStream is nil then default to os.Stdout.
@@ -246,8 +309,41 @@ func (c *Client) TraceOff() {
c.isTraceEnabled = false
}
-// requestMetadata - is container for all the values to make a
-// request.
+// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your
+// requests. This feature is specific to S3; for all other endpoints this
+// function does nothing. To read further details on s3 transfer acceleration
+// please visit -
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
+ if s3utils.IsAmazonEndpoint(c.endpointURL) {
+ c.s3AccelerateEndpoint = accelerateEndpoint
+ }
+}
+
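+// Usage sketch (illustrative only; endpoint, credentials and the accelerate
+// hostname are placeholders): enabling transfer acceleration on an S3 client.
+// The call is a no-op for non-Amazon endpoints.
+//
+//	clnt, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	clnt.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
+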
+// hashMaterials provides relevant initialized hash algo writers
+// based on the expected signature type.
+//
+// - For a signature v4 request, if the connection is insecure compute only sha256.
+// - For a signature v4 request, if the connection is secure compute only md5.
+// - For an anonymous request compute md5.
+func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
+ hashSums = make(map[string][]byte)
+ hashAlgos = make(map[string]hash.Hash)
+ if c.overrideSignerType.IsV4() {
+ if c.secure {
+ hashAlgos["md5"] = md5.New()
+ } else {
+ hashAlgos["sha256"] = sha256.New()
+ }
+ } else {
+ if c.overrideSignerType.IsAnonymous() {
+ hashAlgos["md5"] = md5.New()
+ }
+ }
+ return hashAlgos, hashSums
+}
+
+// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
// If set newRequest presigns the URL.
presignURL bool
@@ -267,37 +363,6 @@ type requestMetadata struct {
contentMD5Bytes []byte
}
-// Filter out signature value from Authorization header.
-func (c Client) filterSignature(req *http.Request) {
- // For anonymous requests, no need to filter.
- if c.anonymous {
- return
- }
- // Handle if Signature V2.
- if c.signature.isV2() {
- // Set a temporary redacted auth
- req.Header.Set("Authorization", "AWS **REDACTED**:**REDACTED**")
- return
- }
-
- /// Signature V4 authorization header.
-
- // Save the original auth.
- origAuth := req.Header.Get("Authorization")
- // Strip out accessKeyID from:
- // Credential=////aws4_request
- regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
- newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
-
- // Strip out 256-bit signature from: Signature=<256-bit signature>
- regSign := regexp.MustCompile("Signature=([[0-9a-f]+)")
- newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
-
- // Set a temporary redacted auth
- req.Header.Set("Authorization", newAuth)
- return
-}
-
// dumpHTTP - dump HTTP request and response.
func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// Starts http dump.
@@ -307,7 +372,10 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
}
// Filter out Signature field from Authorization header.
- c.filterSignature(req)
+ origAuth := req.Header.Get("Authorization")
+ if origAuth != "" {
+ req.Header.Set("Authorization", redactSignature(origAuth))
+ }
// Only display request header.
reqTrace, err := httputil.DumpRequestOut(req, false)
@@ -369,20 +437,35 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
- // do the request.
- resp, err := c.httpClient.Do(req)
- if err != nil {
- // Handle this specifically for now until future Golang
- // versions fix this issue properly.
- urlErr, ok := err.(*url.Error)
- if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
- return nil, &url.Error{
- Op: urlErr.Op,
- URL: urlErr.URL,
- Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
+ var resp *http.Response
+ var err error
+ // Do the request in a loop in case an HTTP 307 is returned, since golang still doesn't
+ // handle this situation properly (https://github.com/golang/go/issues/7912)
+ for {
+ resp, err = c.httpClient.Do(req)
+ if err != nil {
+ // Handle this specifically for now until future Golang
+ // versions fix this issue properly.
+ urlErr, ok := err.(*url.Error)
+ if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
+ return nil, &url.Error{
+ Op: urlErr.Op,
+ URL: urlErr.URL,
+ Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
+ }
}
+ return nil, err
+ }
+ // Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise
+ if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
+ newURL, err := url.Parse(resp.Header.Get("Location"))
+ if err != nil {
+ break
+ }
+ req.URL = newURL
+ } else {
+ break
}
- return nil, err
}
// Response cannot be non-nil, report if its the case.
@@ -417,9 +500,20 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
if metadata.contentBody != nil {
// Check if body is seekable then it is retryable.
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
+ switch bodySeeker {
+ case os.Stdin, os.Stdout, os.Stderr:
+ isRetryable = false
+ }
+ // Figure out if the body can be closed - if yes
+ // we will definitely close it upon the function
+ // return.
+ bodyCloser, ok := metadata.contentBody.(io.Closer)
+ if ok {
+ defer bodyCloser.Close()
+ }
}
- // Create a done channel to control 'ListObjects' go routine.
+ // Create a done channel to control 'newRetryTimer' go routine.
doneCh := make(chan struct{}, 1)
// Indicate to our routine to exit cleanly upon return.
@@ -428,7 +522,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Blank indentifier is kept here on purpose since 'range' without
// blank identifiers is only supported since go1.4
// https://golang.org/doc/go1.4#forrange.
- for _ = range c.newRetryTimer(MaxRetry, time.Second, time.Second*30, MaxJitter, doneCh) {
+ for range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a
@@ -472,20 +566,39 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Read the body to be saved later.
errBodyBytes, err := ioutil.ReadAll(res.Body)
+ // res.Body should be closed
+ closeResponse(res)
if err != nil {
return nil, err
}
+
// Save the body.
errBodySeeker := bytes.NewReader(errBodyBytes)
res.Body = ioutil.NopCloser(errBodySeeker)
// For errors verify if its retryable otherwise fail quickly.
errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName))
- // Bucket region if set in error response, we can retry the
- // request with the new region.
- if errResponse.Region != "" {
- c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
- continue // Retry.
+
+ // Save the body back again.
+ errBodySeeker.Seek(0, 0) // Seek back to starting point.
+ res.Body = ioutil.NopCloser(errBodySeeker)
+
+ // Bucket region if set in error response and the error
+ // code dictates invalid region, we can retry the request
+ // with the new region.
+ //
+ // Additionally we should only retry if bucketLocation and custom
+ // region is empty.
+ if metadata.bucketLocation == "" && c.region == "" {
+ if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
+ if metadata.bucketName != "" && errResponse.Region != "" {
+ // Gather Cached location only if bucketName is present.
+ if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+ }
+ }
}
// Verify if error response code is retryable.
@@ -498,10 +611,6 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
continue // Retry.
}
- // Save the body back again.
- errBodySeeker.Seek(0, 0) // Seek back to starting point.
- res.Body = ioutil.NopCloser(errBodySeeker)
-
// For all other cases break out of the retry loop.
break
}
@@ -515,29 +624,27 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
method = "POST"
}
- // Default all requests to "us-east-1" or "cn-north-1" (china region)
- location := "us-east-1"
- if isAmazonChinaEndpoint(c.endpointURL) {
- // For china specifically we need to set everything to
- // cn-north-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // China region.
- location = "cn-north-1"
- }
-
- // Gather location only if bucketName is present.
- if metadata.bucketName != "" {
- location, err = c.getBucketLocation(metadata.bucketName)
- if err != nil {
- return nil, err
+ location := metadata.bucketLocation
+ if location == "" {
+ if metadata.bucketName != "" {
+ // Gather location only if bucketName is present.
+ location, err = c.getBucketLocation(metadata.bucketName)
+ if err != nil {
+ if ToErrorResponse(err).Code != "AccessDenied" {
+ return nil, err
+ }
+ }
+ // Upon AccessDenied error on fetching bucket location, default
+ // to possible locations based on endpoint URL. This can usually
+ // happen when GetBucketLocation() is disabled using IAM policies.
+ }
+ if location == "" {
+ location = getDefaultLocation(c.endpointURL, c.region)
}
}
- // Save location.
- metadata.bucketLocation = location
-
// Construct a new target URL.
- targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, metadata.bucketLocation, metadata.queryValues)
+ targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, metadata.queryValues)
if err != nil {
return nil, err
}
@@ -548,30 +655,46 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
return nil, err
}
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // Custom signer set then override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
+ }
+
// Generate presign url if needed, return right here.
if metadata.expires != 0 && metadata.presignURL {
- if c.anonymous {
- return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.")
+ if signerType.IsAnonymous() {
+ return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.")
}
- if c.signature.isV2() {
+ if signerType.IsV2() {
// Presign URL with signature v2.
- req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
- } else {
+ req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires)
+ } else if signerType.IsV4() {
// Presign URL with signature v4.
- req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
+ req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires)
}
return req, nil
}
- // Set content body if available.
- if metadata.contentBody != nil {
- req.Body = ioutil.NopCloser(metadata.contentBody)
- }
-
- // set 'Expect' header for the request.
- req.Header.Set("Expect", "100-continue")
-
- // set 'User-Agent' header for the request.
+ // Set 'User-Agent' header for the request.
c.setUserAgent(req)
// Set all headers.
@@ -579,21 +702,21 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set(k, v[0])
}
- // set incoming content-length.
- if metadata.contentLength > 0 {
- req.ContentLength = metadata.contentLength
+ // Go net/http notoriously closes the request body.
+ // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors.
+ // This can cause underlying *os.File seekers to fail, avoid that
+ // by making sure to wrap the closer as a nop.
+ if metadata.contentLength == 0 {
+ req.Body = nil
+ } else {
+ req.Body = ioutil.NopCloser(metadata.contentBody)
}
- // Set sha256 sum only for non anonymous credentials.
- if !c.anonymous {
- // set sha256 sum for signature calculation only with
- // signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
- if metadata.contentSHA256Bytes != nil {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes))
- }
- }
+ // Set incoming content-length.
+ req.ContentLength = metadata.contentLength
+ if req.ContentLength <= -1 {
+ // For unknown content length, we upload using transfer-encoding: chunked.
+ req.TransferEncoding = []string{"chunked"}
}
// set md5Sum for content protection.
@@ -601,15 +724,31 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
}
- // Sign the request for all authenticated requests.
- if !c.anonymous {
- if c.signature.isV2() {
- // Add signature version '2' authorization header.
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
- } else if c.signature.isV4() {
- // Add signature version '4' authorization header.
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, location)
+ // For anonymous requests just return.
+ if signerType.IsAnonymous() {
+ return req, nil
+ }
+
+ switch {
+ case signerType.IsV2():
+ // Add signature version '2' authorization header.
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
+ // Streaming signature is used by default for a PUT object request. Additionally we also
+ // check if the initialized client is secure; if so, we don't need to perform
+ // streaming signature.
+ req = s3signer.StreamingSignV4(req, accessKeyID,
+ secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
+ default:
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ shaHeader := unsignedPayload
+ if len(metadata.contentSHA256Bytes) > 0 {
+ shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
}
+ req.Header.Set("X-Amz-Content-Sha256", shaHeader)
+
+ // Add signature version '4' authorization header.
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location)
}
// Return request.
@@ -626,22 +765,46 @@ func (c Client) setUserAgent(req *http.Request) {
// makeTargetURL make a new target url.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
- // Save host.
host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
- if isAmazonEndpoint(c.endpointURL) {
- // Fetch new host based on the bucket location.
- host = getS3Endpoint(bucketLocation)
+ if s3utils.IsAmazonEndpoint(c.endpointURL) {
+ if c.s3AccelerateEndpoint != "" && bucketName != "" {
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ // Disable transfer acceleration for non-compliant bucket names.
+ if strings.Contains(bucketName, ".") {
+ return nil, ErrTransferAccelerationBucket(bucketName)
+ }
+ // If transfer acceleration is requested set new host.
+ // For more details about enabling transfer acceleration read here.
+ // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
+ host = c.s3AccelerateEndpoint
+ } else {
+ // Do not change the host if the endpoint URL is a FIPS S3 endpoint.
+ if !s3utils.IsAmazonFIPSGovCloudEndpoint(c.endpointURL) {
+ // Fetch new host based on the bucket location.
+ host = getS3Endpoint(bucketLocation)
+ }
+ }
}
+
// Save scheme.
scheme := c.endpointURL.Scheme
+ // Strip port 80 and 443 so we won't send these ports in Host header.
+ // The reason is that browsers and curl automatically remove :80 and :443
+ // from the generated presigned urls, which then leads to a signature mismatch error.
+ if h, p, err := net.SplitHostPort(host); err == nil {
+ if scheme == "http" && p == "80" || scheme == "https" && p == "443" {
+ host = h
+ }
+ }
+
urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" {
// Save if target url will have buckets which suppport virtual host.
- isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName)
+ isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
@@ -649,23 +812,26 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
if isVirtualHostStyle {
urlStr = scheme + "://" + bucketName + "." + host + "/"
if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
+ urlStr = urlStr + s3utils.EncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName + "/"
if objectName != "" {
- urlStr = urlStr + urlEncodePath(objectName)
+ urlStr = urlStr + s3utils.EncodePath(objectName)
}
}
}
+
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
- urlStr = urlStr + "?" + queryEncode(queryValues)
+ urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
+
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
+
return u, nil
}
diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml
index a5dc2b226b522ce79d1c55f1eabe21a4ab7f0e98..0f623d3d4ce70f4d5d346faa49a7897294d0a87b 100644
--- a/vendor/github.com/minio/minio-go/appveyor.yml
+++ b/vendor/github.com/minio/minio-go/appveyor.yml
@@ -17,14 +17,17 @@ install:
- go version
- go env
- go get -u github.com/golang/lint/golint
+ - go get -u github.com/go-ini/ini
+ - go get -u github.com/minio/go-homedir
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
+ - go get -u github.com/dustin/go-humanize
# to run your custom scripts instead of automatic MSBuild
build_script:
- go vet ./...
- gofmt -s -l .
- - golint github.com/minio/minio-go...
+ - golint -set_exit_status github.com/minio/minio-go...
- deadcode
- ineffassign .
- go test -short -v
diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go
index 14ac170447f4fa62f740946452b487d930415c50..3ad06da3a9ff766adf22fb34969241f1f3b20bc8 100644
--- a/vendor/github.com/minio/minio-go/bucket-cache.go
+++ b/vendor/github.com/minio/minio-go/bucket-cache.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,11 +22,14 @@ import (
"net/http"
"net/url"
"path"
- "strings"
"sync"
+
+ "github.com/minio/minio-go/pkg/credentials"
+ "github.com/minio/minio-go/pkg/s3signer"
+ "github.com/minio/minio-go/pkg/s3utils"
)
-// bucketLocationCache - Provides simple mechansim to hold bucket
+// bucketLocationCache - Provides simple mechanism to hold bucket
// locations in memory.
type bucketLocationCache struct {
// mutex is used for handling the concurrent
@@ -66,18 +70,29 @@ func (r *bucketLocationCache) Delete(bucketName string) {
delete(r.items, bucketName)
}
-// getBucketLocation - Get location for the bucketName from location map cache.
+// GetBucketLocation - get location for the bucket name from the location cache; if it is
+// not cached, fetch it freshly by making a new request.
+func (c Client) GetBucketLocation(bucketName string) (string, error) {
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ return c.getBucketLocation(bucketName)
+}
+
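+// Illustrative usage sketch (bucket name is a placeholder): resolving a bucket's
+// region through the exported helper; subsequent calls are served from the
+// in-memory cache.
+//
+//	location, err := clnt.GetBucketLocation("my-bucketname")
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	fmt.Println("bucket lives in", location)
+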
+// getBucketLocation - Get location for the bucketName from the location map cache; if it is
+// not cached, fetch it freshly by making a new request.
func (c Client) getBucketLocation(bucketName string) (string, error) {
- if location, ok := c.bucketLocCache.Get(bucketName); ok {
- return location, nil
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+
+ // If region is set, there is no need to fetch the bucket location.
+ if c.region != "" {
+ return c.region, nil
}
- if isAmazonChinaEndpoint(c.endpointURL) {
- // For china specifically we need to set everything to
- // cn-north-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // China region.
- return "cn-north-1", nil
+ if location, ok := c.bucketLocCache.Get(bucketName); ok {
+ return location, nil
}
// Initialize a new request.
@@ -109,7 +124,7 @@ func processBucketLocationResponse(resp *http.Response, bucketName string) (buck
// For access denied error, it could be an anonymous
// request. Move forward and let the top level callers
// succeed if possible based on their policy.
- if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+ if errResp.Code == "AccessDenied" {
return "us-east-1", nil
}
return "", err
@@ -160,16 +175,48 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Set UserAgent for the request.
c.setUserAgent(req)
- // Set sha256 sum for signature calculation only with signature version '4'.
- if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ // Get credentials from the configured credentials provider.
+ value, err := c.credsProvider.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ var (
+ signerType = value.SignerType
+ accessKeyID = value.AccessKeyID
+ secretAccessKey = value.SecretAccessKey
+ sessionToken = value.SessionToken
+ )
+
+ // Custom signer set then override the behavior.
+ if c.overrideSignerType != credentials.SignatureDefault {
+ signerType = c.overrideSignerType
+ }
+
+ // If signerType returned by credentials helper is anonymous,
+ // then do not sign regardless of signerType override.
+ if value.SignerType == credentials.SignatureAnonymous {
+ signerType = credentials.SignatureAnonymous
}
- // Sign the request.
- if c.signature.isV4() {
- req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
- } else if c.signature.isV2() {
- req = signV2(*req, c.accessKeyID, c.secretAccessKey)
+ if signerType.IsAnonymous() {
+ return req, nil
}
+
+ if signerType.IsV2() {
+ req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ return req, nil
+ }
+
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ var contentSha256 string
+ if c.secure {
+ contentSha256 = unsignedPayload
+ } else {
+ contentSha256 = hex.EncodeToString(sum256([]byte{}))
+ }
+
+ req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
return req, nil
}
diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go
new file mode 100644
index 0000000000000000000000000000000000000000..5ac52e5f74d4adc1c2b93259f85d02bfeb17b187
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/bucket-notification.go
@@ -0,0 +1,231 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "reflect"
+)
+
+// NotificationEventType is an S3 notification event associated with the bucket notification configuration
+type NotificationEventType string
+
+// The role of all event types is described in:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+ ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*"
+ ObjectCreatedPut = "s3:ObjectCreated:Put"
+ ObjectCreatedPost = "s3:ObjectCreated:Post"
+ ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+ ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectAccessedGet = "s3:ObjectAccessed:Get"
+ ObjectAccessedHead = "s3:ObjectAccessed:Head"
+ ObjectAccessedAll = "s3:ObjectAccessed:*"
+ ObjectRemovedAll = "s3:ObjectRemoved:*"
+ ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+ Name string `xml:"Name"`
+ Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+ FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+ S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service.
+// ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+ Partition string
+ Service string
+ Region string
+ AccountID string
+ Resource string
+}
+
+// NewArn creates new ARN based on the given partition, service, region, account id and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+ return Arn{Partition: partition,
+ Service: service,
+ Region: region,
+ AccountID: accountID,
+ Resource: resource}
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+ return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
+
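+// Illustrative sketch (account ID and queue name are placeholders): building an
+// ARN for an SQS queue and rendering it as a string.
+//
+//	queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "PhotoUpdate")
+//	fmt.Println(queueArn.String())
+//	// arn:aws:sqs:us-east-1:123456789012:PhotoUpdate
+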
+// NotificationConfig - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type NotificationConfig struct {
+ ID string `xml:"Id,omitempty"`
+ Arn Arn `xml:"-"`
+ Events []NotificationEventType `xml:"Event"`
+ Filter *Filter `xml:"Filter,omitempty"`
+}
+
+// NewNotificationConfig creates one notification config and sets the given ARN
+func NewNotificationConfig(arn Arn) NotificationConfig {
+ return NotificationConfig{Arn: arn}
+}
+
+// AddEvents adds one or more events to the current notification config
+func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
+ t.Events = append(t.Events, events...)
+}
+
+// AddFilterSuffix sets the suffix configuration to the current notification config
+func (t *NotificationConfig) AddFilterSuffix(suffix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+ // Replace any suffix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// AddFilterPrefix sets the prefix configuration to the current notification config
+func (t *NotificationConfig) AddFilterPrefix(prefix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+ // Replace any prefix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
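+// Illustrative sketch (ARN values are placeholders): assembling a notification
+// configuration with events and an object-key suffix filter.
+//
+//	queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "PhotoUpdate")
+//	config := minio.NewNotificationConfig(queueArn)
+//	config.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+//	config.AddFilterSuffix(".jpg")
+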
+// TopicConfig carries one single topic notification configuration
+type TopicConfig struct {
+ NotificationConfig
+ Topic string `xml:"Topic"`
+}
+
+// QueueConfig carries one single queue notification configuration
+type QueueConfig struct {
+ NotificationConfig
+ Queue string `xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+ NotificationConfig
+ Lambda string `xml:"CloudFunction"`
+}
+
+// BucketNotification - the struct that represents the whole XML to be sent to the web service
+type BucketNotification struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+ TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
+ QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
+ newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
+ for _, n := range b.TopicConfigs {
+ if reflect.DeepEqual(n, newTopicConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
+ newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
+ for _, n := range b.QueueConfigs {
+ if reflect.DeepEqual(n, newQueueConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
+ newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+ for _, n := range b.LambdaConfigs {
+ if reflect.DeepEqual(n, newLambdaConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+}
+
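+// Illustrative sketch (ARN is a placeholder): attaching a queue configuration to
+// a bucket-level notification document, which the client's SetBucketNotification
+// call (assumed, not shown in this diff) would then apply to a bucket.
+//
+//	queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "PhotoUpdate")
+//	queueConfig := minio.NewNotificationConfig(queueArn)
+//	queueConfig.AddEvents(minio.ObjectCreatedAll)
+//
+//	var bucketNotification minio.BucketNotification
+//	bucketNotification.AddQueue(queueConfig)
+//	// The same ARN can later be detached again:
+//	bucketNotification.RemoveQueueByArn(queueArn)
+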
+// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveTopicByArn(arn Arn) {
+ var topics []TopicConfig
+ for _, topic := range b.TopicConfigs {
+ if topic.Topic != arn.String() {
+ topics = append(topics, topic)
+ }
+ }
+ b.TopicConfigs = topics
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
+ var queues []QueueConfig
+ for _, queue := range b.QueueConfigs {
+ if queue.Queue != arn.String() {
+ queues = append(queues, queue)
+ }
+ }
+ b.QueueConfigs = queues
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
+ var lambdas []LambdaConfig
+ for _, lambda := range b.LambdaConfigs {
+ if lambda.Lambda != arn.String() {
+ lambdas = append(lambdas, lambda)
+ }
+ }
+ b.LambdaConfigs = lambdas
+}
diff --git a/vendor/github.com/minio/minio-go/bucket-policy.go b/vendor/github.com/minio/minio-go/bucket-policy.go
deleted file mode 100644
index 57e3f2d02e3889823999fd500ec8e6bde8e904a2..0000000000000000000000000000000000000000
--- a/vendor/github.com/minio/minio-go/bucket-policy.go
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/json"
- "fmt"
- "sort"
-)
-
-// maximum supported access policy size.
-const maxAccessPolicySize = 20 * 1024 * 1024 // 20KiB.
-
-// Resource prefix for all aws resources.
-const awsResourcePrefix = "arn:aws:s3:::"
-
-// BucketPolicy - Bucket level policy.
-type BucketPolicy string
-
-// Different types of Policies currently supported for buckets.
-const (
- BucketPolicyNone BucketPolicy = "none"
- BucketPolicyReadOnly = "readonly"
- BucketPolicyReadWrite = "readwrite"
- BucketPolicyWriteOnly = "writeonly"
-)
-
-// isValidBucketPolicy - Is provided policy value supported.
-func (p BucketPolicy) isValidBucketPolicy() bool {
- switch p {
- case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
- return true
- }
- return false
-}
-
-// User - canonical users list.
-type User struct {
- AWS []string
-}
-
-// Statement - minio policy statement
-type Statement struct {
- Sid string
- Effect string
- Principal User `json:"Principal"`
- Actions []string `json:"Action"`
- Resources []string `json:"Resource"`
- Conditions map[string]map[string]string `json:"Condition,omitempty"`
-}
-
-// BucketAccessPolicy - minio policy collection
-type BucketAccessPolicy struct {
- Version string // date in 0000-00-00 format
- Statements []Statement `json:"Statement"`
-}
-
-// Read write actions.
-var (
- readWriteBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucket",
- "s3:ListBucketMultipartUploads",
- // Add more bucket level read-write actions here.
- }
- readWriteObjectActions = []string{
- "s3:AbortMultipartUpload",
- "s3:DeleteObject",
- "s3:GetObject",
- "s3:ListMultipartUploadParts",
- "s3:PutObject",
- // Add more object level read-write actions here.
- }
-)
-
-// Write only actions.
-var (
- writeOnlyBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucketMultipartUploads",
- // Add more bucket level write actions here.
- }
- writeOnlyObjectActions = []string{
- "s3:AbortMultipartUpload",
- "s3:DeleteObject",
- "s3:ListMultipartUploadParts",
- "s3:PutObject",
- // Add more object level write actions here.
- }
-)
-
-// Read only actions.
-var (
- readOnlyBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucket",
- // Add more bucket level read actions here.
- }
- readOnlyObjectActions = []string{
- "s3:GetObject",
- // Add more object level read actions here.
- }
-)
-
-// subsetActions returns true if the first array is completely
-// contained in the second array. There must be at least
-// the same number of duplicate values in second as there
-// are in first.
-func subsetActions(first, second []string) bool {
- set := make(map[string]int)
- for _, value := range second {
- set[value]++
- }
- for _, value := range first {
- if count, found := set[value]; !found {
- return false
- } else if count < 1 {
- return false
- } else {
- set[value] = count - 1
- }
- }
- return true
-}
-
-// Verifies if we have read/write policy set at bucketName, objectPrefix.
-func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, readWrite bool
- sort.Strings(readWriteBucketActions)
- sort.Strings(readWriteObjectActions)
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(readWriteBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- if subsetActions(readWriteObjectActions, statement.Actions) {
- readWrite = true
- }
- }
- }
- }
- return commonActions && readWrite
-}
-
-// Verifies if we have write only policy set at bucketName, objectPrefix.
-func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, writeOnly bool
- sort.Strings(writeOnlyBucketActions)
- sort.Strings(writeOnlyObjectActions)
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(writeOnlyBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- if subsetActions(writeOnlyObjectActions, statement.Actions) {
- writeOnly = true
- }
- }
- }
- }
- return commonActions && writeOnly
-}
-
-// Verifies if we have read only policy set at bucketName, objectPrefix.
-func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, readOnly bool
- sort.Strings(readOnlyBucketActions)
- sort.Strings(readOnlyObjectActions)
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(readOnlyBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- if subsetActions(readOnlyObjectActions, statement.Actions) {
- readOnly = true
- break
- }
- }
- }
- }
- return commonActions && readOnly
-}
-
-// Removes read write bucket policy if found.
-func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:GetBucketLocation", "s3:ListBucket", "s3:ListBucketMultipartUploads":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject", "s3:GetObject":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// Removes write only bucket policy if found.
-func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:GetBucketLocation", "s3:ListBucketMultipartUploads":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", "s3:DeleteObject":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// Removes read only bucket policy if found.
-func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- var newActions []string
- for _, action := range statement.Actions {
- switch action {
- case "s3:GetBucketLocation", "s3:ListBucket":
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- } else if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- if action == "s3:GetObject" {
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// Remove bucket policies based on the type.
-func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
- // Verify type of policy to be removed.
- if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
- } else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
- } else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
- }
- return statements
-}
-
-// Unmarshals bucket policy byte array into a structured bucket access policy.
-func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
- // Untyped lazy JSON struct.
- type bucketAccessPolicyUntyped struct {
- Version string
- Statement []struct {
- Sid string
- Effect string
- Principal struct {
- AWS json.RawMessage
- }
- Action json.RawMessage
- Resource json.RawMessage
- Condition map[string]map[string]string
- }
- }
- var policyUntyped = bucketAccessPolicyUntyped{}
- // Unmarshal incoming policy into an untyped structure, to be
- // evaluated lazily later.
- err := json.Unmarshal(bucketPolicyBuf, &policyUntyped)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- var policy = BucketAccessPolicy{}
- policy.Version = policyUntyped.Version
- for _, stmtUntyped := range policyUntyped.Statement {
- statement := Statement{}
- // These are properly typed messages.
- statement.Sid = stmtUntyped.Sid
- statement.Effect = stmtUntyped.Effect
- statement.Conditions = stmtUntyped.Condition
-
- // AWS user can have two different types, either as []string
- // and either as regular 'string'. We fall back to doing this
- // since there is no other easier way to fix this.
- err = json.Unmarshal(stmtUntyped.Principal.AWS, &statement.Principal.AWS)
- if err != nil {
- var awsUser string
- err = json.Unmarshal(stmtUntyped.Principal.AWS, &awsUser)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Principal.AWS = []string{awsUser}
- }
- // Actions can have two different types, either as []string
- // and either as regular 'string'. We fall back to doing this
- // since there is no other easier way to fix this.
- err = json.Unmarshal(stmtUntyped.Action, &statement.Actions)
- if err != nil {
- var action string
- err = json.Unmarshal(stmtUntyped.Action, &action)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Actions = []string{action}
- }
- // Resources can have two different types, either as []string
- // and either as regular 'string'. We fall back to doing this
- // since there is no other easier way to fix this.
- err = json.Unmarshal(stmtUntyped.Resource, &statement.Resources)
- if err != nil {
- var resource string
- err = json.Unmarshal(stmtUntyped.Resource, &resource)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Resources = []string{resource}
- }
- // Append the typed policy.
- policy.Statements = append(policy.Statements, statement)
- }
- return policy, nil
-}
-
-// Identifies the policy type from policy Statements.
-func identifyPolicyType(policy BucketAccessPolicy, bucketName, objectPrefix string) (bucketPolicy BucketPolicy) {
- if policy.Statements == nil {
- return BucketPolicyNone
- }
- if isBucketPolicyReadWrite(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyReadWrite
- } else if isBucketPolicyWriteOnly(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyWriteOnly
- } else if isBucketPolicyReadOnly(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyReadOnly
- }
- return BucketPolicyNone
-}
-
-// Generate policy statements for various bucket policies.
-// refer to http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
-// for more details about statement fields.
-func generatePolicyStatement(bucketPolicy BucketPolicy, bucketName, objectPrefix string) ([]Statement, error) {
- if !bucketPolicy.isValidBucketPolicy() {
- return []Statement{}, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
- }
- var statements []Statement
- if bucketPolicy == BucketPolicyNone {
- return []Statement{}, nil
- } else if bucketPolicy == BucketPolicyReadWrite {
- // Get read-write policy.
- statements = setReadWriteStatement(bucketName, objectPrefix)
- } else if bucketPolicy == BucketPolicyReadOnly {
- // Get read only policy.
- statements = setReadOnlyStatement(bucketName, objectPrefix)
- } else if bucketPolicy == BucketPolicyWriteOnly {
- // Return Write only policy.
- statements = setWriteOnlyStatement(bucketName, objectPrefix)
- }
- return statements, nil
-}
-
-// Obtain statements for read-write BucketPolicy.
-func setReadWriteStatement(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := Statement{}
- objectResourceStatement := Statement{}
- statements := []Statement{}
-
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readWriteBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readWriteObjectActions
- // Save the read write policy.
- statements = append(statements, bucketResourceStatement, objectResourceStatement)
- return statements
-}
-
-// Obtain statements for read only BucketPolicy.
-func setReadOnlyStatement(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := Statement{}
- objectResourceStatement := Statement{}
- statements := []Statement{}
-
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readOnlyBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readOnlyObjectActions
- // Save the read only policy.
- statements = append(statements, bucketResourceStatement, objectResourceStatement)
- return statements
-}
-
-// Obtain statements for write only BucketPolicy.
-func setWriteOnlyStatement(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := Statement{}
- objectResourceStatement := Statement{}
- statements := []Statement{}
- // Write only policy.
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = writeOnlyBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = writeOnlyObjectActions
- // Save the write only policy.
- statements = append(statements, bucketResourceStatement, objectResourceStatement)
- return statements
-}
diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go
index b0aa009d89d6753e4e7e29ae54f437761d529ebe..9771d2f9297081c6de54c7667950794239646d17 100644
--- a/vendor/github.com/minio/minio-go/constants.go
+++ b/vendor/github.com/minio/minio-go/constants.go
@@ -18,9 +18,17 @@ package minio
/// Multipart upload defaults.
-// miniPartSize - minimum part size 5MiB per object after which
+// absMinPartSize - absolute minimum part size (5 MiB) below which
+// a part in a multipart upload may not be uploaded.
+const absMinPartSize = 1024 * 1024 * 5
+
+// minPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
-const minPartSize = 1024 * 1024 * 5
+const minPartSize = 1024 * 1024 * 64
+
+// copyPartSize - default (and maximum) part size to copy in a
+// copy-object request (5GiB)
+const copyPartSize = 1024 * 1024 * 1024 * 5
// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000
@@ -37,6 +45,22 @@ const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5
// Multipart operation.
const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
-// optimalReadBufferSize - optimal buffer 5MiB used for reading
-// through Read operation.
-const optimalReadBufferSize = 1024 * 1024 * 5
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// Total number of parallel workers used for multipart operation.
+var totalWorkers = 3
+
+// Signature related constants.
+const (
+ signV4Algorithm = "AWS4-HMAC-SHA256"
+ iso8601DateFormat = "20060102T150405Z"
+)
+
+// Encryption headers stored along with the object.
+const (
+ amzHeaderIV = "X-Amz-Meta-X-Amz-Iv"
+ amzHeaderKey = "X-Amz-Meta-X-Amz-Key"
+ amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc"
+)
diff --git a/vendor/github.com/minio/minio-go/copy-conditions.go b/vendor/github.com/minio/minio-go/copy-conditions.go
deleted file mode 100644
index 9dd63f65e7903ca794881458a965367c05b3f4c0..0000000000000000000000000000000000000000
--- a/vendor/github.com/minio/minio-go/copy-conditions.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "net/http"
- "time"
-)
-
-// copyCondition explanation:
-// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
-//
-// Example:
-//
-// copyCondition {
-// key: "x-amz-copy-if-modified-since",
-// value: "Tue, 15 Nov 1994 12:45:26 GMT",
-// }
-//
-type copyCondition struct {
- key string
- value string
-}
-
-// CopyConditions - copy conditions.
-type CopyConditions struct {
- conditions []copyCondition
-}
-
-// NewCopyConditions - Instantiate new list of conditions.
-func NewCopyConditions() CopyConditions {
- return CopyConditions{
- conditions: make([]copyCondition, 0),
- }
-}
-
-// SetMatchETag - set match etag.
-func (c CopyConditions) SetMatchETag(etag string) error {
- if etag == "" {
- return ErrInvalidArgument("ETag cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-match",
- value: etag,
- })
- return nil
-}
-
-// SetMatchETagExcept - set match etag except.
-func (c CopyConditions) SetMatchETagExcept(etag string) error {
- if etag == "" {
- return ErrInvalidArgument("ETag cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-none-match",
- value: etag,
- })
- return nil
-}
-
-// SetUnmodified - set unmodified time since.
-func (c CopyConditions) SetUnmodified(modTime time.Time) error {
- if modTime.IsZero() {
- return ErrInvalidArgument("Modified since cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-unmodified-since",
- value: modTime.Format(http.TimeFormat),
- })
- return nil
-}
-
-// SetModified - set modified time since.
-func (c CopyConditions) SetModified(modTime time.Time) error {
- if modTime.IsZero() {
- return ErrInvalidArgument("Modified since cannot be empty.")
- }
- c.conditions = append(c.conditions, copyCondition{
- key: "x-amz-copy-source-if-modified-since",
- value: modTime.Format(http.TimeFormat),
- })
- return nil
-}
diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go
new file mode 100644
index 0000000000000000000000000000000000000000..4b1054a696822f5dfe8d1eaf6e4667751a7508a8
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/core.go
@@ -0,0 +1,121 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "io"
+
+ "github.com/minio/minio-go/pkg/policy"
+)
+
+// Core - Inherits Client and adds new methods to expose the low level S3 APIs.
+type Core struct {
+ *Client
+}
+
+// NewCore - Returns a new initialized Core client. This Core client should
+// only be used under special conditions, such as needing access to lower-level
+// primitives in order to write your own wrappers.
+func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) {
+ var s3Client Core
+ client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure)
+ if err != nil {
+ return nil, err
+ }
+ s3Client.Client = client
+ return &s3Client, nil
+}
+
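+// Illustrative usage sketch (endpoint, credentials and bucket are placeholders):
+// the Core client exposes raw S3 primitives, e.g. fetching a single ListObjects page.
+//
+//	core, err := minio.NewCore("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	result, err := core.ListObjects("my-bucketname", "photos/", "", "/", 100)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	fmt.Println("keys in this page:", len(result.Contents))
+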
+// ListObjects - List all the objects at a prefix; optionally, with marker and delimiter
+// you can further filter the results.
+func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) {
+ return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys)
+}
+
+// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses
+// continuationToken instead of marker to further filter the results.
+func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) {
+ return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys)
+}
+
+// PutObject - Upload object. Uploads using single PUT call.
+func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) {
+ return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata)
+}
+
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {
+ result, err := c.initiateMultipartUpload(bucket, object, metadata)
+ return result.UploadID, err
+}
+
+// ListMultipartUploads - List incomplete uploads.
+func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+ return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPart - Upload an object part.
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
+ return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil)
+}
+
+// PutObjectPartWithMetadata - upload an object part with additional request metadata.
+func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int,
+ size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) {
+ return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata)
+}
+
+// ListObjectParts - List uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+ return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
+func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
+ _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{
+ Parts: parts,
+ })
+ return err
+}
+
+// AbortMultipartUpload - Abort an incomplete upload.
+func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
+ return c.abortMultipartUpload(bucket, object, uploadID)
+}
+
+// GetBucketPolicy - fetches bucket access policy for a given bucket.
+func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) {
+ return c.getBucketPolicy(bucket)
+}
+
+// PutBucketPolicy - applies a new bucket access policy for a given bucket.
+func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error {
+ return c.putBucketPolicy(bucket, bucketPolicy)
+}
+
+// GetObject is a lower level API implemented to support reading
+// partial objects and also downloading objects with special conditions
+// matching etag, modtime etc.
+func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
+ return c.getObject(bucketName, objectName, reqHeaders)
+}
+
+// StatObject is a lower level API implemented to support special
+// conditions matching etag, modtime on a request.
+func (c Core) StatObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
+ return c.statObject(bucketName, objectName, reqHeaders)
+}
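
For reviewers who have not used this new low-level wrapper, a minimal usage sketch of the `Core` client follows. It is illustration only (not part of the vendored file); the endpoint, credentials, bucket, and prefix are placeholders:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials; substitute real values.
	core, err := minio.NewCore("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// List up to 100 objects under a prefix using the low-level ListObjects API.
	result, err := core.ListObjects("my-bucket", "backups/", "", "", 100)
	if err != nil {
		log.Fatalln(err)
	}
	for _, object := range result.Contents {
		log.Println(object.Key, object.Size)
	}
}
```
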
diff --git a/vendor/github.com/minio/minio-go/functional_tests.go b/vendor/github.com/minio/minio-go/functional_tests.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec554e4fe9ae585d930179d0a1a666f482cc3577
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/functional_tests.go
@@ -0,0 +1,4912 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ minio "github.com/minio/minio-go"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/dustin/go-humanize"
+ "github.com/minio/minio-go/pkg/encrypt"
+ "github.com/minio/minio-go/pkg/policy"
+)
+
+const (
+ sixtyFiveMiB = 65 * humanize.MiByte // 65MiB
+ thirtyThreeKiB = 33 * humanize.KiByte // 33KiB
+)
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+ letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+ letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
+)
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+ b := make([]byte, n)
+ // src.Int63() generates 63 random bits, enough for letterIdxMax characters.
+ for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ cache, remain = src.Int63(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
+ return prefix + string(b[0:30-len(prefix)])
+}
+
+func isQuickMode() bool {
+ return os.Getenv("MODE") == "quick"
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketError() {
+ region := "eu-central-1"
+
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ // initialize logging params
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": region,
+ }
+
+ // skipping region functional tests for non s3 runs
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-central-1'.
+ if err = c.MakeBucket(bucketName, region); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket Failed", err).Fatal()
+ }
+ if err = c.MakeBucket(bucketName, region); err == nil {
+ failureLog(function, args, startTime, "", "Bucket already exists", err).Fatal()
+ }
+ // Verify valid error response from server.
+ if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+ minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
+ }
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+ }
+
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests various bucket supported formats.
+func testMakeBucketRegions() {
+ region := "eu-central-1"
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ // initialize logging params
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": region,
+ }
+
+ // skipping region functional tests for non s3 runs
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-central-1'.
+ if err = c.MakeBucket(bucketName, region); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally staged into a path style instead of
+ // virtual host style.
+ region = "us-west-2"
+ args["region"] = region
+ if err = c.MakeBucket(bucketName+".withperiod", region); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Remove the newly created bucket.
+ if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
+ failureLog(function, args, startTime, "", "Remove bucket failed", err).Fatal()
+ }
+
+ successLogger(function, args, startTime).Info()
+}
+
+// Test PutObject with large data to trigger a multipart upload through the ReadAt path.
+func testPutObjectReadAt() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObject(bucketName, objectName, reader, objectContentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "objectContentType": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
+ }
+
+ // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
+ // Use different data for each part for multipart tests to ensure part order at the end.
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Object content type
+ objectContentType := "binary/octet-stream"
+ args["objectContentType"] = objectContentType
+
+ n, err := c.PutObject(bucketName, objectName, reader, objectContentType)
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Get Object failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat Object failed", err).Fatal()
+ }
+ if st.Size != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
+ }
+ if st.ContentType != objectContentType {
+ failureLog(function, args, startTime, "", "Content types don't match", err).Fatal()
+ }
+ if err := r.Close(); err != nil {
+ failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+ }
+ if err := r.Close(); err == nil {
+ failureLog(function, args, startTime, "", "Object is already closed, didn't return error on Close", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+
+ successLogger(function, args, startTime).Info()
+}
+
+// Test PutObjectWithMetadata with large data to trigger a multipart upload.
+func testPutObjectWithMetadata() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectWithMetadata(bucketName, objectName, reader, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "metadata": "",
+ }
+
+ if isQuickMode() {
+ ignoredLog(function, args, startTime, "Skipping functional tests for short runs").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "Make bucket failed", err).Fatal()
+ }
+
+ // Generate data using 2 parts
+ // Use different data in each part for multipart tests to ensure part order at the end.
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+
+ n, err := c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
+ "Content-Type": {customContentType},
+ }, nil)
+ args["metadata"] = map[string][]string{
+ "Content-Type": {customContentType},
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+ if st.Size != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(sixtyFiveMiB)+" got "+string(st.Size), err).Fatal()
+ }
+ if st.ContentType != customContentType {
+ failureLog(function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err).Fatal()
+ }
+ if err := r.Close(); err != nil {
+ failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+ }
+ if err := r.Close(); err == nil {
+ failureLog(function, args, startTime, "", "Object already closed, should respond with error", err).Fatal()
+ }
+
+ if err = c.RemoveObject(bucketName, objectName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+
+ successLogger(function, args, startTime).Info()
+}
+
+// Test put object with streaming signature.
+func testPutObjectStreaming() {
+ // initialize logging params
+ objectName := "test-object"
+ startTime := time.Now()
+ function := "PutObjectStreaming(bucketName, objectName, reader)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": objectName,
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+ "minio-go-test")
+ args["bucketName"] = bucketName
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Upload an object.
+ sizes := []int64{0, 64*1024 - 1, 64 * 1024}
+
+ for _, size := range sizes {
+ data := bytes.Repeat([]byte("a"), int(size))
+ n, err := c.PutObjectStreaming(bucketName, objectName, bytes.NewReader(data))
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
+ }
+
+ if n != size {
+ failureLog(function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err).Fatal()
+ }
+ }
+
+ // Remove the object.
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ // Remove the bucket.
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test listing partially uploaded objects.
+func testListPartiallyUploaded() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "isRecursive": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("0"), sixtyFiveMiB*2))
+
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, cerr := io.CopyN(writer, r, (sixtyFiveMiB*2)/25)
+ if cerr != nil {
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ writer.CloseWithError(errors.New("proactively closed to be verified later"))
+ }()
+
+ objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ if err == nil {
+ failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+ }
+ if !strings.Contains(err.Error(), "proactively closed to be verified later") {
+ failureLog(function, args, startTime, "", "String not found in PutObject output", err).Fatal()
+ }
+
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+ isRecursive := true
+ args["isRecursive"] = isRecursive
+
+ multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
+ for multiPartObject := range multiPartObjectCh {
+ if multiPartObject.Err != nil {
+ failureLog(function, args, startTime, "", "Multipart object error", multiPartObject.Err).Fatal()
+ }
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test get object seeker from the end, using whence set to '2'.
+func testGetObjectSeekEnd() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+ }
+
+ pos, err := r.Seek(-100, 2)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
+ }
+ if pos != st.Size-100 {
+ failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
+ }
+ buf2 := make([]byte, 100)
+ m, err := io.ReadFull(r, buf2)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Error reading through io.ReadFull", err).Fatal()
+ }
+ if m != len(buf2) {
+ failureLog(function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
+ }
+ hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
+ hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
+ if hexBuf1 != hexBuf2 {
+ failureLog(function, args, startTime, "", "Values at same index dont match", err).Fatal()
+ }
+ pos, err = r.Seek(-100, 2)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Object Seek failed", err).Fatal()
+ }
+ if pos != st.Size-100 {
+ failureLog(function, args, startTime, "", "Incorrect position", err).Fatal()
+ }
+ if err = r.Close(); err != nil {
+ failureLog(function, args, startTime, "", "ObjectClose failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test get object reader to not throw error on being closed twice.
+func testGetObjectClosedTwice() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+ }
+ if err := r.Close(); err != nil {
+ failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+ }
+ if err := r.Close(); err == nil {
+ failureLog(function, args, startTime, "", "Already closed object. No error returned", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test removing multiple objects with Remove API
+func testRemoveMultipleObjects() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ // Multi remove of 1100 objects
+ nrObjects := 1100
+
+ objectsCh := make(chan string)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ _, err = c.PutObject(bucketName, objectName, r, "application/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ continue
+ }
+ objectsCh <- objectName
+ }
+ }()
+
+ // Call RemoveObjects API
+ errorCh := c.RemoveObjects(bucketName, objectsCh)
+
+ // Check if errorCh doesn't receive any error
+ select {
+ case r, more := <-errorCh:
+ if more {
+ failureLog(function, args, startTime, "", "Unexpected error", r.Err).Fatal()
+ }
+ }
+
+ // Clean the bucket created by the test
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests removing partially uploaded objects.
+func testRemovePartiallyUploaded() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "RemoveIncompleteUpload(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, cerr := io.CopyN(writer, r, 128*1024)
+ if cerr != nil {
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ writer.CloseWithError(errors.New("proactively closed to be verified later"))
+ }()
+
+ objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ if err == nil {
+ failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+ }
+ if !strings.Contains(err.Error(), "proactively closed to be verified later") {
+ failureLog(function, args, startTime, "", "String not found", err).Fatal()
+ }
+ err = c.RemoveIncompleteUpload(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests FPutObject of a big file to trigger multipart
+func testFPutObjectMultipart() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "objectContentType": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ var fileName = getFilePath("datafile-65-MB")
+ if os.Getenv("MINT_DATA_DIR") == "" {
+ // Make a temp file with minPartSize bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+ }
+ // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ _, err = io.Copy(file, getDataReader("non-existent", sixtyFiveMiB))
+ if err != nil {
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ }
+ err = file.Close()
+ if err != nil {
+ failureLog(function, args, startTime, "", "File Close failed", err).Fatal()
+ }
+ fileName = file.Name()
+ args["fileName"] = fileName
+ }
+ totalSize := sixtyFiveMiB * 1
+ // Set base object name
+ objectName := bucketName + "FPutObject" + "-standard"
+ args["objectName"] = objectName
+
+ objectContentType := "testapplication/octet-stream"
+ args["objectContentType"] = objectContentType
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err := c.FPutObject(bucketName, objectName, fileName, objectContentType)
+ if err != nil {
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ }
+ if n != int64(totalSize) {
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ }
+
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+ objInfo, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Unexpected error", err).Fatal()
+ }
+ if objInfo.Size != int64(totalSize) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err).Fatal()
+ }
+ if objInfo.ContentType != objectContentType {
+ failureLog(function, args, startTime, "", "ContentType doesn't match", err).Fatal()
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests FPutObject with null contentType (default = application/octet-stream)
+func testFPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "FPutObject(bucketName, objectName, fileName, objectContentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
+ // Use different data in part for multipart tests to check parts are uploaded in correct order.
+ var fName = getFilePath("datafile-65-MB")
+ if os.Getenv("MINT_DATA_DIR") == "" {
+ // Make a temp file with minPartSize bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+ }
+
+ // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ var buffer = bytes.Repeat([]byte(string('a')), sixtyFiveMiB)
+ if _, err = file.Write(buffer); err != nil {
+ failureLog(function, args, startTime, "", "File write failed", err).Fatal()
+ }
+ // Close the file pro-actively for windows.
+ err = file.Close()
+ if err != nil {
+ failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+ }
+ fName = file.Name()
+ }
+ var totalSize = sixtyFiveMiB * 1
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err := c.FPutObject(bucketName, objectName+"-standard", fName, "application/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ }
+ if n != int64(totalSize) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, "")
+ if err != nil {
+ failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+ }
+ if n != int64(totalSize) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+ }
+ srcFile, err := os.Open(fName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "File open failed", err).Fatal()
+ }
+ defer srcFile.Close()
+ // Add extension to temp file name
+ tmpFile, err := os.Create(fName + ".gtar")
+ if err != nil {
+ failureLog(function, args, startTime, "", "File create failed", err).Fatal()
+ }
+ defer tmpFile.Close()
+ _, err = io.Copy(tmpFile, srcFile)
+ if err != nil {
+ failureLog(function, args, startTime, "", "File copy failed", err).Fatal()
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", "")
+ if err != nil {
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ }
+ if n != int64(totalSize) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err).Fatal()
+ }
+
+ // Check headers
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ if err != nil {
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal()
+ }
+
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ failureLog(function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err).Fatal()
+ }
+
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ }
+ if rGTar.ContentType != "application/x-gtar" {
+ failureLog(function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rStandard.ContentType, err).Fatal()
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName+"-standard")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+
+ err = os.Remove(fName + ".gtar")
+ if err != nil {
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+ }
+
+ defer func() {
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ }()
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat object failed", err).Fatal()
+ }
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
+ }
+
+ // This following function helps us to compare data from the reader after seek
+ // with the data from the original buffer
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
+ }
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(thirtyThreeKiB)); err != nil {
+ if err != io.EOF {
+ failureLog(function, args, startTime, "", "CopyN failed", err).Fatal()
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+ }
+ }
+
+ // Generic seek error for errors other than io.EOF
+ seekErr := errors.New("seek error")
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, thirtyThreeKiB},
+ // Start from offset larger than possible
+ {int64(thirtyThreeKiB) + 1024, 0, 0, seekErr, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, thirtyThreeKiB},
+ // Move larger than possible
+ {int64(thirtyThreeKiB), 1, 0, seekErr, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, seekErr, false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, int64(thirtyThreeKiB) - 1024, io.EOF, true, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(thirtyThreeKiB) - 1024, nil, true, thirtyThreeKiB - 1024, thirtyThreeKiB},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(thirtyThreeKiB) * 2, 2, 0, seekErr, true, 0, 0},
+ }
+
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ // We expect an error
+ if testCase.err == seekErr && err == nil {
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal()
+ }
+ // We expect a specific error
+ if testCase.err != seekErr && testCase.err != err {
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err).Fatal()
+ }
+ // If we expect an error go to the next loop
+ if testCase.err != nil {
+ continue
+ }
+ // Check the returned seek pos
+ if n != testCase.pos {
+ failureLog(function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err).Fatal()
+ }
+ // Compare only if shouldCmp is activated
+ if testCase.shouldCmp {
+ cmpData(r, testCase.start, testCase.end)
+ }
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(n), err).Fatal()
+ }
+
+ // read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+ offset := int64(2048)
+
+ // read directly
+ buf1 := make([]byte, 512)
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ // Test readAt before stat is called.
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ if m != len(buf1) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+", got "+string(st.Size), err).Fatal()
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ if m != len(buf2) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ if m != len(buf3) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ if m != len(buf4) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ }
+
+ buf5 := make([]byte, n)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ }
+ if m != len(buf5) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf, buf5) {
+ failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
+ }
+
+ buf6 := make([]byte, n+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ }
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test Presigned Post Policy
+func testPresignedPostPolicy() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PresignedPostPolicy(policy)"
+ args := map[string]interface{}{
+ "policy": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ }
+
+ policy := minio.NewPostPolicy()
+
+ if err := policy.SetBucket(""); err == nil {
+ failureLog(function, args, startTime, "", "SetBucket did not fail for invalid conditions", err).Fatal()
+ }
+ if err := policy.SetKey(""); err == nil {
+ failureLog(function, args, startTime, "", "SetKey did not fail for invalid conditions", err).Fatal()
+ }
+ if err := policy.SetKeyStartsWith(""); err == nil {
+ failureLog(function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err).Fatal()
+ }
+ if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+ failureLog(function, args, startTime, "", "SetExpires did not fail for invalid conditions", err).Fatal()
+ }
+ if err := policy.SetContentType(""); err == nil {
+ failureLog(function, args, startTime, "", "SetContentType did not fail for invalid conditions", err).Fatal()
+ }
+ if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+ failureLog(function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err).Fatal()
+ }
+
+ policy.SetBucket(bucketName)
+ policy.SetKey(objectName)
+ policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+ policy.SetContentType("image/png")
+ policy.SetContentLengthRange(1024, 1024*1024)
+ args["policy"] = policy
+
+ _, _, err = c.PresignedPostPolicy(policy)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedPostPolicy failed", err).Fatal()
+ }
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests copy object
+func testCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "CopyObject(dst, src)"
+ args := map[string]interface{}{
+ "dst": "",
+ "src": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", int64(thirtyThreeKiB), n), err).Fatal()
+ }
+
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+
+ // Copy Source
+ src := minio.NewSourceInfo(bucketName, objectName, nil)
+
+ // Set copy conditions.
+
+ // All invalid conditions first.
+ err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ }
+ err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ }
+ err = src.SetMatchETagCond("")
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
+ }
+ err = src.SetMatchETagExceptCond("")
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
+ }
+
+ err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
+ }
+ err = src.SetMatchETagCond(objInfo.ETag)
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
+ }
+ args["src"] = src
+
+ dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+ args["dst"] = dst
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+
+ // Perform the Copy
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ }
+
+ // Source object
+ r, err = c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ // Destination object
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+ if objInfo.Size != objInfoCopy.Size {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", objInfoCopy.Size, objInfo.Size), err).Fatal()
+ }
+
+ // CopyObject again but with wrong conditions
+ src = minio.NewSourceInfo(bucketName, objectName, nil)
+ err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
+ }
+ err = src.SetMatchETagExceptCond(objInfo.ETag)
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
+ }
+
+ // Perform the Copy which should fail
+ err = c.CopyObject(dst, src)
+ if err == nil {
+ failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
+ }
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName + "-copy")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// TestEncryptionPutGet tests client side encryption
+func testEncryptionPutGet() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "cbcMaterials": "",
+ "metadata": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate a symmetric key
+ symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+	// Generate an asymmetric key from predefined public and private certificates
+ privateKey, err := hex.DecodeString(
+ "30820277020100300d06092a864886f70d0101010500048202613082025d" +
+ "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
+ "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
+ "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
+ "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
+ "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
+ "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
+ "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
+ "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
+ "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
+ "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
+ "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
+ "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
+ "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
+ "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
+ "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
+ "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
+ "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
+ "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
+ "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
+ "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
+ "9945cb5c7d")
+
+ if err != nil {
+		failureLog(function, args, startTime, "", "DecodeString for asymmetric private key failed", err).Fatal()
+ }
+
+ publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
+ "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
+ "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
+ "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
+ "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
+ "80a89e43f29b570203010001")
+ if err != nil {
+		failureLog(function, args, startTime, "", "DecodeString for asymmetric public key failed", err).Fatal()
+ }
+
+ // Generate an asymmetric key
+ asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+ if err != nil {
+		failureLog(function, args, startTime, "", "NewAsymmetricKey failed", err).Fatal()
+ }
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+ args["metadata"] = customContentType
+
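+	// The buffer sizes below cover the empty object, sizes around the 16-byte
+	// AES-CBC block boundary, and larger payloads up to 1 MiB.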
+ testCases := []struct {
+ buf []byte
+ encKey encrypt.Key
+ }{
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
+ args["cbcMaterials"] = cbcMaterials
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewCBCSecureMaterials failed", err).Fatal()
+ }
+
+ // Put encrypted data
+ _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials, map[string][]string{"Content-Type": {customContentType}}, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutEncryptedObject failed", err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetEncryptedObject failed", err).Fatal()
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, error: %s", i+1, err.Error()), err).Fatal()
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, Number of bytes of received object does not match, expected %d, got %d", i+1, len(testCase.buf), recvBuffer.Len()), err).Fatal()
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, Encrypted sent is not equal to decrypted, expected %s, got %s", i+1, testCase.buf, recvBuffer.Bytes()), err).Fatal()
+ }
+
+ // Remove test object
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+			failureLog(function, args, startTime, "", fmt.Sprintf("Test %d, RemoveObject failed with: %s", i+1, err.Error()), err).Fatal()
+ }
+
+ }
+
+ // Remove test bucket
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+func testBucketNotification() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "SetBucketNotification(bucketName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ if os.Getenv("NOTIFY_BUCKET") == "" ||
+ os.Getenv("NOTIFY_SERVICE") == "" ||
+ os.Getenv("NOTIFY_REGION") == "" ||
+ os.Getenv("NOTIFY_ACCOUNTID") == "" ||
+ os.Getenv("NOTIFY_RESOURCE") == "" {
+ ignoredLog(function, args, startTime, "Skipped notification test as it is not configured").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ bucketName := os.Getenv("NOTIFY_BUCKET")
+ args["bucketName"] = bucketName
+
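+	// The topic ARN is built from the NOTIFY_* environment variables; the queue
+	// ARN is a dummy entry that is only added and removed again below.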
+ topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
+ queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
+
+ topicConfig := minio.NewNotificationConfig(topicArn)
+
+ topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+ topicConfig.AddFilterSuffix("jpg")
+
+ queueConfig := minio.NewNotificationConfig(queueArn)
+ queueConfig.AddEvents(minio.ObjectCreatedAll)
+ queueConfig.AddFilterPrefix("photos/")
+
+ bNotification := minio.BucketNotification{}
+ bNotification.AddTopic(topicConfig)
+
+ // Add the same topicConfig again, should have no effect
+ // because it is duplicated
+ bNotification.AddTopic(topicConfig)
+ if len(bNotification.TopicConfigs) != 1 {
+ failureLog(function, args, startTime, "", "Duplicate entry added", err).Fatal()
+ }
+
+ // Add and remove a queue config
+ bNotification.AddQueue(queueConfig)
+ bNotification.RemoveQueueByArn(queueArn)
+
+ err = c.SetBucketNotification(bucketName, bNotification)
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetBucketNotification failed", err).Fatal()
+ }
+
+ bNotification, err = c.GetBucketNotification(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetBucketNotification failed", err).Fatal()
+ }
+
+ if len(bNotification.TopicConfigs) != 1 {
+ failureLog(function, args, startTime, "", "Topic config is empty", err).Fatal()
+ }
+
+ if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
+ failureLog(function, args, startTime, "", "Couldn't get the suffix", err).Fatal()
+ }
+
+ err = c.RemoveAllBucketNotification(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveAllBucketNotification failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests comprehensive list of all methods.
+func testFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testFunctional()"
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, nil, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ function = "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": bucketName,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "File creation failed", err).Fatal()
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ failureLog(function, args, startTime, "", "File write failed", err).Fatal()
+ }
+ }
+ file.Close()
+
+	// Verify if bucket exists and you have access.
+ var exists bool
+ exists, err = c.BucketExists(bucketName)
+ function = "BucketExists(bucketName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
+ }
+ if !exists {
+ failureLog(function, args, startTime, "", "Could not find the bucket", err).Fatal()
+ }
+
+ // Asserting the default bucket policy.
+ policyAccess, err := c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ }
+ if policyAccess != "none" {
+ failureLog(function, args, startTime, "", "policy should be set to none", err).Fatal()
+ }
+ // Set the bucket policy to 'public readonly'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadOnly,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ }
+ // should return policy `readonly`.
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ }
+ if policyAccess != "readonly" {
+ failureLog(function, args, startTime, "", "policy should be set to readonly", err).Fatal()
+ }
+
+ // Make the bucket 'public writeonly'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyWriteOnly,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ }
+ // should return policy `writeonly`.
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ }
+ if policyAccess != "writeonly" {
+ failureLog(function, args, startTime, "", "policy should be set to writeonly", err).Fatal()
+ }
+ // Make the bucket 'public read/write'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadWrite,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ }
+ // should return policy `readwrite`.
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetBucketPolicy failed", err).Fatal()
+ }
+ if policyAccess != "readwrite" {
+ failureLog(function, args, startTime, "", "policy should be set to readwrite", err).Fatal()
+ }
+ // List all buckets.
+ buckets, err := c.ListBuckets()
+ function = "ListBuckets()"
+ args = nil
+
+ if len(buckets) == 0 {
+ failureLog(function, args, startTime, "", "Found bucket list to be empty", err).Fatal()
+ }
+ if err != nil {
+ failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
+ }
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+ failureLog(function, args, startTime, "", "Bucket: "+bucketName+" not found", err).Fatal()
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("f"), 1<<19)
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+ function = "PutObject(bucketName, objectName, reader, contentType)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "contentType": "",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(len(buf)) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Length doesn't match, expected %d, got %d", len(buf), n), err).Fatal()
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-nolength",
+ "contentType": "binary/octet-stream",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(len(buf)) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Length doesn't match, expected %d, got %d", len(buf), n), err).Fatal()
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
+ }
+
+ objFound = false
+ isRecursive = true // Recursive is true.
+ function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ failureLog(function, args, startTime, "", "Object "+objectName+" not found", err).Fatal()
+ }
+
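+	// No incomplete uploads are expected to be listed for this object name.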
+ incompObjNotFound := true
+
+ function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
+ }
+
+ newReader, err := c.GetObject(bucketName, objectName)
+ function = "GetObject(bucketName, objectName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ newReadBytes, err := ioutil.ReadAll(newReader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ failureLog(function, args, startTime, "", "GetObject bytes mismatch", err).Fatal()
+ }
+
+ err = c.FGetObject(bucketName, objectName, fileName+"-f")
+ function = "FGetObject(bucketName, objectName, fileName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "fileName": fileName + "-f",
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "FGetObject failed", err).Fatal()
+ }
+
+ // Generate presigned HEAD object url.
+ presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+ }
+ // Verify if presigned url works.
+ resp, err := http.Head(presignedHeadURL.String())
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
+ }
+ if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", fmt.Sprintf("PresignedHeadObject response incorrect, status %d", resp.StatusCode), err).Fatal()
+ }
+ if resp.Header.Get("ETag") == "" {
+ failureLog(function, args, startTime, "", "PresignedHeadObject response incorrect", err).Fatal()
+ }
+ resp.Body.Close()
+
+ // Generate presigned GET object url.
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ }
+
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ }
+ if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", fmt.Sprintf("PresignedGetObject response incorrect, status %d", resp.StatusCode), err).Fatal()
+ }
+ newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ "reqParams": reqParams,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ }
+ if resp.StatusCode != http.StatusOK {
+		failureLog(function, args, startTime, "", fmt.Sprintf("PresignedGetObject response incorrect, status %d", resp.StatusCode), err).Fatal()
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject response incorrect", err).Fatal()
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ failureLog(function, args, startTime, "", "Bytes mismatch for presigned GET URL", err).Fatal()
+ }
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+		failureLog(function, args, startTime, "", "wrong Content-Disposition received "+resp.Header.Get("Content-Disposition"), err).Fatal()
+ }
+
+ presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+		"objectName": objectName + "-presigned",
+ "expires": 3600 * time.Second,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+ }
+
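+	// Upload new content through the presigned PUT URL using a plain HTTP client.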
+ buf = bytes.Repeat([]byte("g"), 1<<19)
+
+ req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ failureLog(function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err).Fatal()
+ }
+ httpClient := &http.Client{
+ // Setting a sensible time out of 30secs to wait for response
+ // headers. Request is pro-actively cancelled after 30secs
+ // with no response.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
+	resp, err = httpClient.Do(req)
+	if err != nil {
+		failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+	}
+	resp.Body.Close()
+
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject after PresignedPutObject failed", err).Fatal()
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll after GetObject failed", err).Fatal()
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ function = "RemoveObject(bucketName, objectName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveObject(bucketName, objectName+"-f")
+ args["objectName"] = objectName + "-f"
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-nolength")
+ args["objectName"] = objectName + "-nolength"
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-presigned")
+ args["objectName"] = objectName + "-presigned"
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName)
+ function = "RemoveBucket(bucketName)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
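+	// Removing the bucket a second time should fail, since it no longer exists.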
+ err = c.RemoveBucket(bucketName)
+ if err == nil {
+ failureLog(function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err).Fatal()
+ }
+ if err.Error() != "The specified bucket does not exist" {
+		failureLog(function, args, startTime, "", "RemoveBucket returned unexpected error", err).Fatal()
+ }
+ if err = os.Remove(fileName); err != nil {
+ failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ failureLog(function, args, startTime, "", "File Remove failed", err).Fatal()
+ }
+ function = "testFunctional()"
+ successLogger(function, args, startTime).Info()
+}
+
+// Test for validating GetObject Reader* methods functioning when the
+// object is modified in the object store.
+func testGetObjectObjectModified() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+ defer c.RemoveBucket(bucketName)
+
+ // Upload an object.
+ objectName := "myobject"
+ content := "helloworld"
+ _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), "application/text")
+ if err != nil {
+ failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
+ }
+
+ defer c.RemoveObject(bucketName, objectName)
+
+ reader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err).Fatal()
+ }
+ defer reader.Close()
+
+ // Read a few bytes of the object.
+ b := make([]byte, 5)
+ n, err := reader.ReadAt(b, 0)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err).Fatal()
+ }
+
+ // Upload different contents to the same object while object is being read.
+ newContent := "goodbyeworld"
+ _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), "application/text")
+ if err != nil {
+ failureLog(function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err).Fatal()
+ }
+
+ // Confirm that a Stat() call in between doesn't change the Object's cached etag.
+ _, err = reader.Stat()
+ expectedError := "At least one of the pre-conditions you specified did not hold"
+	if err == nil || err.Error() != expectedError {
+		failureLog(function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received: "+fmt.Sprintf("%v", err), err).Fatal()
+ }
+
+ // Read again only to find object contents have been modified since last read.
+ _, err = reader.ReadAt(b, int64(n))
+	if err == nil || err.Error() != expectedError {
+		failureLog(function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received: "+fmt.Sprintf("%v", err), err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test validates putObject to upload a file seeked at a given offset.
+func testPutObjectUploadSeekedObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileToUpload": "",
+ "contentType": "binary/octet-stream",
+ }
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+ defer c.RemoveBucket(bucketName)
+
+ tempfile, err := ioutil.TempFile("", "minio-go-upload-test-")
+ args["fileToUpload"] = tempfile
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "TempFile create failed", err).Fatal()
+ }
+
+ var data []byte
+ if fileName := getFilePath("datafile-100-kB"); fileName != "" {
+ data, _ = ioutil.ReadFile(fileName)
+ } else {
+ // Generate data more than 32K
+ data = bytes.Repeat([]byte("1"), 120000)
+ }
+ var length = len(data)
+ if _, err = tempfile.Write(data); err != nil {
+ failureLog(function, args, startTime, "", "TempFile write failed", err).Fatal()
+ }
+
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
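+	// Seek to the middle of the temp file so that PutObject uploads only the remaining half.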
+ offset := length / 2
+ if _, err := tempfile.Seek(int64(offset), 0); err != nil {
+ failureLog(function, args, startTime, "", "TempFile seek failed", err).Fatal()
+ }
+
+ n, err := c.PutObject(bucketName, objectName, tempfile, "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+ if n != int64(length-offset) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d, got %d", length-offset, n), err).Fatal()
+ }
+ tempfile.Close()
+ if err = os.Remove(tempfile.Name()); err != nil {
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+ }
+
+ length = int(n)
+
+ obj, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ n, err = obj.Seek(int64(offset), 0)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ }
+ if n != int64(offset) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d, got %d", offset, n), err).Fatal()
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"getobject", obj, "binary/octet-stream")
+ if err != nil {
+		failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+ if n != int64(length-offset) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d, got %d", length-offset, n), err).Fatal()
+ }
+
+ if err = c.RemoveObject(bucketName, objectName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests bucket re-create errors.
+func testMakeBucketErrorV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
+ failureLog(function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err).Fatal()
+ }
+ // Verify valid error response from server.
+ if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+ minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+ failureLog(function, args, startTime, "", "Invalid error returned by server", err).Fatal()
+ }
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test get object reader to not throw error on being closed twice.
+func testGetObjectClosedTwiceV2() {
+ // initialize logging params
+ startTime := time.Now()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+	}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", thirtyThreeKiB, n), err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+
+ if st.Size != int64(thirtyThreeKiB) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", thirtyThreeKiB, st.Size), err).Fatal()
+ }
+ if err := r.Close(); err != nil {
+		failureLog(function, args, startTime, "", "Object Close failed", err).Fatal()
+ }
+ if err := r.Close(); err == nil {
+ failureLog(function, args, startTime, "", "Object is already closed, should return error", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests removing partially uploaded objects.
+func testRemovePartiallyUploadedV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "RemoveIncompleteUpload(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
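+	// Stream 25 x 128 KiB through a pipe, then close it with an error so the
+	// multipart upload is left incomplete on the server.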
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, cerr := io.CopyN(writer, r, 128*1024)
+ if cerr != nil {
+ failureLog(function, args, startTime, "", "Copy failed", cerr).Fatal()
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ writer.CloseWithError(errors.New("proactively closed to be verified later"))
+ }()
+
+ objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
+ if err == nil {
+ failureLog(function, args, startTime, "", "PutObject should fail", err).Fatal()
+ }
+ if err.Error() != "proactively closed to be verified later" {
+ failureLog(function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err).Fatal()
+ }
+ err = c.RemoveIncompleteUpload(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveIncompleteUpload failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests FPutObject hidden contentType setting
+func testFPutObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "FPutObject(bucketName, objectName, fileName, contentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "contentType": "application/octet-stream",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Make a temp file with 11*1024*1024 bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ failureLog(function, args, startTime, "", "TempFile creation failed", err).Fatal()
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+ n, err := io.CopyN(file, r, 11*1024*1024)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ }
+ if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", 11*1024*1024, n), err).Fatal()
+ }
+
+ // Close the file pro-actively for windows.
+ err = file.Close()
+ if err != nil {
+ failureLog(function, args, startTime, "", "File close failed", err).Fatal()
+ }
+
+ // Set base object name
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName
+ args["fileName"] = file.Name()
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ }
+ if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", 11*1024*1024, n), err).Fatal()
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
+ args["objectName"] = objectName + "-Octet"
+ args["contentType"] = ""
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ }
+ if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", 11*1024*1024, n), err).Fatal()
+ }
+
+ // Add extension to temp file name
+ fileName := file.Name()
+ err = os.Rename(file.Name(), fileName+".gtar")
+ if err != nil {
+ failureLog(function, args, startTime, "", "Rename failed", err).Fatal()
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
+	args["objectName"] = objectName + "-GTar"
+ args["contentType"] = ""
+ args["fileName"] = fileName + ".gtar"
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "FPutObject failed", err).Fatal()
+ }
+ if n != int64(11*1024*1024) {
+		failureLog(function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d, got %d", 11*1024*1024, n), err).Fatal()
+ }
+
+ // Check headers
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard")
+ if err != nil {
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err).Fatal()
+ }
+
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err).Fatal()
+ }
+
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
+ if err != nil {
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ }
+ if rGTar.ContentType != "application/x-gtar" {
+ failureLog(function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err).Fatal()
+ }
+
+ // Remove all objects and bucket and temp file
+ err = c.RemoveObject(bucketName, objectName+"-standard")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-Octet")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName+"-GTar")
+ if err != nil {
+		failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+
+ err = os.Remove(fileName + ".gtar")
+ if err != nil {
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests various bucket supported formats.
+func testMakeBucketRegionsV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "MakeBucket(bucketName, region)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "region": "eu-west-1",
+ }
+
+ if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+ ignoredLog(function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+	// Make a new bucket in 'eu-west-1'.
+ if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ if err = c.RemoveBucket(bucketName); err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+
+ // Make a new bucket with '.' in its name, in 'us-west-2'. This
+ // request is internally staged into a path style instead of
+ // virtual host style.
+ if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
+ args["bucketName"] = bucketName + ".withperiod"
+ args["region"] = "us-west-2"
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Remove the newly created bucket.
+ if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data.
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(st.Size), err).Fatal()
+ }
+
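+ // The Seek calls below exercise all three whence values (0 = io.SeekStart,
+ // 1 = io.SeekCurrent, 2 = io.SeekEnd); seeking past the end of the object
+ // with whence 2 is expected to fail.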
+ offset := int64(2048)
+ n, err = r.Seek(offset, 0)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ }
+ if n != offset {
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
+ }
+ n, err = r.Seek(0, 1)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ }
+ if n != offset {
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset)+" got "+string(n), err).Fatal()
+ }
+ _, err = r.Seek(offset, 2)
+ if err == nil {
+ failureLog(function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err).Fatal()
+ }
+ n, err = r.Seek(-offset, 2)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ }
+ if n != st.Size-offset {
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(st.Size-offset)+" got "+string(n), err).Fatal()
+ }
+
+ var buffer1 bytes.Buffer
+ if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+ if err != io.EOF {
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ }
+ }
+ if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+ failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+ }
+
+ // Seek again and read again.
+ n, err = r.Seek(offset-1, 0)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Seek failed", err).Fatal()
+ }
+ if n != (offset - 1) {
+ failureLog(function, args, startTime, "", "Number of seeked bytes does not match, expected "+string(offset-1)+" got "+string(n), err).Fatal()
+ }
+
+ var buffer2 bytes.Buffer
+ if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+ if err != io.EOF {
+ failureLog(function, args, startTime, "", "Copy failed", err).Fatal()
+ }
+ }
+ // Verify the bytes read from offset 2047 to the end match the original buffer.
+ if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+ failureLog(function, args, startTime, "", "Incorrect read bytes v/s original buffer", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(n), err).Fatal()
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+
+ if st.Size != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(thirtyThreeKiB)+" got "+string(st.Size), err).Fatal()
+ }
+
+ offset := int64(2048)
+
+ // Read directly
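+ // Three independent 512-byte ReadAt calls at increasing offsets follow; each
+ // result is compared against the corresponding slice of the uploaded buffer.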
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ m, err := r.ReadAt(buf2, offset)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ if m != len(buf2) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+" got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ if m != len(buf3) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+" got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ if m != len(buf4) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+" got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ failureLog(function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err).Fatal()
+ }
+
+ buf5 := make([]byte, n)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ }
+ if m != len(buf5) {
+ failureLog(function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+" got "+string(m), err).Fatal()
+ }
+ if !bytes.Equal(buf, buf5) {
+ failureLog(function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err).Fatal()
+ }
+
+ buf6 := make([]byte, n+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ failureLog(function, args, startTime, "", "ReadAt failed", err).Fatal()
+ }
+ }
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Tests copy object
+func testCopyObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{
+ "destination": "",
+ "source": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate 33K of data.
+ var reader = getDataReader("datafile-33-kB", thirtyThreeKiB)
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, reader, "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(thirtyThreeKiB) {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(thirtyThreeKiB))+" got "+string(n), err).Fatal()
+ }
+
+ r, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+
+ // Copy Source
+ src := minio.NewSourceInfo(bucketName, objectName, nil)
+
+ // Set copy conditions.
+
+ // All invalid conditions first.
+ err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ }
+ err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err).Fatal()
+ }
+ err = src.SetMatchETagCond("")
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err).Fatal()
+ }
+ err = src.SetMatchETagExceptCond("")
+ if err == nil {
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err).Fatal()
+ }
+
+ err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetModifiedSinceCond failed", err).Fatal()
+ }
+ err = src.SetMatchETagCond(objInfo.ETag)
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetMatchETagCond failed", err).Fatal()
+ }
+ args["source"] = src
+
+ dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+ args["destination"] = dst
+
+ // Perform the Copy
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ }
+
+ // Source object
+ r, err = c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+ // Destination object
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ failureLog(function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+" got "+string(objInfo.Size), err).Fatal()
+ }
+
+ // CopyObject again but with wrong conditions
+ src = minio.NewSourceInfo(bucketName, objectName, nil)
+ err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetUnmodifiedSinceCond failed", err).Fatal()
+ }
+ err = src.SetMatchETagExceptCond(objInfo.ETag)
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetMatchETagExceptCond failed", err).Fatal()
+ }
+
+ // Perform the Copy which should fail
+ err = c.CopyObject(dst, src)
+ if err == nil {
+ failureLog(function, args, startTime, "", "CopyObject did not fail for invalid conditions", err).Fatal()
+ }
+
+ // Remove all objects and buckets
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+
+ err = c.RemoveBucket(bucketName + "-copy")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testComposeObjectErrorCasesWrapper(minioClient)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Test that more than 10K source objects cannot be
+ // concatenated.
+ srcArr := [10001]minio.SourceInfo{}
+ srcSlice := srcArr[:]
+ dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+
+ if err := c.ComposeObject(dst, srcSlice); err == nil {
+ failureLog(function, args, startTime, "", "Expected error in ComposeObject", err).Fatal()
+ } else if err.Error() != "There must be as least one and up to 10000 source objects." {
+ failureLog(function, args, startTime, "", "Got unexpected error", err).Fatal()
+ }
+
+ // Create a source with invalid offset spec and check that
+ // error is returned:
+ // 1. Create the source object.
+ const badSrcSize = 5 * 1024 * 1024
+ buf := bytes.Repeat([]byte("1"), badSrcSize)
+ _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), "")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+ // 2. Set invalid range spec on the object (going beyond
+ // object size)
+ badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
+ err = badSrc.SetRange(1, badSrcSize)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Setting NewSourceInfo failed", err).Fatal()
+ }
+ // 3. ComposeObject call should fail.
+ if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
+ failureLog(function, args, startTime, "", "ComposeObject expected to fail", err).Fatal()
+ } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
+ failureLog(function, args, startTime, "", "Got invalid error", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test expected error cases
+func testComposeObjectErrorCasesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testComposeObjectErrorCasesV2()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ testComposeObjectErrorCasesWrapper(c)
+}
+
+func testComposeMultipleSources(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "ComposeObject(destination, sources)"
+ args := map[string]interface{}{
+ "destination": "",
+ "sources": "",
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Upload a small source object
+ const srcSize = 1024 * 1024 * 5
+ buf := bytes.Repeat([]byte("1"), srcSize)
+ _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ // We will append 10 copies of the object.
+ srcs := []minio.SourceInfo{}
+ for i := 0; i < 10; i++ {
+ srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil))
+ }
+ // make the last part very small
+ err = srcs[9].SetRange(0, 0)
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetRange failed", err).Fatal()
+ }
+ args["sources"] = srcs
+
+ dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil)
+ args["destination"] = dst
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+ err = c.ComposeObject(dst, srcs)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+ }
+
+ objProps, err := c.StatObject(bucketName, "dstObject")
+ if err != nil {
+ failureLog(function, args, startTime, "", "StatObject failed", err).Fatal()
+ }
+
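+ // The last of the ten sources was restricted to a single byte via SetRange(0, 0),
+ // so the composed object is expected to be 9*srcSize+1 bytes long.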
+ if objProps.Size != 9*srcSize+1 {
+ failureLog(function, args, startTime, "", "Size mismatched! Expected "+string(10000*srcSize)+" got "+string(objProps.Size), err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test concatenating multiple source objects
+func testCompose10KSourcesV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testCompose10KSourcesV2(minioClient)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ testComposeMultipleSources(c)
+}
+
+func testEncryptedCopyObjectWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testEncryptedCopyObjectWrapper(minioClient)"
+ args := map[string]interface{}{}
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
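+ // Two distinct 32-byte SSE-C keys: key1 protects the uploaded source object,
+ // key2 is applied to the destination object during the copy.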
+ key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
+ key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")
+
+ // 1. create an sse-c encrypted object to copy by uploading
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ metadata := make(map[string][]string)
+ for k, v := range key1.GetSSEHeaders() {
+ metadata[k] = append(metadata[k], v)
+ }
+ _, err = c.PutObjectWithSize(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), metadata, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+ }
+
+ // 2. copy object and change encryption key
+ src := minio.NewSourceInfo(bucketName, "srcObject", &key1)
+ dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ }
+
+ // 3. get copied object and check if content is equal
+ reqH := minio.NewGetReqHeaders()
+ for k, v := range key2.GetSSEHeaders() {
+ reqH.Set(k, v)
+ }
+ coreClient := minio.Core{c}
+ reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+ defer reader.Close()
+
+ decBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+ if !bytes.Equal(decBytes, buf) {
+ failureLog(function, args, startTime, "", "Downloaded object mismatched for encrypted object", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test encrypted copy object
+func testEncryptedCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testEncryptedCopyObject()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ // c.TraceOn(os.Stderr)
+ testEncryptedCopyObjectWrapper(c)
+}
+
+// Test encrypted copy object
+func testEncryptedCopyObjectV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testEncryptedCopyObjectV2()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio v2 client object creation failed", err).Fatal()
+ }
+
+ testEncryptedCopyObjectWrapper(c)
+}
+
+func testUserMetadataCopying() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testUserMetadataCopying()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+func testUserMetadataCopyingWrapper(c *minio.Client) {
+ // initialize logging params
+ startTime := time.Now()
+ function := "CopyObject(destination, source)"
+ args := map[string]interface{}{
+ "destination": "",
+ "source": "",
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err := c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
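+ // fetchMeta returns only the user-defined x-amz-meta-* headers of an object,
+ // so that source and destination metadata can be compared directly.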
+ fetchMeta := func(object string) (h http.Header) {
+ objInfo, err := c.StatObject(bucketName, object)
+ if err != nil {
+ failureLog(function, args, startTime, "", "Stat failed", err).Fatal()
+ }
+ h = make(http.Header)
+ for k, vs := range objInfo.Metadata {
+ if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+ for _, v := range vs {
+ h.Add(k, v)
+ }
+ }
+ }
+ return h
+ }
+
+ // 1. create a source object with user metadata to copy, by uploading
+ const srcSize = 1024 * 1024
+ buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+ metadata := make(http.Header)
+ metadata.Set("x-amz-meta-myheader", "myvalue")
+ _, err = c.PutObjectWithMetadata(bucketName, "srcObject",
+ bytes.NewReader(buf), metadata, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObjectWithMetadata failed", err).Fatal()
+ }
+ if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ }
+
+ // 2. create source
+ src := minio.NewSourceInfo(bucketName, "srcObject", nil)
+ // 2.1 create destination with metadata set
+ dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+
+ // 3. Check that copying to an object with metadata set resets
+ // the headers on the copy.
+ err = c.CopyObject(dst1, src)
+ args["destination"] = dst1
+ args["source"] = src
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ }
+
+ expectedHeaders := make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ }
+
+ // 4. create destination with no metadata set and same source
+ dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+
+ }
+ src = minio.NewSourceInfo(bucketName, "srcObject", nil)
+
+ // 5. Check that copying to an object with no metadata set,
+ // copies metadata.
+ err = c.CopyObject(dst2, src)
+ args["destination"] = dst2
+ args["source"] = src
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "CopyObject failed", err).Fatal()
+ }
+
+ expectedHeaders = metadata
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ }
+
+ // 6. Compose a pair of sources.
+ srcs := []minio.SourceInfo{
+ minio.NewSourceInfo(bucketName, "srcObject", nil),
+ minio.NewSourceInfo(bucketName, "srcObject", nil),
+ }
+ dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+
+ err = c.ComposeObject(dst3, srcs)
+ function = "ComposeObject(destination, sources)"
+ args["destination"] = dst3
+ args["source"] = srcs
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+ }
+
+ // Check that no headers are copied in this case
+ if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ }
+
+ // 7. Compose a pair of sources with dest user metadata set.
+ srcs = []minio.SourceInfo{
+ minio.NewSourceInfo(bucketName, "srcObject", nil),
+ minio.NewSourceInfo(bucketName, "srcObject", nil),
+ }
+ dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
+ if err != nil {
+ failureLog(function, args, startTime, "", "NewDestinationInfo failed", err).Fatal()
+ }
+
+ err = c.ComposeObject(dst4, srcs)
+ function = "ComposeObject(destination, sources)"
+ args["destination"] = dst4
+ args["source"] = srcs
+
+ if err != nil {
+ failureLog(function, args, startTime, "", "ComposeObject failed", err).Fatal()
+ }
+
+ // Check that only the explicitly set destination metadata is present and
+ // that no source headers were copied.
+ expectedHeaders = make(http.Header)
+ expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+ if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
+ failureLog(function, args, startTime, "", "Metadata match failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+func testUserMetadataCopyingV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testUserMetadataCopyingV2()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ }
+
+ // c.TraceOn(os.Stderr)
+ testUserMetadataCopyingWrapper(c)
+}
+
+// Test put object with size -1 byte object.
+func testPutObjectNoLengthV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": -1,
+ "metadata": nil,
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+ "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ objectName := bucketName + "unique"
+ args["objectName"] = objectName
+
+ // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
+ // Use different data for each part for multipart tests to ensure part order at the end.
+ var reader = getDataReader("datafile-65-MB", sixtyFiveMiB)
+ defer reader.Close()
+
+ // Upload an object.
+ n, err := c.PutObjectWithSize(bucketName, objectName, reader, -1, nil, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+ }
+ if n != int64(sixtyFiveMiB) {
+ failureLog(function, args, startTime, "", "Expected upload object size "+string(sixtyFiveMiB)+" got "+string(n), err).Fatal()
+ }
+
+ // Remove the object.
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ // Remove the bucket.
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test put objects of unknown size.
+func testPutObjectsUnknownV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectStreaming(bucketName, objectName, reader)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+ "minio-go-test")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Issues are revealed by trying to upload multiple files of unknown size
+ // sequentially (on 4GB machines)
+ for i := 1; i <= 4; i++ {
+ // Simulate that we could be receiving byte slices of data that we want
+ // to upload as a file
+ rpipe, wpipe := io.Pipe()
+ defer rpipe.Close()
+ go func() {
+ b := []byte("test")
+ wpipe.Write(b)
+ wpipe.Close()
+ }()
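+ // The pipe writer is closed after writing 4 bytes, so PutObjectStreaming only
+ // learns the object size when its reader reaches EOF.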
+
+ // Upload the object.
+ objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+ args["objectName"] = objectName
+
+ n, err := c.PutObjectStreaming(bucketName, objectName, rpipe)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObjectStreaming failed", err).Fatal()
+ }
+ if n != int64(4) {
+ failureLog(function, args, startTime, "", "Expected upload object size "+string(4)+" got "+string(n), err).Fatal()
+ }
+
+ // Remove the object.
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ }
+
+ // Remove the bucket.
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test put object with 0 byte object.
+func testPutObject0ByteV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "PutObjectWithSize(bucketName, objectName, reader, size, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "size": 0,
+ "metadata": nil,
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
+ "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ objectName := bucketName + "unique"
+
+ // Upload an object.
+ n, err := c.PutObjectWithSize(bucketName, objectName, bytes.NewReader([]byte("")), 0, nil, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObjectWithSize failed", err).Fatal()
+ }
+ if n != 0 {
+ failureLog(function, args, startTime, "", "Expected upload object size 0 but got "+string(n), err).Fatal()
+ }
+
+ // Remove the object.
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+
+ // Remove the bucket.
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// Test expected error cases
+func testComposeObjectErrorCases() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testComposeObjectErrorCases()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ testComposeObjectErrorCasesWrapper(c)
+}
+
+// Test concatenating 10K objects
+func testCompose10KSources() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testCompose10KSources()"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client object creation failed", err).Fatal()
+ }
+
+ testComposeMultipleSources(c)
+}
+
+// Tests comprehensive list of all methods.
+func testFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ function := "testFunctionalV2()"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ failureLog(function, args, startTime, "", "Minio client v2 object creation failed", err).Fatal()
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ failureLog(function, args, startTime, "", "MakeBucket failed", err).Fatal()
+ }
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "file create failed", err).Fatal()
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ failureLog(function, args, startTime, "", "file write failed", err).Fatal()
+ }
+ }
+ file.Close()
+
+ // Verify the bucket exists and you have access.
+ var exists bool
+ exists, err = c.BucketExists(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "BucketExists failed", err).Fatal()
+ }
+ if !exists {
+ failureLog(function, args, startTime, "", "Could not find existing bucket "+bucketName, err).Fatal()
+ }
+
+ // Make the bucket 'public read/write'.
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ if err != nil {
+ failureLog(function, args, startTime, "", "SetBucketPolicy failed", err).Fatal()
+ }
+
+ // List all buckets.
+ buckets, err := c.ListBuckets()
+ if len(buckets) == 0 {
+ failureLog(function, args, startTime, "", "List buckets cannot be empty", err).Fatal()
+ }
+ if err != nil {
+ failureLog(function, args, startTime, "", "ListBuckets failed", err).Fatal()
+ }
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+ failureLog(function, args, startTime, "", "Bucket "+bucketName+"not found", err).Fatal()
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+ if n != int64(len(buf)) {
+ failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
+ if err != nil {
+ failureLog(function, args, startTime, "", "PutObject failed", err).Fatal()
+ }
+
+ if n != int64(len(buf)) {
+ failureLog(function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err).Fatal()
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal()
+ }
+
+ objFound = false
+ isRecursive = true // Recursive is true.
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ failureLog(function, args, startTime, "", "Could not find existing object "+objectName, err).Fatal()
+ }
+
+ incompObjNotFound := true
+ for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ failureLog(function, args, startTime, "", "Unexpected dangling incomplete upload found", err).Fatal()
+ }
+
+ newReader, err := c.GetObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ newReadBytes, err := ioutil.ReadAll(newReader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ }
+
+ err = c.FGetObject(bucketName, objectName, fileName+"-f")
+ if err != nil {
+ failureLog(function, args, startTime, "", "FgetObject failed", err).Fatal()
+ }
+
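+ // The presigned URLs generated below carry the request signature in their query
+ // string, so a plain HTTP client can access the objects without credentials
+ // until the 3600 second expiry.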
+ // Generate presigned HEAD object url.
+ presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedHeadObject failed", err).Fatal()
+ }
+ // Verify if presigned url works.
+ resp, err := http.Head(presignedHeadURL.String())
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedHeadObject URL head request failed", err).Fatal()
+ }
+ if resp.StatusCode != http.StatusOK {
+ failureLog(function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err).Fatal()
+ }
+ if resp.Header.Get("ETag") == "" {
+ failureLog(function, args, startTime, "", "Got empty ETag", err).Fatal()
+ }
+ resp.Body.Close()
+
+ // Generate presigned GET object url.
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
+ }
+ if resp.StatusCode != http.StatusOK {
+ failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
+ }
+ newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ // Generate presigned GET object url.
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject failed", err).Fatal()
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedGetObject URL GET request failed", err).Fatal()
+ }
+ if resp.StatusCode != http.StatusOK {
+ failureLog(function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err).Fatal()
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ }
+ // Verify content disposition.
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ failureLog(function, args, startTime, "", "wrong Content-Disposition received ", err).Fatal()
+ }
+
+ presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+ if err != nil {
+ failureLog(function, args, startTime, "", "PresignedPutObject failed", err).Fatal()
+ }
+ // Generate data more than 32K
+ buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
+
+ req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
+ }
+ httpClient := &http.Client{
+ // Setting a sensible time out of 30secs to wait for response
+ // headers. Request is pro-actively cancelled after 30secs
+ // with no response.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ failureLog(function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err).Fatal()
+ }
+
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned")
+ if err != nil {
+ failureLog(function, args, startTime, "", "GetObject failed", err).Fatal()
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ failureLog(function, args, startTime, "", "ReadAll failed", err).Fatal()
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ failureLog(function, args, startTime, "", "Bytes mismatch", err).Fatal()
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveObject(bucketName, objectName+"-f")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveObject(bucketName, objectName+"-nolength")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveObject(bucketName, objectName+"-presigned")
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveObject failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ failureLog(function, args, startTime, "", "RemoveBucket failed", err).Fatal()
+ }
+ err = c.RemoveBucket(bucketName)
+ if err == nil {
+ failureLog(function, args, startTime, "", "RemoveBucket should fail as bucket does not exist", err).Fatal()
+ }
+ if err.Error() != "The specified bucket does not exist" {
+ failureLog(function, args, startTime, "", "RemoveBucket failed with wrong error message", err).Fatal()
+ }
+ if err = os.Remove(fileName); err != nil {
+ failureLog(function, args, startTime, "", "File remove failed", err).Fatal()
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ failureLog(function, args, startTime, "", "File removes failed", err).Fatal()
+ }
+ successLogger(function, args, startTime).Info()
+}
+
+// mustParseBool converts a string to a bool, returning false on any parse error.
+func mustParseBool(str string) bool {
+ b, err := strconv.ParseBool(str)
+ if err != nil {
+ return false
+ }
+ return b
+}
+
+func main() {
+ // Output to stdout instead of the default stderr
+ log.SetOutput(os.Stdout)
+ // create custom formatter
+ mintFormatter := mintJSONFormatter{}
+ // set custom formatter
+ log.SetFormatter(&mintFormatter)
+ // log Info or above -- success cases are Info level, failures are Fatal level
+ log.SetLevel(log.InfoLevel)
+ // execute tests
+ if !isQuickMode() {
+ testMakeBucketErrorV2()
+ testGetObjectClosedTwiceV2()
+ testRemovePartiallyUploadedV2()
+ testFPutObjectV2()
+ testMakeBucketRegionsV2()
+ testGetObjectReadSeekFunctionalV2()
+ testGetObjectReadAtFunctionalV2()
+ testCopyObjectV2()
+ testFunctionalV2()
+ testComposeObjectErrorCasesV2()
+ testCompose10KSourcesV2()
+ testEncryptedCopyObjectV2()
+ testUserMetadataCopyingV2()
+ testPutObject0ByteV2()
+ testPutObjectNoLengthV2()
+ testPutObjectsUnknownV2()
+ testMakeBucketError()
+ testMakeBucketRegions()
+ testPutObjectWithMetadata()
+ testPutObjectReadAt()
+ testPutObjectStreaming()
+ testListPartiallyUploaded()
+ testGetObjectSeekEnd()
+ testGetObjectClosedTwice()
+ testRemoveMultipleObjects()
+ testRemovePartiallyUploaded()
+ testFPutObjectMultipart()
+ testFPutObject()
+ testGetObjectReadSeekFunctional()
+ testGetObjectReadAtFunctional()
+ testPresignedPostPolicy()
+ testCopyObject()
+ testEncryptionPutGet()
+ testComposeObjectErrorCases()
+ testCompose10KSources()
+ testUserMetadataCopying()
+ testEncryptedCopyObject()
+ testBucketNotification()
+ testFunctional()
+ testGetObjectObjectModified()
+ testPutObjectUploadSeekedObject()
+ } else {
+ testFunctional()
+ testFunctionalV2()
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b0e574408fb241a4d2f6b3abe9aab8cb2a9fa74
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
@@ -0,0 +1,89 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "fmt"
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error, collecting all errors from all providers.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvAWSS3{},
+// &credentials.EnvMinio{},
+// })
+//
+// // Usage of ChainCredentials.
+// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+// if err != nil {
+// log.Fatalln(err)
+// }
+//
+type Chain struct {
+ Providers []Provider
+ curr Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return New(&Chain{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value from the first provider that
+// succeeds, or an error if no provider returned valid credentials.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ } // Success.
+ c.curr = p
+ return creds, nil
+ }
+ c.curr = nil
+ return Value{}, fmt.Errorf("No valid providers found %v", errs)
+}
+
+// IsExpired returns the expired state of the currently cached provider,
+// if there is one. If there is no current provider, true will be returned.
+func (c *Chain) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample
new file mode 100644
index 0000000000000000000000000000000000000000..130746f4baf5b581002e336783a1f4b29051d329
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample
@@ -0,0 +1,17 @@
+{
+ "version": "8",
+ "hosts": {
+ "play": {
+ "url": "https://play.minio.io:9000",
+ "accessKey": "Q3AM3UQ867SPQQA43P2F",
+ "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ "api": "S3v2"
+ },
+ "s3": {
+ "url": "https://s3.amazonaws.com",
+ "accessKey": "accessKey",
+ "secretKey": "secret",
+ "api": "S3v4"
+ }
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
new file mode 100644
index 0000000000000000000000000000000000000000..cc30005324102d2b08118df513ed6ca2b7dbc80b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
@@ -0,0 +1,175 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Signature Type.
+ SignerType SignatureType
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what to
+// be expired means.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+ // An error is returned if the value was not obtainable, or is empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type IAMCredentialProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
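+//
+// For example, SetExpiration(tokenExpiry, 10*time.Second) makes IsExpired
+// report true 10 seconds before tokenExpiry, leaving time to refresh the
+// credentials before they actually lapse.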
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// Credentials - A container for concurrency-safe retrieval of credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronization state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ sync.Mutex
+
+ creds Value
+ forceRefresh bool
+ provider Provider
+}
+
+// New returns a pointer to a new Credentials with the provider set.
+func New(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.Lock()
+ defer c.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be refreshed.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.Lock()
+ defer c.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample
new file mode 100644
index 0000000000000000000000000000000000000000..7fc91d9d2047bc69b01fa0364a0cd64daa0c9923
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..fa1908aebc2ee0e833d976f7da935a4a802aa12c
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go
@@ -0,0 +1,45 @@
+// Package credentials provides credential retrieval and management
+// for S3 compatible object storage.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := NewFromEnv()
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := NewFromIAM("")
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
new file mode 100644
index 0000000000000000000000000000000000000000..11934433c5d0a066cd17f63bbb6671d5b422d653
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+ retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+ return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not been retrieved yet.
+func (e *EnvAWS) IsExpired() bool {
+ return !e.retrieved
+}
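
Not part of the vendored file: a short sketch of the environment-variable provider above, assuming a standalone program that imports `github.com/minio/minio-go/pkg/credentials`.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Reads AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY (or the legacy
	// AWS_ACCESS_KEY / AWS_SECRET_KEY fallbacks). Missing keys fall back
	// to anonymous access rather than returning an error.
	creds := credentials.NewEnvAWS()
	v, err := creds.Get()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("signer:", v.SignerType)
}
```
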
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
new file mode 100644
index 0000000000000000000000000000000000000000..791087ef5603a38b238fb6737826f356b4643bfb
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+type EnvMinio struct {
+ retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+ return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("MINIO_ACCESS_KEY")
+ secret := os.Getenv("MINIO_SECRET_KEY")
+
+ signerType := SignatureV4
+ if id == "" || secret == "" {
+ signerType = SignatureAnonymous
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SignerType: signerType,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not been retrieved yet.
+func (e *EnvMinio) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 0000000000000000000000000000000000000000..1be6213851d92473a772e058cebaaa2e7892e4c6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,120 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/go-ini/ini"
+ homedir "github.com/minio/go-homedir"
+)
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type FileAWSCredentials struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename string, profile string) *Credentials {
+ return New(&FileAWSCredentials{
+ filename: filename,
+ profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileAWSCredentials) Retrieve() (Value, error) {
+ if p.filename == "" {
+ p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
+ if p.filename == "" {
+ homeDir, err := homedir.Dir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+ }
+ if p.profile == "" {
+ p.profile = os.Getenv("AWS_PROFILE")
+ if p.profile == "" {
+ p.profile = "default"
+ }
+ }
+
+ p.retrieved = false
+
+ iniProfile, err := loadProfile(p.filename, p.profile)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Default to empty string if not found.
+ id := iniProfile.Key("aws_access_key_id")
+ // Default to empty string if not found.
+ secret := iniProfile.Key("aws_secret_access_key")
+ // Default to empty string if not found.
+ token := iniProfile.Key("aws_session_token")
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileAWSCredentials) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads the given profile from the shared credentials file.
+// The credentials retrieved from the profile are returned, or an error if it
+// fails to read from the file or the data is invalid.
+func loadProfile(filename, profile string) (*ini.Section, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return nil, err
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return nil, err
+ }
+ return iniProfile, nil
+}
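
Not part of the vendored file: a sketch of the shared-credentials-file provider above, assuming a standalone program importing `github.com/minio/minio-go/pkg/credentials`.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Empty filename/profile fall back to AWS_SHARED_CREDENTIALS_FILE /
	// AWS_PROFILE, then to ~/.aws/credentials and the "default" profile.
	creds := credentials.NewFileAWSCredentials("", "")
	v, err := creds.Get()
	if err != nil {
		fmt.Println("could not load shared credentials:", err)
		return
	}
	fmt.Println("loaded key:", v.AccessKeyID)
}
```
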
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e26dd30227b33a3bd5455ddb198fb3b94c4bc48
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
@@ -0,0 +1,129 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ homedir "github.com/minio/go-homedir"
+)
+
+// A FileMinioClient retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Configuration file example: $HOME/.mc/config.json
+type FileMinioClient struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.mc/config.json"
+ // Windows: "%USERALIAS%\mc\config.json"
+ filename string
+
+ // Minio Alias to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "MINIO_ALIAS" or "default" if
+ // environment variable is also not set.
+ alias string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileMinioClient returns a pointer to a new Credentials object
+// wrapping the Alias file provider.
+func NewFileMinioClient(filename string, alias string) *Credentials {
+ return New(&FileMinioClient{
+ filename: filename,
+ alias: alias,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *FileMinioClient) Retrieve() (Value, error) {
+ if p.filename == "" {
+ homeDir, err := homedir.Dir()
+ if err != nil {
+ return Value{}, err
+ }
+ p.filename = filepath.Join(homeDir, ".mc", "config.json")
+ if runtime.GOOS == "windows" {
+ p.filename = filepath.Join(homeDir, "mc", "config.json")
+ }
+ }
+
+ if p.alias == "" {
+ p.alias = os.Getenv("MINIO_ALIAS")
+ if p.alias == "" {
+ p.alias = "s3"
+ }
+ }
+
+ p.retrieved = false
+
+ hostCfg, err := loadAlias(p.filename, p.alias)
+ if err != nil {
+ return Value{}, err
+ }
+
+ p.retrieved = true
+ return Value{
+ AccessKeyID: hostCfg.AccessKey,
+ SecretAccessKey: hostCfg.SecretKey,
+ SignerType: parseSignatureType(hostCfg.API),
+ }, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *FileMinioClient) IsExpired() bool {
+ return !p.retrieved
+}
+
+// hostConfig configuration of a host.
+type hostConfig struct {
+ URL string `json:"url"`
+ AccessKey string `json:"accessKey"`
+ SecretKey string `json:"secretKey"`
+ API string `json:"api"`
+}
+
+// config config version.
+type config struct {
+ Version string `json:"version"`
+ Hosts map[string]hostConfig `json:"hosts"`
+}
+
+// loadAlias loads the given alias from the shared config file.
+// The credentials retrieved from the alias are returned, or an error if it
+// fails to read from the file.
+func loadAlias(filename, alias string) (hostConfig, error) {
+ cfg := &config{}
+ configBytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return hostConfig{}, err
+ }
+ if err = json.Unmarshal(configBytes, cfg); err != nil {
+ return hostConfig{}, err
+ }
+ return cfg.Hosts[alias], nil
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
new file mode 100644
index 0000000000000000000000000000000000000000..b862cf538d4e9f072ebeda3dc73f26cdb539c93b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
@@ -0,0 +1,227 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "net/http"
+ "net/url"
+ "path"
+ "time"
+)
+
+// DefaultExpiryWindow - Default expiry window.
+// ExpiryWindow will allow the credentials to trigger refreshing
+// prior to the credentials actually expiring. This is beneficial
+// so race conditions with expiring credentials do not cause
+// request to fail unexpectedly due to ExpiredTokenException exceptions.
+const DefaultExpiryWindow = time.Second * 10 // 10 secs
+
+// An IAM retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+type IAM struct {
+ Expiry
+
+ // Required http Client to use when connecting to IAM metadata service.
+ Client *http.Client
+
+ // Custom endpoint to fetch IAM role credentials.
+ endpoint string
+}
+
+// redirectHeaders copies all headers when following a redirect URL.
+// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
+func redirectHeaders(req *http.Request, via []*http.Request) error {
+ if len(via) == 0 {
+ return nil
+ }
+ for key, val := range via[0].Header {
+ req.Header[key] = val
+ }
+ return nil
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+ defaultIAMRoleEndpoint = "http://169.254.169.254"
+ defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
+)
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM
+// provider. If endpoint is empty, the default EC2 metadata service endpoint
+// is used.
+func NewIAM(endpoint string) *Credentials {
+ if endpoint == "" {
+ endpoint = defaultIAMRoleEndpoint
+ }
+ p := &IAM{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ CheckRedirect: redirectHeaders,
+ },
+ endpoint: endpoint,
+ }
+ return New(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or it is unable to extract
+// the desired credentials.
+func (m *IAM) Retrieve() (Value, error) {
+ roleCreds, err := getCredentials(m.Client, m.endpoint)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Expiry window is set to 10secs.
+ m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ SignerType: SignatureV4,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+
+ // Unused params.
+ LastUpdated time.Time
+ Type string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+ if endpoint == "" {
+ endpoint = defaultIAMRoleEndpoint
+ }
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = defaultIAMSecurityCredsPath
+ return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if the request
+// cannot be made or the response cannot be read.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return nil, errors.New(resp.Status)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(resp.Body)
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ return credsList, nil
+}
+
+// getCredentials - obtains the credentials from the IAM role name associated with
+// the current EC2 service.
+//
+// If the credentials cannot be found, or there is an error
+// reading the response, an error will be returned.
+func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ u, err := getIAMRoleURL(endpoint)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ roleNames, err := listRoleNames(client, u)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if len(roleNames) == 0 {
+ return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
+ }
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // - An instance profile can contain only one IAM role. This limit cannot be increased.
+ roleName := roleNames[0]
+
+ // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+ // The following command retrieves the security credentials for an
+ // IAM role named `s3access`.
+ //
+ // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
+ //
+ u.Path = path.Join(u.Path, roleName)
+ req, err := http.NewRequest("GET", u.String(), nil)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return ec2RoleCredRespBody{}, errors.New(resp.Status)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{}, err
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
+ }
+
+ return respCreds, nil
+}
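
Not part of the vendored file: a sketch of the IAM provider above, assuming a standalone program importing `github.com/minio/minio-go/pkg/credentials`. This only yields credentials when run on an EC2 instance with an attached IAM role.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// An empty endpoint defaults to the EC2 metadata service at
	// http://169.254.169.254.
	creds := credentials.NewIAM("")
	v, err := creds.Get()
	if err != nil {
		fmt.Println("no IAM role credentials:", err)
		return
	}
	fmt.Println("temporary key:", v.AccessKeyID)
}
```
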
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
new file mode 100644
index 0000000000000000000000000000000000000000..c64ad6c234bed3bcbb48f6d902d684a43cf40965
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
@@ -0,0 +1,76 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "strings"
+
+// SignatureType is type of Authorization requested for a given HTTP request.
+type SignatureType int
+
+// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
+const (
+ // SignatureDefault is always set to v4.
+ SignatureDefault SignatureType = iota
+ SignatureV4
+ SignatureV2
+ SignatureV4Streaming
+ SignatureAnonymous // Anonymous signature signifies, no signature.
+)
+
+// IsV2 - is signature SignatureV2?
+func (s SignatureType) IsV2() bool {
+ return s == SignatureV2
+}
+
+// IsV4 - is signature SignatureV4?
+func (s SignatureType) IsV4() bool {
+ return s == SignatureV4 || s == SignatureDefault
+}
+
+// IsStreamingV4 - is signature SignatureV4Streaming?
+func (s SignatureType) IsStreamingV4() bool {
+ return s == SignatureV4Streaming
+}
+
+// IsAnonymous - is signature empty?
+func (s SignatureType) IsAnonymous() bool {
+ return s == SignatureAnonymous
+}
+
+// String returns a humanized version of the signature type;
+// strings returned here are case insensitive.
+func (s SignatureType) String() string {
+ if s.IsV2() {
+ return "S3v2"
+ } else if s.IsV4() {
+ return "S3v4"
+ } else if s.IsStreamingV4() {
+ return "S3v4Streaming"
+ }
+ return "Anonymous"
+}
+
+func parseSignatureType(str string) SignatureType {
+ if strings.EqualFold(str, "S3v4") {
+ return SignatureV4
+ } else if strings.EqualFold(str, "S3v2") {
+ return SignatureV2
+ } else if strings.EqualFold(str, "S3v4Streaming") {
+ return SignatureV4Streaming
+ }
+ return SignatureAnonymous
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
new file mode 100644
index 0000000000000000000000000000000000000000..25aff569654cd8116ab1532390bc78d57689df61
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go
@@ -0,0 +1,67 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+ Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider, signature is
+// set to v2. If access and secret are not specified, then
+// regardless of the signature type set, Value will be returned
+// as anonymous.
+func NewStaticV2(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is like NewStaticV2, but sets the signature to v4; the same considerations apply.
+func NewStaticV4(id, secret, token string) *Credentials {
+ return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+ return New(&Static{
+ Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ SignerType: signerType,
+ },
+ })
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ // Anonymous is not an error
+ return Value{SignerType: SignatureAnonymous}, nil
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+ return false
+}
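
Not part of the vendored file: a sketch of the static provider above, assuming a standalone program importing `github.com/minio/minio-go/pkg/credentials`; the key strings are placeholders.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Fixed keys with V4 signing; passing empty keys would instead yield
	// an anonymous Value on Get().
	creds := credentials.NewStaticV4("EXAMPLE-ACCESS-KEY", "EXAMPLE-SECRET-KEY", "")
	v, err := creds.Get()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("v4 signing:", v.SignerType.IsV4())
}
```
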
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
new file mode 100644
index 0000000000000000000000000000000000000000..be45e52f45e5a033113f30f7919c8580927aa3ae
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go
@@ -0,0 +1,293 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rand"
+ "encoding/base64"
+ "errors"
+ "io"
+)
+
+// Crypt mode - encryption or decryption
+type cryptMode int
+
+const (
+ encryptMode cryptMode = iota
+ decryptMode
+)
+
+// CBCSecureMaterials encrypts/decrypts data using AES CBC algorithm
+type CBCSecureMaterials struct {
+
+ // Data stream to encrypt/decrypt
+ stream io.Reader
+
+ // Last internal error
+ err error
+
+ // End of file reached
+ eof bool
+
+ // Holds initial data
+ srcBuf *bytes.Buffer
+
+ // Holds transformed data (encrypted or decrypted)
+ dstBuf *bytes.Buffer
+
+ // Encryption algorithm
+ encryptionKey Key
+
+	// Key used to encrypt/decrypt the data
+ contentKey []byte
+
+ // Encrypted form of contentKey
+ cryptedKey []byte
+
+ // Initialization vector
+ iv []byte
+
+ // matDesc - currently unused
+ matDesc []byte
+
+ // Indicate if we are going to encrypt or decrypt
+ cryptMode cryptMode
+
+ // Helper that encrypts/decrypts data
+ blockMode cipher.BlockMode
+}
+
+// NewCBCSecureMaterials builds new CBC crypter module with
+// the specified encryption key (symmetric or asymmetric)
+func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) {
+ if key == nil {
+ return nil, errors.New("Unable to recognize empty encryption properties")
+ }
+ return &CBCSecureMaterials{
+ srcBuf: bytes.NewBuffer([]byte{}),
+ dstBuf: bytes.NewBuffer([]byte{}),
+ encryptionKey: key,
+ matDesc: []byte("{}"),
+ }, nil
+
+}
+
+// Close closes the internal stream.
+func (s *CBCSecureMaterials) Close() error {
+ closer, ok := s.stream.(io.Closer)
+ if ok {
+ return closer.Close()
+ }
+ return nil
+}
+
+// SetupEncryptMode - tells CBC that we are going to encrypt data
+func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error {
+ // Set mode to encrypt
+ s.cryptMode = encryptMode
+
+ // Set underlying reader
+ s.stream = stream
+
+ s.eof = false
+ s.srcBuf.Reset()
+ s.dstBuf.Reset()
+
+ var err error
+
+ // Generate random content key
+ s.contentKey = make([]byte, aes.BlockSize*2)
+ if _, err := rand.Read(s.contentKey); err != nil {
+ return err
+ }
+ // Encrypt content key
+ s.cryptedKey, err = s.encryptionKey.Encrypt(s.contentKey)
+ if err != nil {
+ return err
+ }
+ // Generate random IV
+ s.iv = make([]byte, aes.BlockSize)
+ if _, err = rand.Read(s.iv); err != nil {
+ return err
+ }
+ // New cipher
+ encryptContentBlock, err := aes.NewCipher(s.contentKey)
+ if err != nil {
+ return err
+ }
+
+ s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv)
+
+ return nil
+}
+
+// SetupDecryptMode - tells CBC that we are going to decrypt data
+func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error {
+ // Set mode to decrypt
+ s.cryptMode = decryptMode
+
+ // Set underlying reader
+ s.stream = stream
+
+ // Reset
+ s.eof = false
+ s.srcBuf.Reset()
+ s.dstBuf.Reset()
+
+ var err error
+
+ // Get IV
+ s.iv, err = base64.StdEncoding.DecodeString(iv)
+ if err != nil {
+ return err
+ }
+
+ // Get encrypted content key
+ s.cryptedKey, err = base64.StdEncoding.DecodeString(key)
+ if err != nil {
+ return err
+ }
+
+ // Decrypt content key
+ s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey)
+ if err != nil {
+ return err
+ }
+
+ // New cipher
+ decryptContentBlock, err := aes.NewCipher(s.contentKey)
+ if err != nil {
+ return err
+ }
+
+ s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv)
+ return nil
+}
+
+// GetIV - return randomly generated IV (per S3 object), base64 encoded.
+func (s *CBCSecureMaterials) GetIV() string {
+ return base64.StdEncoding.EncodeToString(s.iv)
+}
+
+// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded.
+func (s *CBCSecureMaterials) GetKey() string {
+ return base64.StdEncoding.EncodeToString(s.cryptedKey)
+}
+
+// GetDesc - user provided encryption material description in JSON (UTF8) format.
+func (s *CBCSecureMaterials) GetDesc() string {
+ return string(s.matDesc)
+}
+
+// Fill buf with encrypted/decrypted data
+func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) {
+	// Always fill buf from dstBuf at the end of this function
+ defer func() {
+ if s.err != nil {
+ n, err = 0, s.err
+ } else {
+ n, err = s.dstBuf.Read(buf)
+ }
+ }()
+
+ // Return
+ if s.eof {
+ return
+ }
+
+ // Fill dest buffer if its length is less than buf
+ for !s.eof && s.dstBuf.Len() < len(buf) {
+
+ srcPart := make([]byte, aes.BlockSize)
+ dstPart := make([]byte, aes.BlockSize)
+
+ // Fill src buffer
+ for s.srcBuf.Len() < aes.BlockSize*2 {
+ _, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize)
+ if err != nil {
+ break
+ }
+ }
+
+ // Quit immediately for errors other than io.EOF
+ if err != nil && err != io.EOF {
+ s.err = err
+ return
+ }
+
+ // Mark current encrypting/decrypting as finished
+ s.eof = (err == io.EOF)
+
+ if s.eof && s.cryptMode == encryptMode {
+ if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil {
+ s.err = err
+ return
+ }
+ } else {
+ _, _ = s.srcBuf.Read(srcPart)
+ }
+
+ // Crypt srcPart content
+ for len(srcPart) > 0 {
+
+ // Crypt current part
+ s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize])
+
+ // Unpad when this is the last part and we are decrypting
+ if s.eof && s.cryptMode == decryptMode {
+ dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize)
+ if err != nil {
+ s.err = err
+ return
+ }
+ }
+
+ // Send crypted data to dstBuf
+ if _, wErr := s.dstBuf.Write(dstPart); wErr != nil {
+ s.err = wErr
+ return
+ }
+ // Move to the next part
+ srcPart = srcPart[aes.BlockSize:]
+ }
+ }
+ return
+}
+
+// Unpad a set of bytes following PKCS5 algorithm
+func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) {
+ len := len(buf)
+ if len == 0 {
+ return nil, errors.New("buffer is empty")
+ }
+ pad := int(buf[len-1])
+ if pad > len || pad > blockSize {
+ return nil, errors.New("invalid padding size")
+ }
+ return buf[:len-pad], nil
+}
+
+// Pad a set of bytes following PKCS5 algorithm
+func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) {
+ len := len(buf)
+ pad := blockSize - (len % blockSize)
+ padText := bytes.Repeat([]byte{byte(pad)}, pad)
+ return append(buf, padText...), nil
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b8554336ad0178715feb4d35fed35eef099abf6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go
@@ -0,0 +1,53 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package encrypt implements a generic interface to encrypt any stream of data.
+// Currently this package implements two types of encryption:
+// - Symmetric encryption using AES.
+// - Asymmetric encryption using RSA.
+package encrypt
+
+import "io"
+
+// Materials - provides generic interface to encrypt any stream of data.
+type Materials interface {
+
+ // Closes the wrapped stream properly, initiated by the caller.
+ Close() error
+
+ // Returns encrypted/decrypted data, io.Reader compatible.
+ Read(b []byte) (int, error)
+
+ // Get randomly generated IV, base64 encoded.
+ GetIV() (iv string)
+
+ // Get content encrypting key (cek) in encrypted form, base64 encoded.
+ GetKey() (key string)
+
+ // Get user provided encryption material description in
+ // JSON (UTF8) format. This is not used, kept for future.
+ GetDesc() (desc string)
+
+ // Setup encrypt mode, further calls of Read() function
+ // will return the encrypted form of data streamed
+ // by the passed reader
+ SetupEncryptMode(stream io.Reader) error
+
+	// Setup decrypt mode, further calls of Read() function
+ // will return the decrypted form of data streamed
+ // by the passed reader
+ SetupDecryptMode(stream io.Reader, iv string, key string) error
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
new file mode 100644
index 0000000000000000000000000000000000000000..8814845e30ecf01c6672e515edb656e958e38a9a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go
@@ -0,0 +1,165 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package encrypt
+
+import (
+ "crypto/aes"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "errors"
+)
+
+// Key - generic interface to encrypt/decrypt a key.
+// We use it to encrypt/decrypt the content key, which is the key
+// that encrypts/decrypts the object data.
+type Key interface {
+	// Encrypt data using the set encryption key
+	Encrypt([]byte) ([]byte, error)
+	// Decrypt data using the set encryption key
+ Decrypt([]byte) ([]byte, error)
+}
+
+// SymmetricKey - encrypts data with a symmetric master key
+type SymmetricKey struct {
+ masterKey []byte
+}
+
+// Encrypt passed bytes
+func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) {
+ // Initialize an AES encryptor using a master key
+ keyBlock, err := aes.NewCipher(s.masterKey)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ // Pad the key before encryption
+ plain, _ = pkcs5Pad(plain, aes.BlockSize)
+
+ encKey := []byte{}
+ encPart := make([]byte, aes.BlockSize)
+
+ // Encrypt the passed key by block
+ for {
+ if len(plain) < aes.BlockSize {
+ break
+ }
+ // Encrypt the passed key
+ keyBlock.Encrypt(encPart, plain[:aes.BlockSize])
+ // Add the encrypted block to the total encrypted key
+ encKey = append(encKey, encPart...)
+ // Pass to the next plain block
+ plain = plain[aes.BlockSize:]
+ }
+ return encKey, nil
+}
+
+// Decrypt passed bytes
+func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
+ // Initialize AES decrypter
+ keyBlock, err := aes.NewCipher(s.masterKey)
+ if err != nil {
+ return nil, err
+ }
+
+ var plain []byte
+ plainPart := make([]byte, aes.BlockSize)
+
+ // Decrypt the encrypted data block by block
+ for {
+ if len(cipher) < aes.BlockSize {
+ break
+ }
+ keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize])
+ // Add the decrypted block to the total result
+ plain = append(plain, plainPart...)
+ // Pass to the next cipher block
+ cipher = cipher[aes.BlockSize:]
+ }
+
+ // Unpad the resulted plain data
+ plain, err = pkcs5Unpad(plain, aes.BlockSize)
+ if err != nil {
+ return nil, err
+ }
+
+ return plain, nil
+}
+
+// NewSymmetricKey returns a new symmetric key that encrypts/decrypts
+// data using the given AES master key.
+func NewSymmetricKey(b []byte) *SymmetricKey {
+ return &SymmetricKey{masterKey: b}
+}
+
+// AsymmetricKey - struct which encrypts/decrypts data
+// using RSA public/private certificates
+type AsymmetricKey struct {
+ publicKey *rsa.PublicKey
+ privateKey *rsa.PrivateKey
+}
+
+// Encrypt data using public key
+func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) {
+ cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain)
+ if err != nil {
+ return nil, err
+ }
+ return cipher, nil
+}
+
+// Decrypt data using private key
+func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) {
+ cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher)
+ if err != nil {
+ return nil, err
+ }
+ return cipher, nil
+}
+
+// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt
+// data using a pair of private and public keys
+func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) {
+ // Parse private key from passed data
+ priv, err := x509.ParsePKCS8PrivateKey(privData)
+ if err != nil {
+ return nil, err
+ }
+ privKey, ok := priv.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("not a valid private key")
+ }
+
+ // Parse public key from passed data
+ pub, err := x509.ParsePKIXPublicKey(pubData)
+ if err != nil {
+ return nil, err
+ }
+
+ pubKey, ok := pub.(*rsa.PublicKey)
+ if !ok {
+ return nil, errors.New("not a valid public key")
+ }
+
+ // Associate the private key with the passed public key
+ privKey.PublicKey = *pubKey
+
+ return &AsymmetricKey{
+ publicKey: pubKey,
+ privateKey: privKey,
+ }, nil
+}
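
Not part of the vendored files: a sketch that combines the keys and CBC materials shown above, assuming a standalone program importing `github.com/minio/minio-go/pkg/encrypt`; the 32-byte master key is a placeholder.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	// A 32-byte master key selects AES-256 for wrapping the random content key.
	masterKey := []byte("32-bytes-long-master-key-here!!!")
	key := encrypt.NewSymmetricKey(masterKey)

	materials, err := encrypt.NewCBCSecureMaterials(key)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Encrypt an in-memory payload by reading the transformed stream.
	if err := materials.SetupEncryptMode(bytes.NewReader([]byte("hello object"))); err != nil {
		fmt.Println(err)
		return
	}
	ciphertext, err := ioutil.ReadAll(materials)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("ciphertext bytes:", len(ciphertext), "iv:", materials.GetIV())
}
```
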
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
new file mode 100644
index 0000000000000000000000000000000000000000..078bcd1db684ffa279080a348e966de6610c2b0e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go
@@ -0,0 +1,115 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import "github.com/minio/minio-go/pkg/set"
+
+// ConditionKeyMap - map of policy condition key and value.
+type ConditionKeyMap map[string]set.StringSet
+
+// Add - adds key and value. The value is appended if the key already exists.
+func (ckm ConditionKeyMap) Add(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ ckm[key] = v.Union(value)
+ } else {
+ ckm[key] = set.CopyStringSet(value)
+ }
+}
+
+// Remove - removes the value of the given key. If the key is empty after removal, the key is also removed.
+func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ if value != nil {
+ ckm[key] = v.Difference(value)
+ }
+
+ if ckm[key].IsEmpty() {
+ delete(ckm, key)
+ }
+ }
+}
+
+// RemoveKey - removes key and its value.
+func (ckm ConditionKeyMap) RemoveKey(key string) {
+ if _, ok := ckm[key]; ok {
+ delete(ckm, key)
+ }
+}
+
+// CopyConditionKeyMap - returns new copy of given ConditionKeyMap.
+func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap {
+ out := make(ConditionKeyMap)
+
+ for k, v := range condKeyMap {
+ out[k] = set.CopyStringSet(v)
+ }
+
+ return out
+}
+
+// mergeConditionKeyMap - returns a new ConditionKeyMap which contains merged key/value of given two ConditionKeyMap.
+func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap {
+ out := CopyConditionKeyMap(condKeyMap1)
+
+ for k, v := range condKeyMap2 {
+ if ev, ok := out[k]; ok {
+ out[k] = ev.Union(v)
+ } else {
+ out[k] = set.CopyStringSet(v)
+ }
+ }
+
+ return out
+}
+
+// ConditionMap - map of condition and conditional values.
+type ConditionMap map[string]ConditionKeyMap
+
+// Add - adds condition key and condition value. The value is appended if key already exists.
+func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) {
+ if v, ok := cond[condKey]; ok {
+ cond[condKey] = mergeConditionKeyMap(v, condKeyMap)
+ } else {
+ cond[condKey] = CopyConditionKeyMap(condKeyMap)
+ }
+}
+
+// Remove - removes condition key and its value.
+func (cond ConditionMap) Remove(condKey string) {
+ if _, ok := cond[condKey]; ok {
+ delete(cond, condKey)
+ }
+}
+
+// mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap.
+func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap {
+ out := make(ConditionMap)
+
+ for k, v := range condMap1 {
+ out[k] = CopyConditionKeyMap(v)
+ }
+
+ for k, v := range condMap2 {
+ if ev, ok := out[k]; ok {
+ out[k] = mergeConditionKeyMap(ev, v)
+ } else {
+ out[k] = CopyConditionKeyMap(v)
+ }
+ }
+
+ return out
+}
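
Not part of the vendored file: a sketch of building policy conditions with the maps above, assuming a standalone program importing `github.com/minio/minio-go/pkg/policy` and the companion `github.com/minio/minio-go/pkg/set` package already used by this file.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/policy"
	"github.com/minio/minio-go/pkg/set"
)

func main() {
	ckm := make(policy.ConditionKeyMap)
	ckm.Add("s3:prefix", set.CreateStringSet("uploads/"))
	ckm.Add("s3:prefix", set.CreateStringSet("logs/")) // unioned with the existing values

	cond := make(policy.ConditionMap)
	cond.Add("StringEquals", ckm)
	fmt.Println(cond)
}
```
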
diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
new file mode 100644
index 0000000000000000000000000000000000000000..b2d46e1786b4992e78ebd47052088c9140f3c131
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go
@@ -0,0 +1,634 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// BucketPolicy - Bucket level policy.
+type BucketPolicy string
+
+// Different types of Policies currently supported for buckets.
+const (
+ BucketPolicyNone BucketPolicy = "none"
+ BucketPolicyReadOnly = "readonly"
+ BucketPolicyReadWrite = "readwrite"
+ BucketPolicyWriteOnly = "writeonly"
+)
+
+// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
+func (p BucketPolicy) IsValidBucketPolicy() bool {
+ switch p {
+ case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
+ return true
+ }
+ return false
+}
+
+// Resource prefix for all aws resources.
+const awsResourcePrefix = "arn:aws:s3:::"
+
+// Common bucket actions for both read and write policies.
+var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation")
+
+// Read only bucket actions.
+var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket")
+
+// Write only bucket actions.
+var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads")
+
+// Read only object actions.
+var readOnlyObjectActions = set.CreateStringSet("s3:GetObject")
+
+// Write only object actions.
+var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject")
+
+// Read and write object actions.
+var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions)
+
+// All valid bucket and object actions.
+var validActions = commonBucketActions.
+ Union(readOnlyBucketActions).
+ Union(writeOnlyBucketActions).
+ Union(readOnlyObjectActions).
+ Union(writeOnlyObjectActions)
+
+var startsWithFunc = func(resource string, resourcePrefix string) bool {
+ return strings.HasPrefix(resource, resourcePrefix)
+}
+
+// User - canonical users list.
+type User struct {
+ AWS set.StringSet `json:"AWS,omitempty"`
+ CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"`
+}
+
+// Statement - minio policy statement
+type Statement struct {
+ Actions set.StringSet `json:"Action"`
+ Conditions ConditionMap `json:"Condition,omitempty"`
+ Effect string
+ Principal User `json:"Principal"`
+ Resources set.StringSet `json:"Resource"`
+ Sid string
+}
+
+// BucketAccessPolicy - minio policy collection
+type BucketAccessPolicy struct {
+ Version string // date in YYYY-MM-DD format
+ Statements []Statement `json:"Statement"`
+}
+
+// isValidStatement - returns whether given statement is valid to process for given bucket name.
+func isValidStatement(statement Statement, bucketName string) bool {
+ if statement.Actions.Intersection(validActions).IsEmpty() {
+ return false
+ }
+
+ if statement.Effect != "Allow" {
+ return false
+ }
+
+ if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") {
+ return false
+ }
+
+ bucketResource := awsResourcePrefix + bucketName
+ if statement.Resources.Contains(bucketResource) {
+ return true
+ }
+
+ if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() {
+ return false
+ }
+
+ return true
+}
+
+// Returns new statements with bucket actions for given policy.
+func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName)
+
+ statement := Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+
+ if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ if prefix != "" {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix))
+ condMap := make(ConditionMap)
+ condMap.Add("StringEquals", condKeyMap)
+ statement.Conditions = condMap
+ }
+ statements = append(statements, statement)
+ }
+
+ if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+ }
+
+ return statements
+}
+
+// Returns new statements containing object actions for given policy.
+func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ statement := Statement{
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"),
+ Sid: "",
+ }
+
+ if policy == BucketPolicyReadOnly {
+ statement.Actions = readOnlyObjectActions
+ } else if policy == BucketPolicyWriteOnly {
+ statement.Actions = writeOnlyObjectActions
+ } else if policy == BucketPolicyReadWrite {
+ statement.Actions = readWriteObjectActions
+ }
+
+ statements = append(statements, statement)
+ return statements
+}
+
+// Returns new statements for given policy, bucket and prefix.
+func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ ns := newBucketStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ ns = newObjectStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ return statements
+}
+
+// Returns whether given bucket statements are used by other than given prefix statements.
+func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) {
+ resourcePrefix := awsResourcePrefix + bucketName + "/"
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ for _, s := range statements {
+ if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() {
+ if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnlyInUse = true
+ }
+
+ if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnlyInUse = true
+ }
+ }
+ if readOnlyInUse && writeOnlyInUse {
+ break
+ }
+ }
+
+ return readOnlyInUse, writeOnlyInUse
+}
+
+// Removes object actions in given statement.
+func removeObjectActions(statement Statement, objectResource string) Statement {
+ if statement.Conditions == nil {
+ if len(statement.Resources) > 1 {
+ statement.Resources.Remove(objectResource)
+ } else {
+ statement.Actions = statement.Actions.Difference(readOnlyObjectActions)
+ statement.Actions = statement.Actions.Difference(writeOnlyObjectActions)
+ }
+ }
+
+ return statement
+}
+
+// Removes bucket actions for given policy in given statement.
+func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement {
+ removeReadOnly := func() {
+ if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
+ return
+ }
+
+ if statement.Conditions == nil {
+ statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
+ return
+ }
+
+ if prefix != "" {
+ stringEqualsValue := statement.Conditions["StringEquals"]
+ values := set.NewStringSet()
+ if stringEqualsValue != nil {
+ values = stringEqualsValue["s3:prefix"]
+ if values == nil {
+ values = set.NewStringSet()
+ }
+ }
+
+ values.Remove(prefix)
+
+ if stringEqualsValue != nil {
+ if values.IsEmpty() {
+ delete(stringEqualsValue, "s3:prefix")
+ }
+ if len(stringEqualsValue) == 0 {
+ delete(statement.Conditions, "StringEquals")
+ }
+ }
+
+ if len(statement.Conditions) == 0 {
+ statement.Conditions = nil
+ statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
+ }
+ }
+ }
+
+ removeWriteOnly := func() {
+ if statement.Conditions == nil {
+ statement.Actions = statement.Actions.Difference(writeOnlyBucketActions)
+ }
+ }
+
+ if len(statement.Resources) > 1 {
+ statement.Resources.Remove(bucketResource)
+ } else {
+ if !readOnlyInUse {
+ removeReadOnly()
+ }
+
+ if !writeOnlyInUse {
+ removeWriteOnly()
+ }
+ }
+
+ return statement
+}
+
+// Returns statements containing removed actions/statements for given
+// policy, bucket name and prefix.
+func removeStatements(statements []Statement, bucketName string, prefix string) []Statement {
+ bucketResource := awsResourcePrefix + bucketName
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+ readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix)
+
+ out := []Statement{}
+ readOnlyBucketStatements := []Statement{}
+ s3PrefixValues := set.NewStringSet()
+
+ for _, statement := range statements {
+ if !isValidStatement(statement, bucketName) {
+ out = append(out, statement)
+ continue
+ }
+
+ if statement.Resources.Contains(bucketResource) {
+ if statement.Conditions != nil {
+ statement = removeBucketActions(statement, prefix, bucketResource, false, false)
+ } else {
+ statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse)
+ }
+ } else if statement.Resources.Contains(objectResource) {
+ statement = removeObjectActions(statement, objectResource)
+ }
+
+ if !statement.Actions.IsEmpty() {
+ if statement.Resources.Contains(bucketResource) &&
+ statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") {
+
+ if statement.Conditions != nil {
+ stringEqualsValue := statement.Conditions["StringEquals"]
+ values := set.NewStringSet()
+ if stringEqualsValue != nil {
+ values = stringEqualsValue["s3:prefix"]
+ if values == nil {
+ values = set.NewStringSet()
+ }
+ }
+ s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string {
+ return bucketResource + "/" + v + "*"
+ }))
+ } else if !s3PrefixValues.IsEmpty() {
+ readOnlyBucketStatements = append(readOnlyBucketStatements, statement)
+ continue
+ }
+ }
+ out = append(out, statement)
+ }
+ }
+
+ skipBucketStatement := true
+ resourcePrefix := awsResourcePrefix + bucketName + "/"
+ for _, statement := range out {
+ if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() &&
+ s3PrefixValues.Intersection(statement.Resources).IsEmpty() {
+ skipBucketStatement = false
+ break
+ }
+ }
+
+ for _, statement := range readOnlyBucketStatements {
+ if skipBucketStatement &&
+ statement.Resources.Contains(bucketResource) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ continue
+ }
+
+ out = append(out, statement)
+ }
+
+ if len(out) == 1 {
+ statement := out[0]
+ if statement.Resources.Contains(bucketResource) &&
+ statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ out = []Statement{}
+ }
+ }
+
+ return out
+}
+
+// Appends the given statement to the statement list, keeping statements unique.
+// - If the statement already exists in the list, it is ignored.
+// - If a statement exists with different conditions, the conditions are merged.
+// - Otherwise the statement is appended to the list.
+func appendStatement(statements []Statement, statement Statement) []Statement {
+ for i, s := range statements {
+ if s.Actions.Equals(statement.Actions) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Equals(statement.Principal.AWS) &&
+ reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ statements[i].Resources = s.Resources.Union(statement.Resources)
+ return statements
+ } else if s.Resources.Equals(statement.Resources) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Equals(statement.Principal.AWS) &&
+ reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ statements[i].Actions = s.Actions.Union(statement.Actions)
+ return statements
+ }
+
+ if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) &&
+ s.Actions.Intersection(statement.Actions).Equals(statement.Actions) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) {
+ if reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ return statements
+ }
+ if s.Conditions != nil && statement.Conditions != nil {
+ if s.Resources.Equals(statement.Resources) {
+ statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions)
+ return statements
+ }
+ }
+ }
+ }
+
+ if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) {
+ return append(statements, statement)
+ }
+
+ return statements
+}
+
+// Appends two statement lists.
+func appendStatements(statements []Statement, appendStatements []Statement) []Statement {
+ for _, s := range appendStatements {
+ statements = appendStatement(statements, s)
+ }
+
+ return statements
+}
+
+// Returns policy of given bucket statement.
+func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) {
+ if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) {
+ return commonFound, readOnly, writeOnly
+ }
+
+ if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
+ statement.Conditions == nil {
+ commonFound = true
+ }
+
+ if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) &&
+ statement.Conditions == nil {
+ writeOnly = true
+ }
+
+ if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
+ if prefix != "" && statement.Conditions != nil {
+ if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok {
+ if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok {
+ if s3PrefixValues.Contains(prefix) {
+ readOnly = true
+ }
+ }
+ } else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok {
+ if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok {
+ if !s3PrefixValues.Contains(prefix) {
+ readOnly = true
+ }
+ }
+ }
+ } else if prefix == "" && statement.Conditions == nil {
+ readOnly = true
+ } else if prefix != "" && statement.Conditions == nil {
+ readOnly = true
+ }
+ }
+
+ return commonFound, readOnly, writeOnly
+}
+
+// Returns policy of given object statement.
+func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
+ if statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnly = true
+ }
+ if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnly = true
+ }
+ }
+
+ return readOnly, writeOnly
+}
+
+// GetPolicy - Returns policy of given bucket name, prefix in given statements.
+func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
+ bucketResource := awsResourcePrefix + bucketName
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ bucketCommonFound := false
+ bucketReadOnly := false
+ bucketWriteOnly := false
+ matchedResource := ""
+ objReadOnly := false
+ objWriteOnly := false
+
+ for _, s := range statements {
+ matchedObjResources := set.NewStringSet()
+ if s.Resources.Contains(objectResource) {
+ matchedObjResources.Add(objectResource)
+ } else {
+ matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource)
+ }
+
+ if !matchedObjResources.IsEmpty() {
+ readOnly, writeOnly := getObjectPolicy(s)
+ for resource := range matchedObjResources {
+ if len(matchedResource) < len(resource) {
+ objReadOnly = readOnly
+ objWriteOnly = writeOnly
+ matchedResource = resource
+ } else if len(matchedResource) == len(resource) {
+ objReadOnly = objReadOnly || readOnly
+ objWriteOnly = objWriteOnly || writeOnly
+ matchedResource = resource
+ }
+ }
+ } else if s.Resources.Contains(bucketResource) {
+ commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix)
+ bucketCommonFound = bucketCommonFound || commonFound
+ bucketReadOnly = bucketReadOnly || readOnly
+ bucketWriteOnly = bucketWriteOnly || writeOnly
+ }
+ }
+
+ policy := BucketPolicyNone
+ if bucketCommonFound {
+ if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly {
+ policy = BucketPolicyReadWrite
+ } else if bucketReadOnly && objReadOnly {
+ policy = BucketPolicyReadOnly
+ } else if bucketWriteOnly && objWriteOnly {
+ policy = BucketPolicyWriteOnly
+ }
+ }
+
+ return policy
+}
+
+// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements.
+func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
+ policyRules := map[string]BucketPolicy{}
+ objResources := set.NewStringSet()
+ // Search all resources related to objects policy
+ for _, s := range statements {
+ for r := range s.Resources {
+ if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") {
+ objResources.Add(r)
+ }
+ }
+ }
+ // Treat each policy resource as an actual object and fetch its policy
+ for r := range objResources {
+ // Remember the trailing '*' so it can be restored in the map key below
+ asterisk := ""
+ if strings.HasSuffix(r, "*") {
+ r = r[:len(r)-1]
+ asterisk = "*"
+ }
+ objectPath := r[len(awsResourcePrefix+bucketName)+1:]
+ p := GetPolicy(statements, bucketName, objectPath)
+ policyRules[bucketName+"/"+objectPath+asterisk] = p
+ }
+ return policyRules
+}
+
+// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended.
+func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
+ out := removeStatements(statements, bucketName, prefix)
+ // fmt.Println("out = ")
+ // printstatement(out)
+ ns := newStatements(policy, bucketName, prefix)
+ // fmt.Println("ns = ")
+ // printstatement(ns)
+
+ rv := appendStatements(out, ns)
+ // fmt.Println("rv = ")
+ // printstatement(rv)
+
+ return rv
+}
+
+// Match function matches wild cards in 'pattern' for resource.
+func resourceMatch(pattern, resource string) bool {
+ if pattern == "" {
+ return resource == pattern
+ }
+ if pattern == "*" {
+ return true
+ }
+ parts := strings.Split(pattern, "*")
+ if len(parts) == 1 {
+ return resource == pattern
+ }
+ tGlob := strings.HasSuffix(pattern, "*")
+ end := len(parts) - 1
+ if !strings.HasPrefix(resource, parts[0]) {
+ return false
+ }
+ for i := 1; i < end; i++ {
+ if !strings.Contains(resource, parts[i]) {
+ return false
+ }
+ idx := strings.Index(resource, parts[i]) + len(parts[i])
+ resource = resource[idx:]
+ }
+ return tGlob || strings.HasSuffix(resource, parts[end])
+}
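
For reviewers who have not seen these vendored policy helpers before, here is a minimal sketch of how the exported entry points compose; the `pkg/policy` import path is assumed from the vendored layout and is not shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/policy" // assumed location of the vendored policy helpers
)

func main() {
	// Start from an empty statement list and grant anonymous read-only
	// access to objects under mybucket/photos/.
	statements := policy.SetPolicy(nil, policy.BucketPolicyReadOnly, "mybucket", "photos/")

	// Resolve the effective policy back for the same bucket and prefix.
	fmt.Println(policy.GetPolicy(statements, "mybucket", "photos/")) // readonly

	// Per-prefix view of every object policy derived from the statements.
	fmt.Println(policy.GetPolicies(statements, "mybucket"))
}
```

SetPolicy first strips statements that already target the bucket/prefix (removeStatements) and then appends freshly built ones (newStatements), which is why repeated calls do not accumulate duplicates.
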
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
new file mode 100644
index 0000000000000000000000000000000000000000..d831436cd202326669013e517003ccbc191948e9
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
@@ -0,0 +1,307 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Reference for constants used below -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
+const (
+ streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
+ streamingEncoding = "aws-chunked"
+ streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
+ emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+ payloadChunkSize = 64 * 1024
+ chunkSigConstLen = 17 // ";chunk-signature="
+ signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
+ crlfLen = 2 // CRLF
+)
+
+// Request headers to be ignored while calculating seed signature for
+// a request.
+var ignoredStreamingHeaders = map[string]bool{
+ "Authorization": true,
+ "User-Agent": true,
+ "Content-Type": true,
+}
+
+// getSignedChunkLength - calculates the length of chunk metadata
+func getSignedChunkLength(chunkDataSize int64) int64 {
+ return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
+ chunkSigConstLen +
+ signatureStrLen +
+ crlfLen +
+ chunkDataSize +
+ crlfLen
+}
+
+// getStreamLength - calculates the length of the overall stream (data + metadata)
+func getStreamLength(dataLen, chunkSize int64) int64 {
+ if dataLen <= 0 {
+ return 0
+ }
+
+ chunksCount := int64(dataLen / chunkSize)
+ remainingBytes := int64(dataLen % chunkSize)
+ streamLen := int64(0)
+ streamLen += chunksCount * getSignedChunkLength(chunkSize)
+ if remainingBytes > 0 {
+ streamLen += getSignedChunkLength(remainingBytes)
+ }
+ streamLen += getSignedChunkLength(0)
+ return streamLen
+}
+
+// buildChunkStringToSign - returns the string to sign given chunk data
+// and previous signature.
+func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
+ stringToSignParts := []string{
+ streamingPayloadHdr,
+ t.Format(iso8601DateFormat),
+ getScope(region, t),
+ previousSig,
+ emptySHA256,
+ hex.EncodeToString(sum256(chunkData)),
+ }
+
+ return strings.Join(stringToSignParts, "\n")
+}
+
+// prepareStreamingRequest - prepares a request with appropriate
+// headers before computing the seed signature.
+func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
+ // Set x-amz-content-sha256 header.
+ req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+ req.Header.Add("Content-Encoding", streamingEncoding)
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+
+ // Set content length with streaming signature for each chunk included.
+ req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
+ req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
+}
+
+// buildChunkHeader - returns the chunk header.
+// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
+func buildChunkHeader(chunkLen int64, signature string) []byte {
+ return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
+}
+
+// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
+func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+ previousSignature, secretAccessKey string) string {
+
+ chunkStringToSign := buildChunkStringToSign(reqTime, region,
+ previousSignature, chunkData)
+ signingKey := getSigningKey(secretAccessKey, region, reqTime)
+ return getSignature(signingKey, chunkStringToSign)
+}
+
+// getSeedSignature - returns the seed signature for a given request.
+func (s *StreamingReader) setSeedSignature(req *http.Request) {
+ // Get canonical request
+ canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest)
+
+ signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime)
+
+ // Calculate signature.
+ s.seedSignature = getSignature(signingKey, stringToSign)
+}
+
+// StreamingReader implements chunked upload signature as a reader on
+// top of req.Body's ReaderCloser chunk header;data;... repeat
+type StreamingReader struct {
+ accessKeyID string
+ secretAccessKey string
+ sessionToken string
+ region string
+ prevSignature string
+ seedSignature string
+ contentLen int64 // Content-Length from req header
+ baseReadCloser io.ReadCloser // underlying io.Reader
+ bytesRead int64 // bytes read from underlying io.Reader
+ buf bytes.Buffer // holds signed chunk
+ chunkBuf []byte // holds raw data read from req Body
+ chunkBufLen int // no. of bytes read so far into chunkBuf
+ done bool // done reading the underlying reader to EOF
+ reqTime time.Time
+ chunkNum int
+ totalChunks int
+ lastChunkSize int
+}
+
+// signChunk - signs a chunk read from s.baseReader of chunkLen size.
+func (s *StreamingReader) signChunk(chunkLen int) {
+ // Compute chunk signature for next header
+ signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+ s.region, s.prevSignature, s.secretAccessKey)
+
+ // For next chunk signature computation
+ s.prevSignature = signature
+
+ // Write chunk header into streaming buffer
+ chunkHdr := buildChunkHeader(int64(chunkLen), signature)
+ s.buf.Write(chunkHdr)
+
+ // Write chunk data into streaming buffer
+ s.buf.Write(s.chunkBuf[:chunkLen])
+
+ // Write the chunk trailer.
+ s.buf.Write([]byte("\r\n"))
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// setStreamingAuthHeader - builds and sets authorization header value
+// for streaming signature.
+func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
+ credential := GetCredential(s.accessKeyID, s.region, s.reqTime)
+ authParts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
+ "Signature=" + s.seedSignature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(authParts, ",")
+ req.Header.Set("Authorization", auth)
+}
+
+// StreamingSignV4 - provides chunked upload signatureV4 support by
+// implementing io.Reader.
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
+ region string, dataLen int64, reqTime time.Time) *http.Request {
+
+ // Set headers needed for streaming signature.
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
+
+ if req.Body == nil {
+ req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
+ }
+
+ stReader := &StreamingReader{
+ baseReadCloser: req.Body,
+ accessKeyID: accessKeyID,
+ secretAccessKey: secretAccessKey,
+ sessionToken: sessionToken,
+ region: region,
+ reqTime: reqTime,
+ chunkBuf: make([]byte, payloadChunkSize),
+ contentLen: dataLen,
+ chunkNum: 1,
+ totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
+ lastChunkSize: int(dataLen % payloadChunkSize),
+ }
+
+ // Add the request headers required for chunk upload signing.
+
+ // Compute the seed signature.
+ stReader.setSeedSignature(req)
+
+ // Set the authorization header with the seed signature.
+ stReader.setStreamingAuthHeader(req)
+
+ // Set seed signature as prevSignature for subsequent
+ // streaming signing process.
+ stReader.prevSignature = stReader.seedSignature
+ req.Body = stReader
+
+ return req
+}
+
+// Read - this method performs chunk upload signature providing a
+// io.Reader interface.
+func (s *StreamingReader) Read(buf []byte) (int, error) {
+ switch {
+ // After the last chunk is read from underlying reader, we
+ // never re-fill s.buf.
+ case s.done:
+
+ // s.buf will be (re-)filled with the next chunk when it holds
+ // fewer bytes than asked for.
+ case s.buf.Len() < len(buf):
+ s.chunkBufLen = 0
+ for {
+ n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
+ // Usually we validate `err` first, but in this case
+ // we are validating n > 0 for the following reasons.
+ //
+ // 1. n > 0, err is one of io.EOF, nil (near end of stream)
+ // A Reader returning a non-zero number of bytes at the end
+ // of the input stream may return either err == EOF or err == nil
+ //
+ // 2. n == 0, err is io.EOF (actual end of stream)
+ //
+ // Callers should always process the n > 0 bytes returned
+ // before considering the error err.
+ if n1 > 0 {
+ s.chunkBufLen += n1
+ s.bytesRead += int64(n1)
+
+ if s.chunkBufLen == payloadChunkSize ||
+ (s.chunkNum == s.totalChunks-1 &&
+ s.chunkBufLen == s.lastChunkSize) {
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(s.chunkBufLen)
+ break
+ }
+ }
+ if err != nil {
+ if err == io.EOF {
+ // No more data left in baseReader - last chunk.
+ // Done reading the last chunk from baseReader.
+ s.done = true
+
+ // bytes read from baseReader different than
+ // content length provided.
+ if s.bytesRead != s.contentLen {
+ return 0, io.ErrUnexpectedEOF
+ }
+
+ // Sign the chunk and write it to s.buf.
+ s.signChunk(0)
+ break
+ }
+ return 0, err
+ }
+
+ }
+ }
+ return s.buf.Read(buf)
+}
+
+// Close - this method makes underlying io.ReadCloser's Close method available.
+func (s *StreamingReader) Close() error {
+ return s.baseReadCloser.Close()
+}
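
A hedged sketch of how the streaming signer is driven; the endpoint, bucket and credentials below are placeholders:

```go
package main

import (
	"bytes"
	"net/http"
	"time"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	payload := []byte("hello world")
	req, _ := http.NewRequest("PUT", "https://s3.amazonaws.com/mybucket/hello.txt",
		bytes.NewReader(payload))

	// Wraps req.Body in a StreamingReader, sets x-amz-content-sha256 to the
	// streaming algorithm and computes the seed signature.
	req = s3signer.StreamingSignV4(req, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1",
		int64(len(payload)), time.Now().UTC())

	// Each Read() from req.Body now yields framed, signed chunks:
	// "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n", followed by a final
	// zero-length chunk that closes the stream.
	resp, err := http.DefaultClient.Do(req)
	if err == nil {
		resp.Body.Close()
	}
}
```

For this 11-byte payload, getStreamLength accounts for one 11-byte signed chunk plus the terminating zero-length chunk, so Content-Length ends up larger than x-amz-decoded-content-length.
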
diff --git a/vendor/github.com/minio/minio-go/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
similarity index 70%
rename from vendor/github.com/minio/minio-go/request-signature-v2.go
rename to vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
index c14ce2aabdabbaade913b51addc5770be8f63a84..39c4e018701f26e7dc2fd2b8be162367d09fbeb1 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v2.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"bytes"
@@ -29,6 +29,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -40,50 +42,41 @@ const (
func encodeURL2Path(u *url.URL) (path string) {
// Encode URL path.
if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 {
- hostSplits := strings.SplitN(u.Host, ".", 4)
- // First element is the bucket name.
- bucketName := hostSplits[0]
+ bucketName := u.Host[:strings.LastIndex(u.Host, ".s3")]
path = "/" + bucketName
path += u.Path
- path = urlEncodePath(path)
+ path = s3utils.EncodePath(path)
return
}
if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
path += u.Path
- path = urlEncodePath(path)
+ path = s3utils.EncodePath(path)
return
}
- path = urlEncodePath(u.Path)
+ path = s3utils.EncodePath(u.Path)
return
}
-// preSignV2 - presign the request in following style.
+// PreSignV2 - presign the request in following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
-func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
- d := time.Now().UTC()
- // Add date if not present.
- if date := req.Header.Get("Date"); date == "" {
- req.Header.Set("Date", d.Format(http.TimeFormat))
- }
-
- // Get encoded URL path.
- path := encodeURL2Path(req.URL)
- if len(req.URL.Query()) > 0 {
- // Keep the usual queries unescaped for string to sign.
- query, _ := url.QueryUnescape(queryEncode(req.URL.Query()))
- path = path + "?" + query
- }
+ d := time.Now().UTC()
// Find epoch expires when the request will expire.
epochExpires := d.Unix() + expires
- // Get string to sign.
- stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path)
+ // Add expires header if not present.
+ if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+ req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+ }
+
+ // Get presigned string to sign.
+ stringToSign := preStringifyHTTPReq(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -102,18 +95,18 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
// Encode query and save.
- req.URL.RawQuery = queryEncode(query)
+ req.URL.RawQuery = s3utils.QueryEncode(query)
// Save signature finally.
- req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
+ req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
// Return.
return &req
}
-// postPresignSignatureV2 - presigned signature for PostPolicy
+// PostPresignSignatureV2 - presigned signature for PostPolicy
// request.
-func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
@@ -136,8 +129,8 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
//
// CanonicalizedProtocolHeaders =
-// signV2 sign the request before Do() (AWS Signature Version 2).
-func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
+// SignV2 sign the request before Do() (AWS Signature Version 2).
+func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -152,7 +145,7 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
}
// Calculate HMAC for secretAccessKey.
- stringToSign := getStringToSignV2(req)
+ stringToSign := stringifyHTTPReq(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -169,6 +162,34 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
return &req
}
+// From the Amazon docs:
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Expires + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+func preStringifyHTTPReq(req http.Request) string {
+ buf := new(bytes.Buffer)
+ // Write standard headers.
+ writePreSignV2Headers(buf, req)
+ // Write canonicalized protocol headers if any.
+ writeCanonicalizedHeaders(buf, req)
+ // Write canonicalized Query resources if any.
+ isPreSign := true
+ writeCanonicalizedResource(buf, req, isPreSign)
+ return buf.String()
+}
+
+// writePreSignV2Headers - write preSign v2 required headers.
+func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Expires") + "\n")
+}
+
// From the Amazon docs:
//
// StringToSign = HTTP-Verb + "\n" +
@@ -177,27 +198,24 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func getStringToSignV2(req http.Request) string {
+func stringifyHTTPReq(req http.Request) string {
buf := new(bytes.Buffer)
// Write standard headers.
- writeDefaultHeaders(buf, req)
+ writeSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
- writeCanonicalizedResource(buf, req)
+ isPreSign := false
+ writeCanonicalizedResource(buf, req, isPreSign)
return buf.String()
}
-// writeDefaultHeader - write all default necessary headers
-func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) {
- buf.WriteString(req.Method)
- buf.WriteByte('\n')
- buf.WriteString(req.Header.Get("Content-Md5"))
- buf.WriteByte('\n')
- buf.WriteString(req.Header.Get("Content-Type"))
- buf.WriteByte('\n')
- buf.WriteString(req.Header.Get("Date"))
- buf.WriteByte('\n')
+// writeSignV2Headers - write signV2 required headers.
+func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
+ buf.WriteString(req.Method + "\n")
+ buf.WriteString(req.Header.Get("Content-Md5") + "\n")
+ buf.WriteString(req.Header.Get("Content-Type") + "\n")
+ buf.WriteString(req.Header.Get("Date") + "\n")
}
// writeCanonicalizedHeaders - write canonicalized headers.
@@ -239,18 +257,13 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
// have signature-related issues
var resourceList = []string{
"acl",
+ "delete",
"location",
"logging",
"notification",
"partNumber",
"policy",
"requestPayment",
- "response-cache-control",
- "response-content-disposition",
- "response-content-encoding",
- "response-content-language",
- "response-content-type",
- "response-expires",
"torrent",
"uploadId",
"uploads",
@@ -265,13 +278,22 @@ var resourceList = []string{
// CanonicalizedResource = [ "/" + Bucket ] +
// +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) {
// Save request URL.
requestURL := req.URL
// Get encoded URL path.
path := encodeURL2Path(requestURL)
+ if isPreSign {
+ // Get encoded URL path.
+ if len(requestURL.Query()) > 0 {
+ // Keep the usual queries unescaped for string to sign.
+ query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
+ path = path + "?" + query
+ }
+ buf.WriteString(path)
+ return
+ }
buf.WriteString(path)
-
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
@@ -292,7 +314,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
// Request parameters
if len(vv[0]) > 0 {
buf.WriteByte('=')
- buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1))
+ buf.WriteString(vv[0])
}
}
}
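
The V2 helpers are now exported, so other packages can call them directly. A minimal sketch with placeholder credentials:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	// Header-based V2 signature: sets the Authorization header.
	req, _ := http.NewRequest("GET", "https://mybucket.s3.amazonaws.com/hello.txt", nil)
	signed := s3signer.SignV2(*req, "ACCESS-KEY", "SECRET-KEY")
	fmt.Println(signed.Header.Get("Authorization"))

	// Query-string presigned URL, valid for 10 minutes; the Expires header
	// now feeds preStringifyHTTPReq instead of the Date header.
	preReq, _ := http.NewRequest("GET", "https://mybucket.s3.amazonaws.com/hello.txt", nil)
	presigned := s3signer.PreSignV2(*preReq, "ACCESS-KEY", "SECRET-KEY", 600)
	fmt.Println(presigned.URL.String())
}
```
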
diff --git a/vendor/github.com/minio/minio-go/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
similarity index 82%
rename from vendor/github.com/minio/minio-go/request-signature-v4.go
rename to vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
index dfd11e9e448f66e5039edee302178d884fcd87f5..0d75dc162697f93536cfb90ba537540c856e2602 100644
--- a/vendor/github.com/minio/minio-go/request-signature-v4.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-package minio
+package s3signer
import (
"bytes"
@@ -24,6 +24,8 @@ import (
"strconv"
"strings"
"time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@@ -68,7 +70,7 @@ const (
///
/// Is skipped for obvious reasons
///
-var ignoredHeaders = map[string]bool{
+var v4IgnoredHeaders = map[string]bool{
"Authorization": true,
"Content-Type": true,
"Content-Length": true,
@@ -101,8 +103,8 @@ func getScope(location string, t time.Time) string {
return scope
}
-// getCredential generate a credential string.
-func getCredential(accessKeyID, location string, t time.Time) string {
+// GetCredential generate a credential string.
+func GetCredential(accessKeyID, location string, t time.Time) string {
scope := getScope(location, t)
return accessKeyID + "/" + scope
}
@@ -113,14 +115,14 @@ func getHashedPayload(req http.Request) string {
hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
if hashedPayload == "" {
// Presign does not have a payload, use S3 recommended value.
- hashedPayload = "UNSIGNED-PAYLOAD"
+ hashedPayload = unsignedPayload
}
return hashedPayload
}
// getCanonicalHeaders generate a list of request headers for
// signature.
-func getCanonicalHeaders(req http.Request) string {
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
vals := make(map[string][]string)
for k, vv := range req.Header {
@@ -159,7 +161,7 @@ func getCanonicalHeaders(req http.Request) string {
// getSignedHeaders generate all signed request headers.
// i.e lexically sorted, semicolon-separated list of lowercase
// request header names.
-func getSignedHeaders(req http.Request) string {
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
for k := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
@@ -181,14 +183,14 @@ func getSignedHeaders(req http.Request) string {
// \n
// \n
//
-func getCanonicalRequest(req http.Request) string {
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
- urlEncodePath(req.URL.Path),
+ s3utils.EncodePath(req.URL.Path),
req.URL.RawQuery,
- getCanonicalHeaders(req),
- getSignedHeaders(req),
+ getCanonicalHeaders(req, ignoredHeaders),
+ getSignedHeaders(req, ignoredHeaders),
getHashedPayload(req),
}, "\n")
return canonicalRequest
@@ -202,9 +204,9 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
return stringToSign
}
-// preSignV4 presign the request, in accordance with
+// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -214,10 +216,10 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
t := time.Now().UTC()
// Get credential string.
- credential := getCredential(accessKeyID, location, t)
+ credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
- signedHeaders := getSignedHeaders(req)
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Set URL query.
query := req.URL.Query()
@@ -226,10 +228,14 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
query.Set("X-Amz-SignedHeaders", signedHeaders)
query.Set("X-Amz-Credential", credential)
+ // Set session token if available.
+ if sessionToken != "" {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
req.URL.RawQuery = query.Encode()
// Get canonical request.
- canonicalRequest := getCanonicalRequest(req)
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
@@ -246,9 +252,9 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
return &req
}
-// postPresignSignatureV4 - presigned signature for PostPolicy
+// PostPresignSignatureV4 - presigned signature for PostPolicy
// requests.
-func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signining key.
signingkey := getSigningKey(secretAccessKey, location, t)
// Calculate signature.
@@ -256,9 +262,9 @@ func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
return signature
}
-// signV4 sign the request before Do(), in accordance with
+// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@@ -270,8 +276,13 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
// Set x-amz-date.
req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ // Set session token if available.
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
// Get canonical request.
- canonicalRequest := getCanonicalRequest(req)
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest)
@@ -280,10 +291,10 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
signingKey := getSigningKey(secretAccessKey, location, t)
// Get credential string.
- credential := getCredential(accessKeyID, location, t)
+ credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
- signedHeaders := getSignedHeaders(req)
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Calculate signature.
signature := getSignature(signingKey, stringToSign)
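
The V4 path mirrors this; the extra sessionToken argument is the visible API change and may be left empty for static credentials. Placeholder values throughout:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	// Header-based V4 signature; with a non-empty session token the
	// X-Amz-Security-Token header would be set as well.
	req, _ := http.NewRequest("GET", "https://s3.amazonaws.com/mybucket/hello.txt", nil)
	signed := s3signer.SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1")
	fmt.Println(signed.Header.Get("Authorization"))

	// Presigned URL variant, valid for one hour.
	preReq, _ := http.NewRequest("GET", "https://s3.amazonaws.com/mybucket/hello.txt", nil)
	presigned := s3signer.PreSignV4(*preReq, "ACCESS-KEY", "SECRET-KEY", "", "us-east-1", 3600)
	fmt.Println(presigned.URL.String())
}
```
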
diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
similarity index 56%
rename from vendor/github.com/minio/minio-go/signature-type.go
rename to vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
index cae74cd010087e470868639150a5dcd2d0b2330b..0619b3082862ac839b2dced27aed2f718a19e2f9 100644
--- a/vendor/github.com/minio/minio-go/signature-type.go
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
@@ -14,24 +14,26 @@
* limitations under the License.
*/
-package minio
+package s3signer
-// SignatureType is type of Authorization requested for a given HTTP request.
-type SignatureType int
-
-// Different types of supported signatures - default is Latest i.e SignatureV4.
-const (
- Latest SignatureType = iota
- SignatureV4
- SignatureV2
+import (
+ "crypto/hmac"
+ "crypto/sha256"
)
-// isV2 - is signature SignatureV2?
-func (s SignatureType) isV2() bool {
- return s == SignatureV2
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the request payload is unsigned.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculate sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
}
-// isV4 - is signature SignatureV4?
-func (s SignatureType) isV4() bool {
- return s == SignatureV4 || s == Latest
+// sumHMAC calculate hmac between two input byte array.
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..bdc8d4e91de73c56ebaff82ad31cfb05164ebd96
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -0,0 +1,276 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+ // All non alphanumeric characters are invalid.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:>/") {
+ return false
+ }
+ // No need to regexp match, since the list is non-exhaustive.
+ // We let it through and fail later.
+ return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
+}
+
+// AmazonS3Host - regular expression used to determine if an arg is s3 host.
+var AmazonS3Host = regexp.MustCompile("^s3[.-]?(.*?)\\.amazonaws\\.com$")
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if IsAmazonChinaEndpoint(endpointURL) {
+ return true
+ }
+ if IsAmazonGovCloudEndpoint(endpointURL) {
+ return true
+ }
+ return AmazonS3Host.MatchString(endpointURL.Host)
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+ IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
+
+// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com"
+}
+
+// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
+// Customers who wish to use the new Beijing Region are required
+// to sign up for a separate set of account credentials unique to
+// the China (Beijing) Region. Customers with existing AWS credentials
+// will not be able to access resources in the new Region, and vice versa.
+// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
+func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
+}
+
+// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
+func IsGoogleEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return endpointURL.Host == "storage.googleapis.com"
+}
+
+// Expects ascii encoded strings - from output of urlEncodePath
+func percentEncodeSlash(s string) string {
+ return strings.Replace(s, "/", "%2F", -1)
+}
+
+// QueryEncode - encodes query values in their URL encoded form. In
+// addition to the percent encoding performed by urlEncodePath() used
+// here, it also percent encodes '/' (forward slash)
+func QueryEncode(v url.Values) string {
+ if v == nil {
+ return ""
+ }
+ var buf bytes.Buffer
+ keys := make([]string, 0, len(v))
+ for k := range v {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := v[k]
+ prefix := percentEncodeSlash(EncodePath(k)) + "="
+ for _, v := range vs {
+ if buf.Len() > 0 {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(prefix)
+ buf.WriteString(percentEncodeSlash(EncodePath(v)))
+ }
+ }
+ return buf.String()
+}
+
+// if object matches reserved string, no need to encode them
+var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
+
+// EncodePath encodes strings from their UTF-8 byte representations to HTML hex escape sequences.
+//
+// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8:
+// non-English characters cannot be parsed correctly due to the way url.Encode() is written.
+//
+// This function, on the other hand, is a direct replacement for the url.Encode() technique and
+// supports pretty much every UTF-8 character.
+func EncodePath(pathName string) string {
+ if reservedObjectNames.MatchString(pathName) {
+ return pathName
+ }
+ var encodedPathname string
+ for _, s := range pathName {
+ if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ }
+ switch s {
+ case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
+ encodedPathname = encodedPathname + string(s)
+ continue
+ default:
+ len := utf8.RuneLen(s)
+ if len < 0 {
+ // if utf8 cannot convert return the same string as is
+ return pathName
+ }
+ u := make([]byte, len)
+ utf8.EncodeRune(u, s)
+ for _, r := range u {
+ hex := hex.EncodeToString([]byte{r})
+ encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
+ }
+ }
+ }
+ return encodedPathname
+}
+
+// We support '.' with bucket names but we fallback to using path
+// style requests instead for such buckets.
+var (
+ validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
+ validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+ ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+)
+
+// Common checker for both stricter and basic validation.
+func checkBucketNameCommon(bucketName string, strict bool) (err error) {
+ if strings.TrimSpace(bucketName) == "" {
+ return errors.New("Bucket name cannot be empty")
+ }
+ if len(bucketName) < 3 {
+ return errors.New("Bucket name cannot be smaller than 3 characters")
+ }
+ if len(bucketName) > 63 {
+ return errors.New("Bucket name cannot be greater than 63 characters")
+ }
+ if ipAddress.MatchString(bucketName) {
+ return errors.New("Bucket name cannot be an ip address")
+ }
+ if strings.Contains(bucketName, "..") {
+ return errors.New("Bucket name contains invalid characters")
+ }
+ if strict {
+ if !validBucketNameStrict.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+ }
+ if !validBucketName.MatchString(bucketName) {
+ err = errors.New("Bucket name contains invalid characters")
+ }
+ return err
+}
+
+// CheckValidBucketName - checks if we have a valid input bucket name.
+func CheckValidBucketName(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, false)
+}
+
+// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
+// This is a stricter version.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
+func CheckValidBucketNameStrict(bucketName string) (err error) {
+ return checkBucketNameCommon(bucketName, true)
+}
+
+// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectNamePrefix(objectName string) error {
+ if len(objectName) > 1024 {
+ return errors.New("Object name cannot be greater than 1024 characters")
+ }
+ if !utf8.ValidString(objectName) {
+ return errors.New("Object name with non UTF-8 strings are not supported")
+ }
+ return nil
+}
+
+// CheckValidObjectName - checks if we have a valid input object name.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
+func CheckValidObjectName(objectName string) error {
+ if strings.TrimSpace(objectName) == "" {
+ return errors.New("Object name cannot be empty")
+ }
+ return CheckValidObjectNamePrefix(objectName)
+}
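
A short sketch of the new s3utils validation and encoding helpers:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	// Bucket name validation: the strict variant rejects uppercase letters
	// and underscores, the relaxed one tolerates them.
	fmt.Println(s3utils.CheckValidBucketNameStrict("My_Bucket")) // error
	fmt.Println(s3utils.CheckValidBucketName("My_Bucket"))       // nil

	// Object names only need to be non-empty, valid UTF-8 and <= 1024 chars.
	fmt.Println(s3utils.CheckValidObjectName("photos/2017/лето.jpg")) // nil

	// UTF-8 aware path encoding used when building canonical requests.
	fmt.Println(s3utils.EncodePath("photos/2017/лето.jpg"))

	// Query encoding that additionally percent-encodes '/'.
	v := url.Values{}
	v.Set("prefix", "a/b")
	fmt.Println(s3utils.QueryEncode(v)) // prefix=a%2Fb
}
```
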
diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
new file mode 100644
index 0000000000000000000000000000000000000000..9f33488e01a7834999c93d7d73d4c7b450aac790
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go
@@ -0,0 +1,196 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// StringSet - uses map as set of strings.
+type StringSet map[string]struct{}
+
+// ToSlice - returns StringSet as string slice.
+func (set StringSet) ToSlice() []string {
+ keys := make([]string, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set StringSet) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds string to the set.
+func (set StringSet) Add(s string) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes string in the set. It does nothing if string does not exist in the set.
+func (set StringSet) Remove(s string) {
+ delete(set, s)
+}
+
+// Contains - checks if string is in the set.
+func (set StringSet) Contains(s string) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns a new set containing each value that passes the match function.
+// A 'matchFn' should accept an element of the set as its first argument and
+// 'matchString' as its second argument. The function may apply any logic to
+// compare the two arguments and should return true to include the element in
+// the output set; otherwise the element is ignored.
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if matchFn(k, matchString) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
+// An 'applyFn' should accept an element of the set as its argument and return
+// a processed string. The function may apply any logic to produce the
+// processed string.
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether given set is equal to current set or not.
+func (set StringSet) Equals(sset StringSet) bool {
+ // If length of set is not equal to length of given set, the
+ // set is not equal to given set.
+ if len(set) != len(sset) {
+ return false
+ }
+
+ // As both sets are equal in length, check each elements are equal.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set StringSet) Intersection(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set StringSet) Difference(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set StringSet) Union(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
+
+// MarshalJSON - converts to JSON data.
+func (set StringSet) MarshalJSON() ([]byte, error) {
+ return json.Marshal(set.ToSlice())
+}
+
+// UnmarshalJSON - parses JSON data and creates new set with it.
+// If 'data' contains JSON string array, the set contains each string.
+// If 'data' contains JSON string, the set contains the string as one element.
+// If 'data' contains Other JSON types, JSON parse error is returned.
+func (set *StringSet) UnmarshalJSON(data []byte) error {
+ sl := []string{}
+ var err error
+ if err = json.Unmarshal(data, &sl); err == nil {
+ *set = make(StringSet)
+ for _, s := range sl {
+ set.Add(s)
+ }
+ } else {
+ var s string
+ if err = json.Unmarshal(data, &s); err == nil {
+ *set = make(StringSet)
+ set.Add(s)
+ }
+ }
+
+ return err
+}
+
+// String - returns printable string of the set.
+func (set StringSet) String() string {
+ return fmt.Sprintf("%s", set.ToSlice())
+}
+
+// NewStringSet - creates new string set.
+func NewStringSet() StringSet {
+ return make(StringSet)
+}
+
+// CreateStringSet - creates new string set with given string values.
+func CreateStringSet(sl ...string) StringSet {
+ set := make(StringSet)
+ for _, k := range sl {
+ set.Add(k)
+ }
+ return set
+}
+
+// CopyStringSet - returns copy of given set.
+func CopyStringSet(set StringSet) StringSet {
+ nset := NewStringSet()
+ for k, v := range set {
+ nset[k] = v
+ }
+ return nset
+}
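
The policy code above leans heavily on this set type; a compact sketch of the operations it uses most:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/set"
)

func main() {
	read := set.CreateStringSet("s3:GetObject")
	write := set.CreateStringSet("s3:PutObject", "s3:AbortMultipartUpload")

	both := read.Union(write)
	fmt.Println(both.ToSlice()) // sorted slice of all three actions

	// Intersection/Equals is the idiom the policy helpers use to test
	// "does this statement contain at least these actions".
	fmt.Println(both.Intersection(write).Equals(write))          // true
	fmt.Println(both.Difference(write).Contains("s3:GetObject")) // true
}
```
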
diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go
index 2a675d770ec33d94397d22cf93c70c4b66a87205..5e716124a845717fe4d80fb9b78a61bce40e0701 100644
--- a/vendor/github.com/minio/minio-go/post-policy.go
+++ b/vendor/github.com/minio/minio-go/post-policy.go
@@ -149,6 +149,24 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
return nil
}
+// SetSuccessStatusAction - Sets the status success code of the object for this policy
+// based upload.
+func (p *PostPolicy) SetSuccessStatusAction(status string) error {
+ if strings.TrimSpace(status) == "" || status == "" {
+ return ErrInvalidArgument("Status is empty")
+ }
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: "$success_action_status",
+ value: status,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData["success_action_status"] = status
+ return nil
+}
+
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
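
A hedged usage sketch for the new success_action_status condition; NewPostPolicy, SetBucket, SetKey, SetExpires and String come from the rest of post-policy.go, which this hunk does not show:

```go
package main

import (
	"fmt"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	p := minio.NewPostPolicy()
	_ = p.SetBucket("mybucket")
	_ = p.SetKey("uploads/hello.txt")
	_ = p.SetExpires(time.Now().UTC().Add(15 * time.Minute))

	// New in this change: ask S3 to answer the browser POST with a 201
	// status and an XML document instead of the default 204.
	if err := p.SetSuccessStatusAction("201"); err != nil {
		fmt.Println(err)
	}

	fmt.Println(p.String()) // the policy document that gets base64-encoded and signed
}
```
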
diff --git a/vendor/github.com/minio/minio-go/request-headers.go b/vendor/github.com/minio/minio-go/request-headers.go
new file mode 100644
index 0000000000000000000000000000000000000000..76c87202d53ddfb32c918f315a943e3a1ec2824f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/request-headers.go
@@ -0,0 +1,111 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+)
+
+// RequestHeaders - implement methods for setting special
+// request headers for GET, HEAD object operations.
+// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
+type RequestHeaders struct {
+ http.Header
+}
+
+// NewGetReqHeaders - initializes a new request headers for GET request.
+func NewGetReqHeaders() RequestHeaders {
+ return RequestHeaders{
+ Header: make(http.Header),
+ }
+}
+
+// NewHeadReqHeaders - initializes a new request headers for HEAD request.
+func NewHeadReqHeaders() RequestHeaders {
+ return RequestHeaders{
+ Header: make(http.Header),
+ }
+}
+
+// SetMatchETag - set match etag.
+func (c RequestHeaders) SetMatchETag(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ c.Set("If-Match", "\""+etag+"\"")
+ return nil
+}
+
+// SetMatchETagExcept - set match etag except.
+func (c RequestHeaders) SetMatchETagExcept(etag string) error {
+ if etag == "" {
+ return ErrInvalidArgument("ETag cannot be empty.")
+ }
+ c.Set("If-None-Match", "\""+etag+"\"")
+ return nil
+}
+
+// SetUnmodified - set unmodified time since.
+func (c RequestHeaders) SetUnmodified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Modified since cannot be empty.")
+ }
+ c.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetModified - set modified time since.
+func (c RequestHeaders) SetModified(modTime time.Time) error {
+ if modTime.IsZero() {
+ return ErrInvalidArgument("Modified since cannot be empty.")
+ }
+ c.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+ return nil
+}
+
+// SetRange - set the start and end offset of the object to be read.
+// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
+func (c RequestHeaders) SetRange(start, end int64) error {
+ switch {
+ case start == 0 && end < 0:
+ // Read last '-end' bytes. `bytes=-N`.
+ c.Set("Range", fmt.Sprintf("bytes=%d", end))
+ case 0 < start && end == 0:
+ // Read everything starting from offset
+ // 'start'. `bytes=N-`.
+ c.Set("Range", fmt.Sprintf("bytes=%d-", start))
+ case 0 <= start && start <= end:
+ // Read everything starting at 'start' till the
+ // 'end'. `bytes=N-M`
+ c.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+ default:
+ // All other cases such as
+ // bytes=-3-
+ // bytes=5-3
+ // bytes=-2-4
+ // bytes=-3-0
+ // bytes=-3--2
+ // are invalid.
+ return ErrInvalidArgument(
+ fmt.Sprintf(
+ "Invalid range specified: start=%d end=%d",
+ start, end))
+ }
+ return nil
+}
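The new `RequestHeaders` type only wraps `http.Header`, so a short sketch exercising the added setters is enough to show the conditional GET headers it produces (the range and time values are made up):

```go
package main

import (
	"fmt"
	"log"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	reqHeaders := minio.NewGetReqHeaders()

	// Fetch only the first 1024 bytes of the object.
	if err := reqHeaders.SetRange(0, 1023); err != nil {
		log.Fatal(err)
	}
	// Only return the object if it changed within the last 24 hours.
	if err := reqHeaders.SetModified(time.Now().UTC().Add(-24 * time.Hour)); err != nil {
		log.Fatal(err)
	}

	// RequestHeaders embeds http.Header, so the result is directly inspectable.
	fmt.Println("Range:", reqHeaders.Get("Range"))
	fmt.Println("If-Modified-Since:", reqHeaders.Get("If-Modified-Since"))
}
```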
diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go
new file mode 100644
index 0000000000000000000000000000000000000000..e300af69c5a839ed24e2db48cbc93a8ff1a4e5f5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry-continous.go
@@ -0,0 +1,52 @@
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+ attemptCh := make(chan int)
+
+ // normalize jitter to the range [0, 1.0]
+ if jitter < NoJitter {
+ jitter = NoJitter
+ }
+ if jitter > MaxJitter {
+ jitter = MaxJitter
+ }
+
+ // computes the exponential backoff duration according to
+ // https://www.awsarchitectureblog.com/2015/03/backoff.html
+ exponentialBackoffWait := func(attempt int) time.Duration {
+ // 1< maxAttempt {
+ attempt = maxAttempt
+ }
+ //sleep = random_between(0, min(cap, base * 2 ** attempt))
+ sleep := unit * time.Duration(1< cap {
+ sleep = cap
+ }
+ if jitter != NoJitter {
+ sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+ }
+ return sleep
+ }
+
+ go func() {
+ defer close(attemptCh)
+ var nextBackoff int
+ for {
+ select {
+ // Attempts starts.
+ case attemptCh <- nextBackoff:
+ nextBackoff++
+ case <-doneCh:
+ // Stop the routine.
+ return
+ }
+ time.Sleep(exponentialBackoffWait(nextBackoff))
+ }
+ }()
+ return attemptCh
+}
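The continuous retry timer above is unexported, so it cannot be called from outside the package; the sketch below reproduces the same pattern (capped, jittered exponential backoff fed through an attempt channel until `doneCh` is closed) purely to illustrate how a consumer loop interacts with it:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryAttempts mirrors newRetryTimerContinous: it emits attempt numbers on a
// channel and sleeps an exponentially growing, capped, jittered duration
// between them, until doneCh is closed. Jitter clamping is omitted for brevity.
func retryAttempts(unit, maxWait time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
	attemptCh := make(chan int)
	backoff := func(attempt int) time.Duration {
		if attempt > 30 { // keep 1<<attempt from overflowing
			attempt = 30
		}
		sleep := unit * time.Duration(1<<uint(attempt))
		if sleep > maxWait {
			sleep = maxWait
		}
		return sleep - time.Duration(rand.Float64()*float64(sleep)*jitter)
	}
	go func() {
		defer close(attemptCh)
		for attempt := 0; ; attempt++ {
			select {
			case attemptCh <- attempt:
			case <-doneCh:
				return
			}
			time.Sleep(backoff(attempt))
		}
	}()
	return attemptCh
}

func main() {
	doneCh := make(chan struct{})
	defer close(doneCh)
	for attempt := range retryAttempts(time.Second, 30*time.Second, 0.5, doneCh) {
		fmt.Println("attempt", attempt)
		if attempt == 2 { // pretend the third try succeeded
			break
		}
	}
}
```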
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
index 41b70e4748f9483fa47934db363adbba07952e7a..1de5107e4a60e6dfb172854ba9c4fe2574e8539d 100644
--- a/vendor/github.com/minio/minio-go/retry.go
+++ b/vendor/github.com/minio/minio-go/retry.go
@@ -33,8 +33,16 @@ const MaxJitter = 1.0
// NoJitter disables the use of jitter for randomizing the exponential backoff time
const NoJitter = 0.0
-// newRetryTimer creates a timer with exponentially increasing delays
-// until the maximum retry attempts are reached.
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 1 second.
+const DefaultRetryUnit = time.Second
+
+// DefaultRetryCap - Each retry attempt never waits longer than
+// this maximum time duration.
+const DefaultRetryCap = time.Second * 30
+
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
attemptCh := make(chan int)
@@ -78,6 +86,9 @@ func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duratio
// isNetErrorRetryable - is network error retryable.
func isNetErrorRetryable(err error) bool {
+ if err == nil {
+ return false
+ }
switch err.(type) {
case net.Error:
switch err.(type) {
@@ -96,6 +107,9 @@ func isNetErrorRetryable(err error) bool {
} else if strings.Contains(err.Error(), "i/o timeout") {
// If error is - tcp timeoutError, retry.
return true
+ } else if strings.Contains(err.Error(), "connection timed out") {
+ // If err is a net.Dial timeout, retry.
+ return true
}
}
}
diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go
index a46b5e335b88d1876609f9bbda487d7b62baf52a..c02f3f1fa301bd570f52f8bc9fa5463e644e256e 100644
--- a/vendor/github.com/minio/minio-go/s3-endpoints.go
+++ b/vendor/github.com/minio/minio-go/s3-endpoints.go
@@ -20,14 +20,20 @@ package minio
// "cn-north-1" adds support for AWS China.
var awsS3EndpointMap = map[string]string{
"us-east-1": "s3.amazonaws.com",
+ "us-east-2": "s3-us-east-2.amazonaws.com",
"us-west-2": "s3-us-west-2.amazonaws.com",
"us-west-1": "s3-us-west-1.amazonaws.com",
+ "ca-central-1": "s3.ca-central-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
+ "eu-west-2": "s3-eu-west-2.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com",
+ "ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com",
+ "us-gov-west-1": "s3-us-gov-west-1.amazonaws.com",
"cn-north-1": "s3.cn-north-1.amazonaws.com.cn",
}
diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go
new file mode 100644
index 0000000000000000000000000000000000000000..c5aff9bbcfbe870fbebc0da88d086d860a1791c1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/s3-error.go
@@ -0,0 +1,60 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+// Non exhaustive list of AWS S3 standard error responses -
+// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+var s3ErrorResponseMap = map[string]string{
+ "AccessDenied": "Access Denied.",
+ "BadDigest": "The Content-Md5 you specified did not match what we received.",
+ "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.",
+ "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.",
+ "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.",
+ "InternalError": "We encountered an internal error, please try again.",
+ "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.",
+ "InvalidBucketName": "The specified bucket is not valid.",
+ "InvalidDigest": "The Content-Md5 you specified is not valid.",
+ "InvalidRange": "The requested range is not satisfiable",
+ "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.",
+ "MissingContentLength": "You must provide the Content-Length HTTP header.",
+ "MissingContentMD5": "Missing required header for this request: Content-Md5.",
+ "MissingRequestBodyError": "Request body is empty.",
+ "NoSuchBucket": "The specified bucket does not exist",
+ "NoSuchBucketPolicy": "The bucket policy does not exist",
+ "NoSuchKey": "The specified key does not exist.",
+ "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
+ "NotImplemented": "A header you provided implies functionality that is not implemented",
+ "PreconditionFailed": "At least one of the pre-conditions you specified did not hold",
+ "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.",
+ "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
+ "MethodNotAllowed": "The specified method is not allowed against this resource.",
+ "InvalidPart": "One or more of the specified parts could not be found.",
+ "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
+ "InvalidObjectState": "The operation is not valid for the current state of the object.",
+ "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.",
+ "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.",
+ "BucketNotEmpty": "The bucket you tried to delete is not empty",
+ "AllAccessDisabled": "All access to this bucket has been disabled.",
+ "MalformedPolicy": "Policy has invalid resource.",
+ "MissingFields": "Missing fields in request.",
+ "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".",
+ "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
+ "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.",
+ "InvalidDuration": "Duration provided in the request is invalid.",
+ "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.",
+ // Add new API errors here.
+}
diff --git a/vendor/github.com/minio/minio-go/tempfile.go b/vendor/github.com/minio/minio-go/tempfile.go
deleted file mode 100644
index 65c7b0da167faee376c6bdacd72dd63be116b11f..0000000000000000000000000000000000000000
--- a/vendor/github.com/minio/minio-go/tempfile.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "io/ioutil"
- "os"
- "sync"
-)
-
-// tempFile - temporary file container.
-type tempFile struct {
- *os.File
- mutex *sync.Mutex
-}
-
-// newTempFile returns a new temporary file, once closed it automatically deletes itself.
-func newTempFile(prefix string) (*tempFile, error) {
- // use platform specific temp directory.
- file, err := ioutil.TempFile(os.TempDir(), prefix)
- if err != nil {
- return nil, err
- }
- return &tempFile{
- File: file,
- mutex: &sync.Mutex{},
- }, nil
-}
-
-// Close - closer wrapper to close and remove temporary file.
-func (t *tempFile) Close() error {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- if t.File != nil {
- // Close the file.
- if err := t.File.Close(); err != nil {
- return err
- }
- // Remove file.
- if err := os.Remove(t.File.Name()); err != nil {
- return err
- }
- t.File = nil
- }
- return nil
-}
diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go
new file mode 100644
index 0000000000000000000000000000000000000000..d286bd7aebdba6a012dee1f087407bf86c978416
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/transport.go
@@ -0,0 +1,48 @@
+// +build go1.7 go1.8
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// This default transport is similar to http.DefaultTransport
+// but with additional DisableCompression:
+var defaultMinioTransport http.RoundTripper = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ // Set this value so that the underlying transport round-tripper
+ // doesn't try to auto decode the body of objects with
+ // content-encoding set to `gzip`.
+ //
+ // Refer:
+ // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+ DisableCompression: true,
+}
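The vendored client wires this transport in by default; an application that needs different timeouts can swap in its own round-tripper through the client's `SetCustomTransport` method. A sketch, assuming the endpoint and credentials are placeholders:

```go
package main

import (
	"log"
	"net/http"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Placeholder endpoint and credentials.
	client, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Mirror the library default but with a shorter TLS handshake timeout.
	// DisableCompression stays on so gzip-encoded objects are returned as
	// stored instead of being transparently decoded by the transport.
	client.SetCustomTransport(&http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		TLSHandshakeTimeout:   5 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		DisableCompression:    true,
	})
}
```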
diff --git a/vendor/github.com/minio/minio-go/transport_1_5.go b/vendor/github.com/minio/minio-go/transport_1_5.go
new file mode 100644
index 0000000000000000000000000000000000000000..468daafd340fcbaf68ca6b00aa85aa72e4befe0a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/transport_1_5.go
@@ -0,0 +1,39 @@
+// +build go1.5,!go1.6,!go1.7,!go1.8
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "time"
+)
+
+// This default transport is similar to http.DefaultTransport
+// but with additional DisableCompression:
+var defaultMinioTransport http.RoundTripper = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSHandshakeTimeout: 10 * time.Second,
+ // Set this value so that the underlying transport round-tripper
+ // doesn't try to auto decode the body of objects with
+ // content-encoding set to `gzip`.
+ //
+ // Refer:
+ // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+ DisableCompression: true,
+}
diff --git a/vendor/github.com/minio/minio-go/transport_1_6.go b/vendor/github.com/minio/minio-go/transport_1_6.go
new file mode 100644
index 0000000000000000000000000000000000000000..77e7d76fc9cdf9f4083057a69b94be37a96e26d2
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/transport_1_6.go
@@ -0,0 +1,40 @@
+// +build go1.6,!go1.7,!go1.8
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * (C) 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net/http"
+ "time"
+)
+
+// This default transport is similar to http.DefaultTransport
+// but with additional DisableCompression:
+var defaultMinioTransport http.RoundTripper = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ // Set this value so that the underlying transport round-tripper
+ // doesn't try to auto decode the body of objects with
+ // content-encoding set to `gzip`.
+ //
+ // Refer:
+ // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+ DisableCompression: true,
+}
diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go
index 10466584930a5cdc457db3cd09437da8d5140261..6f54639e0c140e01b9acbb57c341db301ae0700c 100644
--- a/vendor/github.com/minio/minio-go/utils.go
+++ b/vendor/github.com/minio/minio-go/utils.go
@@ -17,11 +17,8 @@
package minio
import (
- "bytes"
- "crypto/hmac"
"crypto/md5"
"crypto/sha256"
- "encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
@@ -29,10 +26,10 @@ import (
"net/http"
"net/url"
"regexp"
- "sort"
"strings"
"time"
- "unicode/utf8"
+
+ "github.com/minio/minio-go/pkg/s3utils"
)
// xmlDecoder provide decoded value in xml.
@@ -55,33 +52,26 @@ func sumMD5(data []byte) []byte {
return hash.Sum(nil)
}
-// sumHMAC calculate hmac between two input byte array.
-func sumHMAC(key []byte, data []byte) []byte {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil)
-}
-
// getEndpointURL - construct a new endpoint.
-func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
+func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
host, _, err := net.SplitHostPort(endpoint)
if err != nil {
return nil, err
}
- if !isValidIP(host) && !isValidDomain(host) {
+ if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
- if !isValidIP(endpoint) && !isValidDomain(endpoint) {
+ if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
}
- // if inSecure is true, use 'http' scheme.
+ // If secure is false, use 'http' scheme.
scheme := "https"
- if inSecure {
+ if !secure {
scheme = "http"
}
@@ -93,45 +83,12 @@ func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) {
}
// Validate incoming endpoint URL.
- if err := isValidEndpointURL(endpointURL); err != nil {
+ if err := isValidEndpointURL(*endpointURL); err != nil {
return nil, err
}
return endpointURL, nil
}
-// isValidDomain validates if input string is a valid domain name.
-func isValidDomain(host string) bool {
- // See RFC 1035, RFC 3696.
- host = strings.TrimSpace(host)
- if len(host) == 0 || len(host) > 255 {
- return false
- }
- // host cannot start or end with "-"
- if host[len(host)-1:] == "-" || host[:1] == "-" {
- return false
- }
- // host cannot start or end with "_"
- if host[len(host)-1:] == "_" || host[:1] == "_" {
- return false
- }
- // host cannot start or end with a "."
- if host[len(host)-1:] == "." || host[:1] == "." {
- return false
- }
- // All non alphanumeric characters are invalid.
- if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:>/") {
- return false
- }
- // No need to regexp match, since the list is non-exhaustive.
- // We let it valid and fail later.
- return true
-}
-
-// isValidIP parses input string for ip address validity.
-func isValidIP(ip string) bool {
- return net.ParseIP(ip) != nil
-}
-
// closeResponse close non nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
@@ -152,73 +109,26 @@ func closeResponse(resp *http.Response) {
}
}
-// isVirtualHostSupported - verifies if bucketName can be part of
-// virtual host. Currently only Amazon S3 and Google Cloud Storage would
-// support this.
-func isVirtualHostSupported(endpointURL *url.URL, bucketName string) bool {
- // bucketName can be valid but '.' in the hostname will fail SSL
- // certificate validation. So do not use host-style for such buckets.
- if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
- return false
- }
- // Return true for all other cases
- return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
-}
+var emptySHA256 = sum256(nil)
-// Match if it is exactly Amazon S3 endpoint.
-func isAmazonEndpoint(endpointURL *url.URL) bool {
- if endpointURL == nil {
- return false
- }
- if endpointURL.Host == "s3.amazonaws.com" {
- return true
- }
- if isAmazonChinaEndpoint(endpointURL) {
- return true
- }
- return false
-}
-
-// Match if it is exactly Amazon S3 China endpoint.
-// Customers who wish to use the new Beijing Region are required to sign up for a separate set of account credentials unique to the China (Beijing) Region.
-// Customers with existing AWS credentials will not be able to access resources in the new Region, and vice versa."
-// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
-func isAmazonChinaEndpoint(endpointURL *url.URL) bool {
- if endpointURL == nil {
- return false
- }
- if endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn" {
- return true
- }
- return false
-}
-
-// Match if it is exactly Google cloud storage endpoint.
-func isGoogleEndpoint(endpointURL *url.URL) bool {
- if endpointURL == nil {
- return false
- }
- if endpointURL.Host == "storage.googleapis.com" {
- return true
- }
- return false
-}
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
// Verify if input endpoint URL is valid.
-func isValidEndpointURL(endpointURL *url.URL) error {
- if endpointURL == nil {
+func isValidEndpointURL(endpointURL url.URL) error {
+ if endpointURL == sentinelURL {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
- if strings.Contains(endpointURL.Host, ".amazonaws.com") {
- if !isAmazonEndpoint(endpointURL) {
+ if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
+ if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
}
if strings.Contains(endpointURL.Host, ".googleapis.com") {
- if !isGoogleEndpoint(endpointURL) {
+ if !s3utils.IsGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
}
}
@@ -237,121 +147,68 @@ func isValidExpiry(expires time.Duration) error {
return nil
}
-// We support '.' with bucket names but we fallback to using path
-// style requests instead for such buckets.
-var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
-
-// isValidBucketName - verify bucket name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
-func isValidBucketName(bucketName string) error {
- if strings.TrimSpace(bucketName) == "" {
- return ErrInvalidBucketName("Bucket name cannot be empty.")
- }
- if len(bucketName) < 3 {
- return ErrInvalidBucketName("Bucket name cannot be smaller than 3 characters.")
- }
- if len(bucketName) > 63 {
- return ErrInvalidBucketName("Bucket name cannot be greater than 63 characters.")
- }
- if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
- return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
- }
- if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
- return ErrInvalidBucketName("Bucket name cannot have successive periods.")
- }
- if !validBucketName.MatchString(bucketName) {
- return ErrInvalidBucketName("Bucket name contains invalid characters.")
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
}
- return nil
+ return h2
}
-// isValidObjectName - verify object name in accordance with
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
-func isValidObjectName(objectName string) error {
- if strings.TrimSpace(objectName) == "" {
- return ErrInvalidObjectName("Object name cannot be empty.")
- }
- if len(objectName) > 1024 {
- return ErrInvalidObjectName("Object name cannot be greater than 1024 characters.")
- }
- if !utf8.ValidString(objectName) {
- return ErrInvalidBucketName("Object name with non UTF-8 strings are not supported.")
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of header keys to remove and returns the
+// remaining headers as a new http.Header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+ filteredHeader = cloneHeader(header)
+ for _, key := range filterKeys {
+ filteredHeader.Del(key)
}
- return nil
+ return filteredHeader
}
-// isValidObjectPrefix - verify if object prefix is valid.
-func isValidObjectPrefix(objectPrefix string) error {
- if len(objectPrefix) > 1024 {
- return ErrInvalidObjectPrefix("Object prefix cannot be greater than 1024 characters.")
- }
- if !utf8.ValidString(objectPrefix) {
- return ErrInvalidObjectPrefix("Object prefix with non UTF-8 strings are not supported.")
- }
- return nil
-}
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
-// queryEncode - encodes query values in their URL encoded form.
-func queryEncode(v url.Values) string {
- if v == nil {
- return ""
- }
- var buf bytes.Buffer
- keys := make([]string, 0, len(v))
- for k := range v {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- vs := v[k]
- prefix := urlEncodePath(k) + "="
- for _, v := range vs {
- if buf.Len() > 0 {
- buf.WriteByte('&')
- }
- buf.WriteString(prefix)
- buf.WriteString(urlEncodePath(v))
- }
+// regSign matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+ if !strings.HasPrefix(origAuth, signV4Algorithm) {
+ // Set a temporary redacted auth
+ return "AWS **REDACTED**:**REDACTED**"
}
- return buf.String()
+
+ /// Signature V4 authorization header.
+
+ // Strip out accessKeyID from:
+ // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+ newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+ // Strip out 256-bit signature from: Signature=<256-bit signature>
+ return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
}
-// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
-//
-// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
-// non english characters cannot be parsed due to the nature in which url.Encode() is written
+// getDefaultLocation - returns the location based on the input
+// URL `u`. If a region override is provided then the location
+// always defaults to regionOverride.
//
-// This function on the other hand is a direct replacement for url.Encode() technique to support
-// pretty much every UTF-8 character.
-func urlEncodePath(pathName string) string {
- // if object matches reserved string, no need to encode them
- reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
- if reservedNames.MatchString(pathName) {
- return pathName
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+ if regionOverride != "" {
+ return regionOverride
}
- var encodedPathname string
- for _, s := range pathName {
- if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- }
- switch s {
- case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
- encodedPathname = encodedPathname + string(s)
- continue
- default:
- len := utf8.RuneLen(s)
- if len < 0 {
- // if utf8 cannot convert return the same string as is
- return pathName
- }
- u := make([]byte, len)
- utf8.EncodeRune(u, s)
- for _, r := range u {
- hex := hex.EncodeToString([]byte{r})
- encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
- }
- }
+ if s3utils.IsAmazonChinaEndpoint(u) {
+ return "cn-north-1"
+ }
+ if s3utils.IsAmazonGovCloudEndpoint(u) {
+ return "us-gov-west-1"
}
- return encodedPathname
+ // Default to location to 'us-east-1'.
+ return "us-east-1"
}
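`redactSignature` is unexported, so the snippet below simply copies its two patterns to show what the redaction produces for a fabricated Signature V4 authorization header:

```go
package main

import (
	"fmt"
	"regexp"
)

// Standalone copies of the two patterns used by redactSignature above.
var (
	reCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
	reSign = regexp.MustCompile("Signature=([0-9a-f]+)")
)

func main() {
	// Fabricated V4 authorization header, for illustration only.
	auth := "AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20171109/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-date, Signature=0123456789abcdef"

	redacted := reCred.ReplaceAllString(auth, "Credential=**REDACTED**/")
	redacted = reSign.ReplaceAllString(redacted, "Signature=**REDACTED**")
	fmt.Println(redacted)
	// Output (wrapped): AWS4-HMAC-SHA256 Credential=**REDACTED**/20171109/us-east-1/s3/aws4_request,
	//                   SignedHeaders=host;x-amz-date, Signature=**REDACTED**
}
```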
diff --git a/vendor/gitlab.com/ayufan/golang-cli-helpers/struct_field.go b/vendor/gitlab.com/ayufan/golang-cli-helpers/struct_field.go
index 4f7e58f4530301d51bbd1620c5b8fecd7e5c326c..3a24d002203685ac709ced11cb307f2c1af40a37 100644
--- a/vendor/gitlab.com/ayufan/golang-cli-helpers/struct_field.go
+++ b/vendor/gitlab.com/ayufan/golang-cli-helpers/struct_field.go
@@ -1,9 +1,10 @@
package clihelpers
import (
- "strings"
- "reflect"
"github.com/urfave/cli"
+ "reflect"
+ "regexp"
+ "strings"
)
type StructFieldValue struct {
@@ -26,6 +27,9 @@ func (s StructFieldValue) Set(val string) error {
}
func (s StructFieldValue) String() string {
+ if !s.value.IsValid() {
+ return ""
+ }
if s.value.Kind() == reflect.Ptr && s.value.IsNil() {
return ""
}
@@ -41,16 +45,16 @@ func (f StructFieldFlag) String() string {
if sf, ok := f.Value.(StructFieldValue); ok {
if sf.IsBoolFlag() {
flag := &cli.BoolFlag{
- Name: f.Name,
- Usage: f.Usage,
+ Name: f.Name,
+ Usage: f.Usage,
EnvVar: f.EnvVar,
}
return flag.String()
} else {
flag := &cli.StringFlag{
- Name: f.Name,
- Value: sf.String(),
- Usage: f.Usage,
+ Name: f.Name,
+ Value: sf.String(),
+ Usage: f.Usage,
EnvVar: f.EnvVar,
}
return flag.String()
@@ -60,6 +64,8 @@ func (f StructFieldFlag) String() string {
}
}
+var reName = regexp.MustCompile(`{(\w+)}`)
+
func getStructFieldFlag(field reflect.StructField, fieldValue reflect.Value, ns []string) []cli.Flag {
var names []string
@@ -80,7 +86,7 @@ func getStructFieldFlag(field reflect.StructField, fieldValue reflect.Value, ns
field: field,
value: fieldValue,
},
- Usage: field.Tag.Get("description"),
+ Usage: reName.ReplaceAllString(field.Tag.Get("description"), "`$1`"),
EnvVar: field.Tag.Get("env"),
}
return []cli.Flag{StructFieldFlag{GenericFlag: flag}}
@@ -142,6 +148,6 @@ func getFlagsForValue(value reflect.Value, ns []string) []cli.Flag {
return flags
}
-func GetFlagsFromStruct(data interface{}, ns... string) []cli.Flag {
+func GetFlagsFromStruct(data interface{}, ns ...string) []cli.Flag {
return getFlagsForValue(reflect.ValueOf(data), ns)
}
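The new `reName` rewrite turns `{placeholder}` markers in a field's `description` tag into backquoted names, which urfave/cli renders as the flag's value placeholder in help output. A standalone illustration with a made-up description tag:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as reName in struct_field.go.
var reName = regexp.MustCompile(`{(\w+)}`)

func main() {
	// Hypothetical description tag as it might appear on a config struct field.
	description := "Upload artifacts to coordinator after {duration} of inactivity"

	// {duration} becomes `duration`, which urfave/cli shows as the flag's
	// value placeholder in --help output.
	fmt.Println(reName.ReplaceAllString(description, "`$1`"))
	// Output: Upload artifacts to coordinator after `duration` of inactivity
}
```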
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 8f375e15b8c7bf33ea4eae3246857e9f46456625..3ec637f742bc802b68d6567dfcbe7cf562fd4e9a 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -680,6 +680,12 @@
"path": "github.com/ghodss/yaml",
"revision": "aa0c862057666179de291b67d9f093d12b5a8473"
},
+ {
+ "checksumSHA1": "87LEfpY9cOk9CP7pWyIbmQ/6enU=",
+ "path": "github.com/go-ini/ini",
+ "revision": "20b96f641a5ea98f2f8619ff4f3e061cff4833bd",
+ "revisionTime": "2017-08-13T05:15:16Z"
+ },
{
"checksumSHA1": "dyojUOIWcH5VOLo2R7VMqF0bttg=",
"comment": "v0.2-61-gf20a144",
@@ -771,10 +777,53 @@
"revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c"
},
{
- "checksumSHA1": "2U9NHIz+Zd7WwBkU84fKDk4OEk4=",
- "comment": "v1.0.0-94-ge9fd7b8",
+ "checksumSHA1": "q8VfSvQ6fZY+QwtlMPtQsJrNews=",
+ "path": "github.com/minio/go-homedir",
+ "revision": "21304a94172ae3a09dee2cd86a12fb6f842138c7",
+ "revisionTime": "2016-02-15T11:55:11Z"
+ },
+ {
+ "checksumSHA1": "wU+t3PuZkLcNmS47J775XC54p2c=",
+ "comment": "3.0.3",
"path": "github.com/minio/minio-go",
- "revision": "e9fd7b820d9a87122e159439ca034472040a43ce"
+ "revision": "4e0f567303d4cc90ceb055a451959fb9fc391fb9",
+ "revisionTime": "2017-09-12T21:15:23Z"
+ },
+ {
+ "checksumSHA1": "qPt3l9ZUjG0J4oZNTSrKy17rts0=",
+ "path": "github.com/minio/minio-go/pkg/credentials",
+ "revision": "4e0f567303d4cc90ceb055a451959fb9fc391fb9",
+ "revisionTime": "2017-09-12T21:15:23Z"
+ },
+ {
+ "checksumSHA1": "pggIpSePizRBQ7ybhB0CuaSQydw=",
+ "path": "github.com/minio/minio-go/pkg/encrypt",
+ "revision": "4e0f567303d4cc90ceb055a451959fb9fc391fb9",
+ "revisionTime": "2017-09-12T21:15:23Z"
+ },
+ {
+ "checksumSHA1": "3tl2ehmod/EzXE9o9WJ5HM2AQPE=",
+ "path": "github.com/minio/minio-go/pkg/policy",
+ "revision": "4e0f567303d4cc90ceb055a451959fb9fc391fb9",
+ "revisionTime": "2017-09-12T21:15:23Z"
+ },
+ {
+ "checksumSHA1": "iEpQa89Q4IbHhfMW1qYJMOy9jN4=",
+ "path": "github.com/minio/minio-go/pkg/s3signer",
+ "revision": "4e0f567303d4cc90ceb055a451959fb9fc391fb9",
+ "revisionTime": "2017-09-12T21:15:23Z"
+ },
+ {
+ "checksumSHA1": "P9p0KiZWh9shIFSqTaIeY4/KYXo=",
+ "path": "github.com/minio/minio-go/pkg/s3utils",
+ "revision": "4e0f567303d4cc90ceb055a451959fb9fc391fb9",
+ "revisionTime": "2017-09-12T21:15:23Z"
+ },
+ {
+ "checksumSHA1": "maUy+dbN6VfTTnfErrAW2lLit1w=",
+ "path": "github.com/minio/minio-go/pkg/set",
+ "revision": "4e0f567303d4cc90ceb055a451959fb9fc391fb9",
+ "revisionTime": "2017-09-12T21:15:23Z"
},
{
"checksumSHA1": "P77+l5Pc0uBc1xQvBU67O642LvU=",
@@ -944,10 +993,10 @@
"revisionTime": "2016-09-27T02:54:18Z"
},
{
- "checksumSHA1": "AsIommRofK62X9WOgOrtHXRFjqo=",
+ "checksumSHA1": "uzHhbqAVbv+H66DY5kDOJ9C85GM=",
"path": "gitlab.com/ayufan/golang-cli-helpers",
- "revision": "308be8b1ccf3b8e896a0e572ea2eb7599d369bef",
- "revisionTime": "2017-04-25T20:31:14Z"
+ "revision": "a7cf72d604cdf0af6031dd5d54a4e513abeff0d4",
+ "revisionTime": "2017-11-03T15:27:39Z"
},
{
"checksumSHA1": "h+pFYiRHBogczS8/F1NoN3Ata44=",