Compare commits
290 Commits
developmen...fwdport/6.
Author | SHA1 | Date |
---|---|---|
Rahul Padigela | a26af365d9 | |
Lauren Spiegel | 7c05150f1d | |
Electra Chong | dda6c4e551 | |
Electra Chong | beb93d0f65 | |
Lauren Spiegel | 3c889df957 | |
Lauren Spiegel | ae02ea7896 | |
Lauren Spiegel | ecd3951d09 | |
Lauren Spiegel | 72c4384b20 | |
Lauren Spiegel | cadc669b76 | |
Rached Ben Mustapha | ba52f4f2b1 | |
Rached Ben Mustapha | 54f224cde0 | |
Rached Ben Mustapha | 5bf463ac31 | |
Lauren Spiegel | df2c4a41f4 | |
Dora Korpar | a83a402672 | |
Lauren Spiegel | 7dac05eb22 | |
Dora Korpar | d0c71cb778 | |
Lauren Spiegel | 8aa9e76d96 | |
Marc Ségura | 24b6ac1609 | |
Lauren Spiegel | 612d5ca8b6 | |
Jonathan Gramain | a07e188d08 | |
Lauren Spiegel | 7312053539 | |
Nicolas Humbert | ac8b3c267c | |
Lauren Spiegel | b96a81fd70 | |
Electra Chong | 7e928addcf | |
Lauren Spiegel | e811f63dff | |
Electra Chong | fe7cf1e750 | |
Electra Chong | 83003e2378 | |
Electra Chong | d7d887b284 | |
Lauren Spiegel | 982efd91c5 | |
Nicolas Humbert | b2bedd20db | |
Lauren Spiegel | 426cddc564 | |
Dora Korpar | 464cb74d42 | |
Lauren Spiegel | 52f2265d59 | |
Lauren Spiegel | c8ad30964c | |
Lauren Spiegel | 4391b7ce3f | |
Lauren Spiegel | 25cb39df38 | |
Lauren Spiegel | c9e220fa1a | |
Electra Chong | c354c7c6a8 | |
Lauren Spiegel | 7c8c9443fd | |
Lauren Spiegel | d3756f35a3 | |
Lauren Spiegel | df329a9e74 | |
Lauren Spiegel | 6150fdff63 | |
Lauren Spiegel | 4091a15545 | |
Alexandre Merle | fcca62b303 | |
Lauren Spiegel | 28c509a477 | |
Electra Chong | dfb97ba4c6 | |
Lauren Spiegel | 1bb17a6502 | |
Nicolas Humbert | 41529cd208 | |
Lauren Spiegel | 0961721fbd | |
Dora Korpar | 9c33ae6df6 | |
Lauren Spiegel | 4a6a247839 | |
Bennett Buchanan | 7daa9a5390 | |
Lauren Spiegel | 6201d5b074 | |
Nicolas Humbert | 7076660d8b | |
Lauren Spiegel | 6b8443012e | |
Alexandre Merle | c22e44f63d | |
Lauren Spiegel | 246ada580c | |
Nicolas Humbert | 612f9f6a7b | |
Rahul Padigela | 9efa353228 | |
Bennett Buchanan | ae14f00eb2 | |
Vianney Rancurel | 82c9204212 | |
Lauren Spiegel | 1ae15540d5 | |
Lauren Spiegel | e579bf893a | |
Electra Chong | 7dc199f65f | |
Lauren Spiegel | cac7450507 | |
Electra Chong | 0ac05ae828 | |
Lauren Spiegel | 5f32e0479f | |
Nicolas Humbert | ad876da61d | |
Lauren Spiegel | 54f057cb21 | |
Nicolas Humbert | 126d1000bd | |
Lauren Spiegel | 14dc60e288 | |
Nicolas Humbert | 9775048cbb | |
Lauren Spiegel | 330001477b | |
Nicolas Humbert | fcc9dc799e | |
Lauren Spiegel | fd7464e92b | |
Dora Korpar | 29ef286a0d | |
Lauren Spiegel | 82897e2053 | |
Electra Chong | ee5b8811d6 | |
Lauren Spiegel | 67a12fa551 | |
Bennett Buchanan | 7ed6a14fe7 | |
Lauren Spiegel | 41bd9ad69d | |
Nicolas Humbert | 0e6e4fbad2 | |
Lauren Spiegel | 260aa509ef | |
Nicolas Humbert | cc105ab05b | |
Lauren Spiegel | 94d880ed69 | |
Nicolas Humbert | a3fa60b24c | |
Lauren Spiegel | 6801842239 | |
Lauren Spiegel | 4c106b0870 | |
Lauren Spiegel | a466c6ded9 | |
Electra Chong | 068d2520b1 | |
Lauren Spiegel | 12c33be6e1 | |
Nicolas Humbert | 0085179415 | |
Lauren Spiegel | 0bafb45378 | |
Nicolas Humbert | 2aa584b97e | |
Nicolas Humbert | 66288573b1 | |
Lauren Spiegel | c2a84908cd | |
Electra Chong | f8532a9ae8 | |
Electra Chong | 4609f62e04 | |
Lauren Spiegel | 83a3f41e35 | |
Nicolas Humbert | e8f62f24fb | |
Lauren Spiegel | 64791b1424 | |
Jonathan Gramain | 830a2f8203 | |
Jonathan Gramain | fbcd4e9da7 | |
Jonathan Gramain | a29091d306 | |
Lauren Spiegel | 61307e7683 | |
Electra Chong | 4514541265 | |
Lauren Spiegel | bfc69481e3 | |
Electra Chong | 0bced55129 | |
Lauren Spiegel | 24f60dbe69 | |
Electra Chong | f0c2e06197 | |
Lauren Spiegel | 0f6bc184c0 | |
Lauren Spiegel | 3f4542eb3c | |
Lauren Spiegel | 47e6a15eb7 | |
Nicolas Humbert | dbe3e817e9 | |
Lauren Spiegel | a78c65e4cd | |
Electra Chong | 3f8bc9eafa | |
Lauren Spiegel | 6a8e1cd72f | |
Nicolas Humbert | 2dd1eb133a | |
Lauren Spiegel | 71db32c829 | |
Vinh Tao | dd453becba | |
Jonathan Gramain | 37638ed0ed | |
Lauren Spiegel | 7318a9de58 | |
Electra Chong | ef36f84525 | |
Lauren Spiegel | 284d0bc9bc | |
Nicolas Humbert | 39baa0bede | |
Lauren Spiegel | 00aabbfc01 | |
Nicolas Humbert | 0f4ef8e02b | |
Lauren Spiegel | de9e8b3b07 | |
Nicolas Humbert | 4792fe6fe1 | |
Lauren Spiegel | 3f9c2a5d2d | |
Electra Chong | 97a5633ef0 | |
Lauren Spiegel | d5f47c5b67 | |
Rahul Padigela | c3a38af756 | |
Rahul Padigela | 4c1d0d7084 | |
Lauren Spiegel | 367903472a | |
Nicolas Humbert | 80ae2d8b9e | |
Lauren Spiegel | 9f036e59ec | |
Dora Korpar | f14bc5fb1a | |
Lauren Spiegel | c65d3c9f31 | |
Dora Korpar | 118f128a07 | |
Lauren Spiegel | 2413188998 | |
Lauren Spiegel | a64bac4361 | |
Giorgio Regni | 441ba89c48 | |
Lauren Spiegel | 049e1204c0 | |
Nicolas Humbert | fe717fc826 | |
Lauren Spiegel | 293ff1e1ed | |
Nicolas Humbert | a5919d51ea | |
Lauren Spiegel | e08f57fbd7 | |
Jonathan Gramain | b9adc5e969 | |
Bennett Buchanan | d1efb3b842 | |
Bennett Buchanan | 60159e1418 | |
Lauren Spiegel | 599573deb6 | |
Rached Ben Mustapha | db4e00faed | |
Lauren Spiegel | 3c67970d07 | |
Nicolas Humbert | 7e532cf416 | |
Lauren Spiegel | f44351ce0c | |
Electra Chong | b5b943741f | |
Lauren Spiegel | 00a26a32b9 | |
Nicolas Humbert | f534cc1088 | |
Lauren Spiegel | f381302ff8 | |
Vinh Tao | 68e4608d6b | |
Vinh Tao | 9e4cd90017 | |
Lauren Spiegel | f6ad4c5110 | |
Dora Korpar | 6310f4d325 | |
Lauren Spiegel | 7b60f60f0b | |
Nicolas Humbert | 1edb581bb0 | |
Lauren Spiegel | 84229c1a3c | |
Christian Patry | f190e9186d | |
Electra Chong | ae5a81f1cd | |
Electra Chong | ec2c123684 | |
Electra Chong | ea928f0e9f | |
Electra Chong | 0cc7df6382 | |
Vinh Tao | 86cabedf27 | |
Vinh Tao | 4c99e25ce6 | |
Vinh Tao | f22be3850d | |
Vinh Tao | add060a35a | |
Vinh Tao | 4256c94992 | |
Vinh Tao | aeacd163f7 | |
Vinh Tao | 98ecb15ada | |
Electra Chong | a3f31a95ca | |
Vinh Tao | aab7cac02b | |
Electra Chong | 3c290dff1d | |
Electra Chong | 0527badabe | |
Vinh Tao | 3b6598650d | |
Vinh Tao | b21d9ac9bc | |
Vinh Tao | bd40bb506f | |
Vinh Tao | ab2b1867ed | |
Vinh Tao | dcccbf09a6 | |
Vinh Tao | 633da4c13c | |
Vinh Tao | e200b3334f | |
Vinh Tao | 85b173361b | |
Vinh Tao | 700364d3dd | |
Vinh Tao | f9ef5a9b8c | |
Vinh Tao | 3edd311a24 | |
Vinh Tao | 4e7eb9231e | |
Vinh Tao | 84d5084587 | |
Vinh Tao | 67745772de | |
Lauren Spiegel | 362c579cf4 | |
Nicolas Humbert | fbd0e01689 | |
Lauren Spiegel | c02af7f814 | |
Bennett Buchanan | 146dd0ca9a | |
Lauren Spiegel | be072cdc62 | |
Nicolas Humbert | 3765a68bcf | |
Lauren Spiegel | 1fcb189a94 | |
Nicolas Humbert | 78d7c91a60 | |
Lauren Spiegel | 5579442ec9 | |
Dora Korpar | 4b2e2b9704 | |
Lauren Spiegel | fc8c48d3db | |
Dora Korpar | 19cc6eb9ba | |
Lauren Spiegel | 5b1633ac63 | |
Nicolas Humbert | 803ad60bb3 | |
Lauren Spiegel | 3a036de12b | |
Jonathan Gramain | f341c98e71 | |
Lauren Spiegel | 1bddac12e3 | |
Dora Korpar | d4b5ff661a | |
Dora Korpar | e6de5b6e3c | |
Lauren Spiegel | 84a7e6f59d | |
Dora Korpar | 8cee39c2ad | |
Lauren Spiegel | cf0e32b4d1 | |
Nicolas Humbert | 0c5154819e | |
Lauren Spiegel | 56e227355a | |
Bennett Buchanan | 9596f5ea22 | |
Bennett Buchanan | b1c7e2c501 | |
Lauren Spiegel | 3e1d52dd41 | |
Lauren Spiegel | e3473c5f28 | |
Lauren Spiegel | 1b14088038 | |
Dora Korpar | a34c3ddab8 | |
Lauren Spiegel | e1700d6841 | |
Nicolas Humbert | 925fe724b4 | |
Lauren Spiegel | 9fccec21ee | |
Lauren Spiegel | e342716607 | |
Lauren Spiegel | 9999cf7bc8 | |
Anne Hohenberger | 8c9f1cd077 | |
Lauren Spiegel | fae2c494c0 | |
Evgeny Rudinsky | d6a3d661e0 | |
Lauren Spiegel | 626596dcd5 | |
Lauren Spiegel | c240d0181b | |
Lauren Spiegel | 86ed062300 | |
Nicolas Humbert | 785eb700e0 | |
Lauren Spiegel | df9ca52506 | |
Nicolas Humbert | facde83c55 | |
Lauren Spiegel | c1c5fd7b2d | |
Electra Chong | 1b939d4420 | |
Lauren Spiegel | 28bf60232b | |
Dora Korpar | a1aa90ab7b | |
Lauren Spiegel | d69c0bd2e6 | |
Lauren Spiegel | f549a1bc0f | |
Lauren Spiegel | e096c4991b | |
Nicolas Humbert | 210e224152 | |
Nicolas Humbert | 38e6044e46 | |
Lauren Spiegel | 8991faf1b6 | |
Rached Ben Mustapha | 02b796b4c5 | |
Lauren Spiegel | a55ed8fde4 | |
Electra Chong | f9031e33d0 | |
Electra Chong | 2e405c84b0 | |
Lauren Spiegel | 6f15bd6dbd | |
Nicolas Humbert | d16bf2881c | |
Alexandre Merle | 6e37e1efbe | |
Lauren Spiegel | 4b8c4dbe01 | |
Guillaume Gimenez | a06f49bd7e | |
Lauren Spiegel | a29fdc8688 | |
Guillaume Gimenez | 8fc401536d | |
Nicolas Humbert | 7024beb748 | |
Lauren Spiegel | fd8e713746 | |
Electra Chong | 5110121d63 | |
Electra Chong | d3b8c078f2 | |
Electra Chong | 90e1ccc68b | |
Electra Chong | 7d4de30d6e | |
Lauren Spiegel | 2c02e0c13c | |
Bennett Buchanan | 91e98df6ce | |
Lauren Spiegel | 8232a1a66b | |
Nicolas Humbert | 576172ba05 | |
Lauren Spiegel | 2783f0b4a2 | |
Electra Chong | ec9466b816 | |
Lauren Spiegel | ea3240b1ec | |
Nicolas Humbert | e771d8f593 | |
Lauren Spiegel | 62ccd2885c | |
Dora Korpar | 2fbcdf35c0 | |
Lauren Spiegel | 79247f8802 | |
Nicolas Humbert | 87ab2d31df | |
Lauren Spiegel | fb62007a1e | |
Nicolas Humbert | 27fd44ac74 | |
Lauren Spiegel | 26988aa188 | |
Nicolas Humbert | 87b694c20b | |
Lauren Spiegel | 224799f889 | |
Nicolas Humbert | 721a7a4bb4 | |
Rahul Padigela | 0625484ba0 | |
Rahul Padigela | 48c3d244a2 | |
Lauren Spiegel | 28c9dad3da | |
David Pineau | cf269ff65c | |
7  .babelrc

````diff
@@ -1,7 +0,0 @@
-{
-  "plugins": [
-    "transform-es2015-destructuring",
-    "transform-es2015-modules-commonjs",
-    "transform-es2015-parameters"
-  ]
-}
````
````diff
@@ -0,0 +1,54 @@
+# Issue template
+
+If you are reporting a new issue, make sure that we do not have any
+duplicates already open. You can ensure this by searching the issue list for
+this repository. If there is a duplicate, please close your issue and add a
+comment to the existing issue instead.
+
+## General support information
+
+GitHub Issues are reserved for actionable bug reports and feature requests.
+General questions should be sent to the
+[S3 scality server Forum](http://forum.scality.com/).
+
+## Bug report information
+
+(delete this section if not applicable)
+
+### Description
+
+Briefly describe the problem you are having in a few paragraphs.
+
+### Steps to reproduce the issue
+
+Please provide steps to reproduce, including full log output
+
+### Actual result
+
+Describe the results you received
+
+### Expected result
+
+Describe the results you expected
+
+### Additional information: (Node.js version, Docker version, etc)
+
+## Feature Request
+
+(delete this section if not applicable)
+
+### Proposal
+
+Describe the feature
+
+### Current behavior
+
+What currently happens
+
+### Desired behavior
+
+What you would like to happen
+
+### Use case
+
+Please provide use cases for changing the current behavior
````
````diff
@@ -0,0 +1,28 @@
+# Pull request template
+
+## Description
+
+### Motivation and context
+
+Why is this change required? What problem does it solve?
+
+### Related issues
+
+Please use the following link syntax: #600 to reference issues in the
+current repository
+
+## Checklist
+
+### Add tests to cover the changes
+
+New tests added or existing tests modified to cover all changes
+
+### Code conforms with the [style guide](https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md#coding-style-guidelines)
+
+### Sign your work
+
+In order to contribute to the project, you must sign your work
+https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md#sign-your-work
+
+Thank you again for contributing! We will try to test and integrate the change
+as soon as we can.
````
````diff
@@ -56,6 +56,8 @@ Right now, the following operations are implemented:
 - Put Bucket Website
 - Get Bucket Website
 - Delete Bucket Website
+- Put Bucket Versioning
+- Get Bucket Versioning
 - v2 Authentication
 - v4 Authentication (Transferring Payload in a Single Chunk)
 - v4 Authentication (Transferring Payload in Multiple Chunks)
````
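The hunk above adds Put and Get Bucket Versioning to the implemented-operations list. As a non-authoritative sketch of exercising the two new calls, here is what that looks like with the AWS JavaScript SDK, assuming the default accessKey1/verySecretKey1 credentials and port 8000 used throughout this compare; `mybucket` is a hypothetical existing bucket.

```javascript
// Sketch: exercising the newly listed Bucket Versioning operations.
// Assumes the server defaults documented in this compare; 'mybucket'
// is a hypothetical, pre-existing bucket.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    endpoint: 'http://localhost:8000',
    sslEnabled: false,
    s3ForcePathStyle: true,
});

s3.putBucketVersioning({
    Bucket: 'mybucket',
    VersioningConfiguration: { Status: 'Enabled' },
}, err => {
    if (err) throw err;
    s3.getBucketVersioning({ Bucket: 'mybucket' }, (err2, data) => {
        if (err2) throw err2;
        console.log(data.Status); // should print 'Enabled'
    });
});
```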
118  DOCKER.md

````diff
@@ -1,118 +0,0 @@
-# Using S3 for continuous integration testing or in production with Docker
-
-* [For continuous integration with Docker](#for-continuous-integration-with-docker)
-  * [Environment Variables](#environment-variables)
-* [In production with Docker](#in-production-with-docker)
-  * [Using Docker Volume in production](#using-docker-volume-in-production)
-  * [Adding modifying or deleting accounts or users credentials](#adding-modifying-or-deleting-accounts-or-users-credentials)
-  * [Specifying your own host name](#specifying-your-own-host-name)
-
-## For continuous integration with Docker
-
-When you start the Docker Scality S3 server image, you can adjust the
-configuration of the Scality S3 server instance by passing one or more
-environment variables on the docker run command line.
-
-### Environment Variables
-
-#### HOST_NAME
-
-This variable specifies a host name.
-If you have a domain such as example.com, by specifying that here,
-you and your users can direct s3 server requests to example.com.
-
-```shell
-docker run -d --name s3server -p 8000:8000 -e HOST_NAME=new.host.com scality/s3server
-```
-
-#### ACCESS_KEY and SECRET_KEY
-
-These variables specify authentication credentials for an account
-named "Docker".
-
-You can set credentials for many accounts by editing `conf/authdata.json`
-(see below for further info),
-but if you just want to specify one set of your own,
-you can use these environment variables.
-
-```shell
-docker run -d --name s3server -p 8000:8000 -e ACCESS_KEY=newAccessKey -e
-SECRET_KEY=newSecretKey scality/s3server
-```
-
-#### LOG_LEVEL
-
-This variable allows you to change the log level: info, debug or trace.
-The default is info. Debug will give you more detailed logs and trace
-will give you the most detailed.
-
-```shell
-docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
-```
-
-## In production with Docker
-
-### Using Docker Volume in production
-
-S3 server runs with a file backend by default.
-
-So, by default, the data is stored inside your S3 server Docker container.
-
-However, if you want your data and metadata to persist, you **MUST** use Docker
-volumes to host your data and metadata outside your s3 server Docker container.
-Otherwise, the data and metadata will be destroyed when you erase the container.
-
-```shell
-docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
--p 8000:8000 -d scality/s3server
-```
-
-This command mounts the host directory, `./data`, into the container at
-/usr/src/app/localData and the host directory, `./metadata`, into the container
-at /usr/src/app/localMetaData. It can also be any host mount point,
-like `/mnt/data` and `/mnt/metadata`.
-
-### Adding modifying or deleting accounts or users credentials
-
-1. Create locally a customized `authdata.json`.
-
-2. Use [Docker Volume](https://docs.docker.com/engine/tutorials/dockervolumes/)
-to override the default `authdata.json` through a docker file mapping.
-For example:
-
-```shell
-docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
-scality/s3server
-```
-
-### Specifying your own host name
-
-To specify a host name (e.g. s3.domain.name),
-you can provide your own
-[config.json](https://github.com/scality/S3/blob/master/config.json)
-using [Docker Volume](https://docs.docker.com/engine/tutorials/dockervolumes/).
-
-First add a new key-value pair in the regions section of your config.json.
-The key in the key-value pair should be your "region" name and the value
-is an array containing any host name you would like to add:
-
-```json
-"regions": {
-
-     ...
-
-     "localregion": ["localhost"],
-     "specifiedregion": ["s3.domain.name"]
-},
-```
-
-Then, run your Scality S3 Server using
-[Docker Volume](https://docs.docker.com/engine/tutorials/dockervolumes/):
-
-```shell
-docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server
-```
-
-Your local `config.json` file will override the default one through a docker
-file mapping.
````
````diff
@@ -1,4 +1,4 @@
-FROM node:4-slim
+FROM node:6-slim
 MAINTAINER Giorgio Regni <gr@scality.com>
 
 WORKDIR /usr/src/app
@@ -6,9 +6,9 @@ WORKDIR /usr/src/app
 COPY . /usr/src/app
 
 RUN apt-get update \
-    && apt-get install -y python git build-essential --no-install-recommends \
+    && apt-get install -y jq python git build-essential --no-install-recommends \
     && npm install --production \
-    && apt-get autoremove -y python build-essential \
+    && apt-get autoremove --purge -y python git build-essential \
     && rm -rf /var/lib/apt/lists/* \
    && npm cache clear \
    && rm -rf ~/.node-gyp \
````
````diff
@@ -1,4 +1,4 @@
-FROM node:4-slim
+FROM node:6-slim
 MAINTAINER Giorgio Regni <gr@scality.com>
 
 WORKDIR /usr/src/app
@@ -8,7 +8,7 @@ COPY . /usr/src/app
 RUN apt-get update \
     && apt-get install -y python git build-essential --no-install-recommends \
     && npm install --production \
-    && apt-get autoremove -y python build-essential \
+    && apt-get autoremove --purge -y python git build-essential \
     && rm -rf /var/lib/apt/lists/* \
    && npm cache clear \
    && rm -rf ~/.node-gyp \
````
2  LICENSE

````diff
@@ -176,7 +176,7 @@
 
 END OF TERMS AND CONDITIONS
 
-Copyright 2016 Scality
+Copyright 2015-2017 Scality
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
````
362  README.md

````diff
@@ -1,12 +1,20 @@
-# S3 Server
+# Scality S3 Server
 
 ![S3 Server logo](res/Scality-S3-Server-Logo-Large.png)
 
 [![CircleCI][badgepub]](https://circleci.com/gh/scality/S3)
 [![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/S3)
+[![Docker Pulls][badgedocker]](https://hub.docker.com/r/scality/s3server/)
+[![Docker Pulls][badgetwitter]](https://twitter.com/s3server)
 
 ## Learn more at [s3.scality.com](http://s3.scality.com)
 
+## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
+
+## Docker
+
+[Run your S3 server with Docker](https://hub.docker.com/r/scality/s3server/)
+
 ## Contributing
 
 In order to contribute, please follow the
@@ -17,7 +25,7 @@ https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
 
 ### Dependencies
 
-Building and running the S3 Server requires node.js 4.5 and npm v2
+Building and running the Scality S3 Server requires node.js 6.9.5 and npm v3
 . Up-to-date versions can be found at
 [Nodesource](https://github.com/nodesource/distributions).
 
````
````diff
@@ -41,7 +49,10 @@ npm install
 npm start
 ```
 
-This starts an S3 server on port 8000.
+This starts an S3 server on port 8000. Two additional ports 9990 and
+9991 are also open locally for internal transfer of metadata and data,
+respectively.
+
 The default access key is accessKey1 with
 a secret key of verySecretKey1.
 
````
````diff
@@ -61,6 +72,35 @@ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
 npm start
 ```
 
+## Run it with multiple data backends
+
+```shell
+export S3DATA='multiple'
+npm start
+```
+
+This starts an S3 server on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+With multiple backends, you have the ability to
+choose where each object will be saved by setting
+the following header with a locationConstraint on
+a PUT request:
+
+```shell
+'x-amz-meta-scal-location-constraint':'myLocationConstraint'
+```
+
+If no header is sent with a PUT object request, the
+location constraint of the bucket will determine
+where the data is saved. If the bucket has no location
+constraint, the endpoint of the PUT request will be
+used to determine location.
+
+See the Configuration section below to learn how to set
+location constraints.
+
 ## Run it with an in-memory backend
 
 ```shell
````
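The new README section above gives the raw `x-amz-meta-scal-location-constraint` header. For illustration only, the same header can be produced from the AWS JavaScript SDK through the `Metadata` map, which the SDK sends as `x-amz-meta-*` headers; the bucket, key, and constraint names below are placeholders.

```javascript
// Sketch only: pinning one object to a data backend via the
// 'x-amz-meta-scal-location-constraint' user-metadata header added above.
// 'mybucket' and 'myLocationConstraint' are placeholder names.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    endpoint: 'http://localhost:8000',
    sslEnabled: false,
    s3ForcePathStyle: true,
});

s3.putObject({
    Bucket: 'mybucket',
    Key: 'pinned-object',
    Body: 'hello',
    // aws-sdk sends Metadata entries as x-amz-meta-* headers, so this
    // becomes x-amz-meta-scal-location-constraint on the wire.
    Metadata: { 'scal-location-constraint': 'myLocationConstraint' },
}, err => {
    if (err) throw err;
});
```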
````diff
@@ -71,319 +111,7 @@ This starts an S3 server on port 8000.
 The default access key is accessKey1 with
 a secret key of verySecretKey1.
 
-## Run it for continuous integration testing or in production with Docker
-
-[DOCKER.md](DOCKER.md)
-
-## Testing
-
-You can run the unit tests with the following command:
-
-```shell
-npm test
-```
-
-You can run the linter with:
-
-```shell
-npm run lint
-```
-
-Running functional tests locally:
-
-The test suite requires additional tools, **s3cmd** and **Redis** installed
-in the environment the tests are running in.
-
-* Install [s3cmd](http://s3tools.org/download)
-* Install [redis](https://redis.io/download) and start Redis.
-* Add localCache section to your `config.json`:
-
-```
-"localCache": {
-    "host": REDIS_HOST,
-    "port": REDIS_PORT
-}
-```
-
-where `REDIS_HOST` is your Redis instance IP address (`"127.0.0.1"` if your
-Redis is running locally)
-and `REDIS_PORT` is your Redis instance port (`6379` by default)
-
-* Add the following to the etc/hosts file on your machine:
-
-```shell
-127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com
-```
-
-* Start the S3 server in memory and run the functional tests:
-
-```shell
-npm run mem_backend
-npm run ft_test
-```
-
-## Configuration
-
-If you want to specify an endpoint (other than localhost),
-you need to add it to your config.json:
-
-```json
-"regions": {
-
-     "localregion": ["localhost"],
-     "specifiedregion": ["myhostname.com"]
-},
-```
-
-Note that our S3server supports both:
-
-- path-style: http://myhostname.com/mybucket
-- hosted-style: http://mybucket.myhostname.com
-
-However, hosted-style requests will not hit the server if you are
-using an ip address for your host.
-So, make sure you are using path-style requests in that case.
-For instance, if you are using the AWS SDK for JavaScript,
-you would instantiate your client like this:
-
-```js
-const s3 = new aws.S3({
-    endpoint: 'http://127.0.0.1:8000',
-    s3ForcePathStyle: true,
-});
-```
-
 [badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
 [badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
+[badgetwitter]: https://img.shields.io/twitter/follow/s3server.svg?style=social&label=Follow
+[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
-
-## Getting started: List of applications that have been tested with S3 Server
-
-### GUI
-
-#### [Cyberduck](https://cyberduck.io/?l=en)
-
-- https://www.youtube.com/watch?v=-n2MCt4ukUg
-- https://www.youtube.com/watch?v=IyXHcu4uqgU
-
-#### [Cloud Explorer](https://www.linux-toys.com/?p=945)
-
-- https://www.youtube.com/watch?v=2hhtBtmBSxE
-
-### Command Line Tools
-
-#### [s3curl](https://github.com/rtdp/s3curl)
-
-https://github.com/scality/S3/blob/master/tests/functional/s3curl/s3curl.pl
-
-#### [aws-cli](http://docs.aws.amazon.com/cli/latest/reference/)
-
-`~/.aws/credentials` on Linux, OS X, or Unix or
-`C:\Users\USERNAME\.aws\credentials` on Windows
-
-```shell
-[default]
-aws_access_key_id = accessKey1
-aws_secret_access_key = verySecretKey1
-```
-
-See all buckets:
-
-```shell
-aws s3 ls --endpoint-url=http://localhost:8000
-```
-
-#### [s3cmd](http://s3tools.org/s3cmd)
-
-If using s3cmd as a client to S3 be aware that v4 signature format
-is buggy in s3cmd versions < 1.6.1.
-
-`~/.s3cfg` on Linux, OS X, or Unix or
-`C:\Users\USERNAME\.s3cfg` on Windows
-
-```shell
-[default]
-access_key = accessKey1
-secret_key = verySecretKey1
-host_base = localhost:8000
-host_bucket = %(bucket).localhost:8000
-signature_v2 = False
-use_https = False
-```
-
-See all buckets:
-
-```shell
-s3cmd ls
-```
-
-#### [rclone](http://rclone.org/s3/)
-
-`~/.rclone.conf` on Linux, OS X, or Unix or
-`C:\Users\USERNAME\.rclone.conf` on Windows
-
-```shell
-[remote]
-type = s3
-env_auth = false
-access_key_id = accessKey1
-secret_access_key = verySecretKey1
-region = other-v2-signature
-endpoint = http://localhost:8000
-location_constraint =
-acl = private
-server_side_encryption =
-storage_class =
-```
-
-See all buckets:
-
-```shell
-rclone lsd remote:
-```
-
-### JavaScript
-
-#### [AWS JavaScript SDK](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html)
-
-```javascript
-const AWS = require('aws-sdk');
-
-const s3 = new AWS.S3({
-    accessKeyId: 'accessKey1',
-    secretAccessKey: 'verySecretKey1',
-    endpoint: 'localhost:8000',
-    sslEnabled: false,
-    s3ForcePathStyle: true,
-});
-```
-
-### JAVA
-
-#### [AWS JAVA SDK](http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Client.html)
-
-```java
-import com.amazonaws.auth.AWSCredentials;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3Client;
-import com.amazonaws.services.s3.S3ClientOptions;
-import com.amazonaws.services.s3.model.Bucket;
-
-public class S3 {
-
-    public static void main(String[] args) {
-
-        AWSCredentials credentials = new BasicAWSCredentials("accessKey1",
-            "verySecretKey1");
-
-        // Create a client connection based on credentials
-        AmazonS3 s3client = new AmazonS3Client(credentials);
-        s3client.setEndpoint("http://localhost:8000");
-        // Using path-style requests
-        // (deprecated) s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
-        s3client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
-
-        // Create bucket
-        String bucketName = "javabucket";
-        s3client.createBucket(bucketName);
-
-        // List off all buckets
-        for (Bucket bucket : s3client.listBuckets()) {
-            System.out.println(" - " + bucket.getName());
-        }
-    }
-}
-```
-
-## Ruby
-
-### [AWS SDK for Ruby - Version 2](http://docs.aws.amazon.com/sdkforruby/api/)
-
-```ruby
-require 'aws-sdk'
-
-s3 = Aws::S3::Client.new(
-    :access_key_id => 'accessKey1',
-    :secret_access_key => 'verySecretKey1',
-    :endpoint => 'http://localhost:8000',
-    :force_path_style => true
-)
-
-resp = s3.list_buckets
-```
-
-#### [fog](http://fog.io/storage/)
-
-```ruby
-require "fog"
-
-connection = Fog::Storage.new(
-{
-    :provider => "AWS",
-    :aws_access_key_id => 'accessKey1',
-    :aws_secret_access_key => 'verySecretKey1',
-    :endpoint => 'http://localhost:8000',
-    :path_style => true,
-    :scheme => 'http',
-})
-```
-
-### Python
-
-#### [boto2](http://boto.cloudhackers.com/en/latest/ref/s3.html)
-
-```python
-import boto
-from boto.s3.connection import S3Connection, OrdinaryCallingFormat
-
-
-connection = S3Connection(
-    aws_access_key_id='accessKey1',
-    aws_secret_access_key='verySecretKey1',
-    is_secure=False,
-    port=8000,
-    calling_format=OrdinaryCallingFormat(),
-    host='localhost'
-)
-
-connection.create_bucket('mybucket')
-```
-
-#### [boto3](http://boto3.readthedocs.io/en/latest/index.html)
-
-``` python
-import boto3
-client = boto3.client(
-    's3',
-    aws_access_key_id='accessKey1',
-    aws_secret_access_key='verySecretKey1',
-    endpoint_url='http://localhost:8000'
-)
-
-lists = client.list_buckets()
-```
-
-### PHP
-
-Should use v3 over v2 because v2 would create virtual-hosted style URLs
-while v3 generates path-style URLs.
-
-#### [AWS PHP SDK v3](https://docs.aws.amazon.com/aws-sdk-php/v3/guide)
-
-```php
-use Aws\S3\S3Client;
-
-$client = S3Client::factory([
-    'region' => 'us-east-1',
-    'version' => 'latest',
-    'endpoint' => 'http://localhost:8000',
-    'credentials' => [
-        'key' => 'accessKey1',
-        'secret' => 'verySecretKey1'
-    ]
-]);
-
-$client->createBucket(array(
-    'Bucket' => 'bucketphp',
-));
-```
````
````diff
@@ -0,0 +1,2 @@
+---
+theme: jekyll-theme-minimal
````
````diff
@@ -2,5 +2,4 @@
 // 2>/dev/null ; exec "$(which nodejs || which node)" "$0" "$@"
 'use strict'; // eslint-disable-line strict
 
-require('babel-core/register');
 require('../lib/kms/utilities.js').createEncryptedBucket();
````
````diff
@@ -1,5 +1,4 @@
 #!/usr/bin/env node
 'use strict'; // eslint-disable-line strict
 
-require('babel-core/register');
 require('../lib/utapi/utilities.js').listMetrics('buckets');
````
````diff
@@ -1,5 +1,4 @@
 #!/usr/bin/env node
 'use strict'; // eslint-disable-line strict
 
-require('babel-core/register');
 require('../lib/utapi/utilities.js').listMetrics();
````
17  circle.yml

````diff
@@ -8,7 +8,7 @@ general:
 
 machine:
   node:
-    version: 4.5.0
+    version: 6.9.5
   ruby:
     version: "2.4.1"
   services:
@@ -42,6 +42,21 @@ test:
 
     - mkdir -p $CIRCLE_TEST_REPORTS/unit
     - npm run unit_coverage
+    - npm run start_dmd & bash wait_for_local_port.bash 9990 40
+      && npm run multiple_backend_test
+
+    # Run S3 with multiple data backends ; run ft_awssdk
+    - S3BACKEND=mem MPU_TESTING=yes S3DATA=multiple npm start
+      > $CIRCLE_ARTIFACTS/server_multiple_awssdk.txt
+      & bash wait_for_local_port.bash 8000 40
+      && S3DATA=multiple npm run ft_awssdk
+
+    # Run S3 with multiple data backends + KMS Encryption; run ft_awssdk
+    - S3BACKEND=mem MPU_TESTING=yes S3DATA=multiple npm start
+      > $CIRCLE_ARTIFACTS/server_multiple_kms_awssdk.txt
+      & bash wait_for_local_port.bash 8000 40
+      && S3DATA=multiple ENABLE_KMS_ENCRYPTION=true npm run ft_awssdk
+
     # Run S3 with mem Backend ; run ft_tests
     - S3BACKEND=mem npm start
       > $CIRCLE_ARTIFACTS/server_mem_java.txt
````
45  config.json

````diff
@@ -1,24 +1,13 @@
 {
     "port": 8000,
     "listenOn": [],
-    "regions": {
-        "ap-northeast-1": ["s3.ap-northeast-1.amazonaws.com"],
-        "ap-southeast-1": ["s3.ap-southeast-1.amazonaws.com"],
-        "ap-southeast-2": ["s3.ap-southeast-2.amazonaws.com"],
-        "eu-central-1": ["s3.eu-central-1.amazonaws.com",
-                         "s3.eu.central-1.amazonaws.com"],
-        "eu-west-1": ["s3.eu-west-1.amazonaws.com"],
-        "sa-east-1": ["s3.sa-east-1.amazonaws.com"],
-        "us-east-1": ["s3.amazonaws.com",
-                      "s3-external-1.amazonaws.com",
-                      "s3.us-east-1.amazonaws.com"],
-        "us-west-1": ["s3.us-west-1.amazonaws.com"],
-        "us-west-2": ["s3-us-west-2.amazonaws.com"],
-        "us-gov-west-1": ["s3-us-gov-west-1.amazonaws.com",
-                          "s3-fips-us-gov-west-1.amazonaws.com"],
-        "localregion": ["localhost"],
-        "test-region": ["s3.scality.test"],
-        "docker-region": ["s3.docker.test"]
+    "replicationGroupId": "RG001",
+    "restEndpoints": {
+        "localhost": "file",
+        "127.0.0.1": "file",
+        "s3.docker.test": "us-east-1",
+        "127.0.0.2": "us-east-1",
+        "s3.amazonaws.com": "us-east-1"
     },
     "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
                          "s3-website.us-east-2.amazonaws.com",
@@ -34,9 +23,6 @@
         "s3-website-sa-east-1.amazonaws.com",
         "s3-website.localhost",
         "s3-website.scality.test"],
-    "sproxyd": {
-        "bootstrap": ["localhost:8181"]
-    },
     "bucketd": {
         "bootstrap": ["localhost"]
     },
@@ -52,5 +38,20 @@
     "healthChecks": {
         "allowFrom": ["127.0.0.1/8", "::1"]
     },
-    "usEastBehavior": false
+    "metadataClient": {
+        "host": "localhost",
+        "port": 9990
+    },
+    "dataClient": {
+        "host": "localhost",
+        "port": 9991
+    },
+    "metadataDaemon": {
+        "bindAddress": "localhost",
+        "port": 9990
+    },
+    "dataDaemon": {
+        "bindAddress": "localhost",
+        "port": 9991
+    }
 }
````
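The config.json diff above replaces the `regions` list with a `restEndpoints` map from request host to a location. A minimal sketch of the kind of lookup this shape enables (the helper is illustrative, not code from this compare):

```javascript
// Sketch: resolving a request's Host header to a backend location using
// the new restEndpoints map from the diff above.
const config = require('./config.json');

function locationForHost(host) {
    // e.g. 'localhost' -> 'file', 's3.docker.test' -> 'us-east-1'
    return config.restEndpoints[host];
}

console.log(locationForHost('127.0.0.1')); // 'file'
```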
18  constants.js

````diff
@@ -1,6 +1,6 @@
-import crypto from 'crypto';
+const crypto = require('crypto');
 
-export default {
+const constants = {
     /*
      * Splitter is used to build the object name for the overview of a
      * multipart upload and to build the object names for each part of a
@@ -38,6 +38,7 @@ export default {
     // by the name of the final destination bucket for the object
     // once the multipart upload is complete.
     mpuBucketPrefix: 'mpuShadowBucket',
+    blacklistedPrefixes: { bucket: [], object: [] },
     // PublicId is used as the canonicalID for a request that contains
     // no authentication information. Requestor can access
     // only public resources
@@ -68,6 +69,11 @@ export default {
     maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 104857600 :
         5368709120,
 
+    // AWS states max size for user-defined metadata (x-amz-meta- headers) is
+    // 2 KB: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+    // In testing, AWS seems to allow up to 88 more bytes, so we do the same.
+    maximumMetaHeadersSize: 2136,
+
     // hex digest of sha256 hash of empty string:
     emptyStringHash: crypto.createHash('sha256')
         .update('', 'binary').digest('hex'),
@@ -79,16 +85,16 @@ export default {
         'inventory': true,
         'lifecycle': true,
         'list-type': true,
-        'location': true,
         'logging': true,
         'metrics': true,
         'notification': true,
         'policy': true,
-        'replication': true,
         'requestPayment': true,
         'restore': true,
-        'tagging': true,
         'torrent': true,
-        'versions': true,
     },
+    // user metadata header to set object locationConstraint
+    objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
 };
+
+module.exports = constants;
````
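The constants.js diff above introduces `maximumMetaHeadersSize: 2136` (AWS's documented 2 KB plus the 88 extra bytes observed in testing). A hedged sketch of the sort of check this constant supports — the validation function is invented for illustration, not the repository's actual code:

```javascript
// Sketch: sizing x-amz-meta-* headers against the new constant.
const constants = require('./constants');

function metaHeadersTooLarge(headers) {
    let total = 0;
    Object.keys(headers).forEach(key => {
        if (key.startsWith('x-amz-meta-')) {
            // both the header name and its value count toward the limit
            total += key.length + headers[key].length;
        }
    });
    return total > constants.maximumMetaHeadersSize;
}

console.log(metaHeadersTooLarge({ 'x-amz-meta-color': 'blue' })); // false
```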
````diff
@@ -0,0 +1,25 @@
+'use strict'; // eslint-disable-line strict
+
+const arsenal = require('arsenal');
+const { config } = require('./lib/Config.js');
+const logger = require('./lib/utilities/logger');
+
+if (config.backends.data === 'file' ||
+    (config.backends.data === 'multiple' &&
+     config.backends.metadata !== 'scality')) {
+    const dataServer = new arsenal.network.rest.RESTServer(
+        { bindAddress: config.dataDaemon.bindAddress,
+          port: config.dataDaemon.port,
+          dataStore: new arsenal.storage.data.file.DataFileStore(
+              { dataPath: config.dataDaemon.dataPath,
+                log: config.log }),
+          log: config.log });
+    dataServer.setup(err => {
+        if (err) {
+            logger.error('Error initializing REST data server',
+                         { error: err });
+            return;
+        }
+        dataServer.start();
+    });
+}
````
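For context on how this new daemon is driven elsewhere in this compare: the circle.yml change adds an `npm run start_dmd` step that waits on local port 9990 before running `multiple_backend_test`, and the config.json change introduces the `dataDaemon` and `metadataDaemon` sections (ports 9991 and 9990) from which this file takes its bind address and port.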
````diff
@@ -3,15 +3,10 @@
 # set -e stops the execution of a script if a command or pipeline has an error
 set -e
 
-if [[ "$ACCESS_KEY" && "$SECRET_KEY" ]]; then
-    sed -i "s/accessKeyDocker/$ACCESS_KEY/" ./conf/authdata.json
-    sed -i "s/verySecretKeyDocker/$SECRET_KEY/" ./conf/authdata.json
-    echo "Access key and secret key have been modified successfully"
-fi
-
 if [[ "$HOST_NAME" ]]; then
     sed -i "s/s3.docker.test/$HOST_NAME/" ./config.json
     echo "Host name has been modified to $HOST_NAME"
+    echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with $HOST_NAME"
 fi
 
 if [[ "$LOG_LEVEL" ]]; then
@@ -23,4 +18,61 @@ if [[ "$LOG_LEVEL" ]]; then
     fi
 fi
 
+if [[ "$SSL" ]]; then
+    if [[ -z "$HOST_NAME" ]]; then
+        echo "WARNING! No HOST_NAME has been provided"
+    fi
+    # This condition makes sure that the certificates are not generated twice. (for docker restart)
+    if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
+        ## Generate SSL key and certificates
+        # Generate a private key for your CSR
+        openssl genrsa -out ca.key 2048
+        # Generate a self signed certificate for your local Certificate Authority
+        openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=$SSL"
+        # Generate a key for S3 Server
+        openssl genrsa -out server.key 2048
+        # Generate a Certificate Signing Request for S3 Server
+        openssl req -new -key server.key -out server.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.$SSL"
+        # Generate a local-CA-signed certificate for S3 Server
+        openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 99999 -sha256
+    fi
+    ## Update S3Server config.json
+    # This condition makes sure that certFilePaths section is not added twice. (for docker restart)
+    if ! grep -q "certFilePaths" ./config.json; then
+        sed -i "0,/,/s//,\n \"certFilePaths\": { \"key\": \".\/server.key\", \"cert\": \".\/server.crt\", \"ca\": \".\/ca.crt\" },/" ./config.json
+    fi
+fi
+
+if [[ "$S3DATA" == "multiple" ]]; then
+    export S3DATA="$S3DATA"
+fi
+
+JQ_FILTERS="."
+
+if [[ "$LISTEN_ADDR" ]]; then
+    JQ_FILTERS="$JQ_FILTERS | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
+    JQ_FILTERS="$JQ_FILTERS | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
+    JQ_FILTERS="$JQ_FILTERS | .listenOn=[\"$LISTEN_ADDR:8000\"]"
+fi
+
+if [[ "$DATA_HOST" ]]; then
+    JQ_FILTERS="$JQ_FILTERS | .dataClient.host=\"$DATA_HOST\""
+fi
+
+if [[ "$METADATA_HOST" ]]; then
+    JQ_FILTERS="$JQ_FILTERS | .metadataClient.host=\"$METADATA_HOST\""
+fi
+
+if [[ "$REDIS_HOST" ]]; then
+    JQ_FILTERS="$JQ_FILTERS | .localCache.host=\"$REDIS_HOST\""
+    JQ_FILTERS="$JQ_FILTERS | .localCache.port=6379"
+fi
+
+if [[ "$REDIS_PORT" ]]; then
+    JQ_FILTERS="$JQ_FILTERS | .localCache.port=$REDIS_PORT"
+fi
+
+jq "$JQ_FILTERS" config.json > config.json.tmp
+mv config.json.tmp config.json
+
 exec "$@"
````
````diff
@@ -0,0 +1,916 @@
+.. role:: raw-latex(raw)
+   :format: latex
+..
+
+Architecture
+++++++++++++
+
+Versioning
+==========
+
+This document describes S3 Server's support for the AWS S3 Bucket
+Versioning feature.
+
+AWS S3 Bucket Versioning
+------------------------
+
+See AWS documentation for a description of the Bucket Versioning
+feature:
+
+- `Bucket
+  Versioning <http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html>`__
+- `Object
+  Versioning <http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html>`__
+
+This document assumes familiarity with the details of Bucket Versioning,
+including null versions and delete markers, described in the above
+links.
+
+Implementation of Bucket Versioning in S3
+-----------------------------------------
+
+Overview of Metadata and API Component Roles
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each version of an object is stored as a separate key in metadata. The
+S3 API interacts with the metadata backend to store, retrieve, and
+delete version metadata.
+
+The implementation of versioning within the metadata backend is naive.
+The metadata backend does not evaluate any information about bucket or
+version state (whether versioning is enabled or suspended, and whether a
+version is a null version or delete marker). The S3 front-end API
+manages the logic regarding versioning information, and sends
+instructions to metadata to handle the basic CRUD operations for version
+metadata.
+
+The role of the S3 API can be broken down into the following:
+
+- put and delete version data
+- store extra information about a version, such as whether it is a
+  delete marker or null version, in the object's metadata
+- send instructions to metadata backend to store, retrieve, update and
+  delete version metadata based on bucket versioning state and version
+  metadata
+- encode version ID information to return in responses to requests, and
+  decode version IDs sent in requests
+
+The implementation of Bucket Versioning in S3 is described in this
+document in two main parts. The first section, `"Implementation of
+Bucket Versioning in
+Metadata" <#implementation-of-bucket-versioning-in-metadata>`__,
+describes the way versions are stored in metadata, and the metadata
+options for manipulating version metadata.
+
+The second section, `"Implementation of Bucket Versioning in
+API" <#implementation-of-bucket-versioning-in-api>`__, describes the way
+the metadata options are used in the API within S3 actions to create new
+versions, update their metadata, and delete them. The management of null
+versions and creation of delete markers are also described in this
+section.
+
+Implementation of Bucket Versioning in Metadata
+-----------------------------------------------
+
+As mentioned above, each version of an object is stored as a separate
+key in metadata. We use version identifiers as the suffix for the keys
+of the object versions, and a special version (the `"Master
+Version" <#master-version>`__) to represent the latest version.
+
+An example of what the metadata keys might look like for an object
+``foo/bar`` with three versions (with ``.`` representing a null character):
+
++------------------------------------------------------+
+| key                                                  |
++======================================================+
+| foo/bar                                              |
++------------------------------------------------------+
+| foo/bar.098506163554375999999PARIS 0.a430a1f85c6ec   |
++------------------------------------------------------+
+| foo/bar.098506163554373999999PARIS 0.41b510cd0fdf8   |
++------------------------------------------------------+
+| foo/bar.098506163554373999998PARIS 0.f9b82c166f695   |
++------------------------------------------------------+
+
+The most recent version created is represented above in the key
+``foo/bar`` and is the master version. This special version is described
+further in the section `"Master Version" <#master-version>`__.
+
+Version ID and Metadata Key Format
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The version ID is generated by the metadata backend, and encoded in a
+hexadecimal string format by S3 before sending a response to a request.
+S3 also decodes the hexadecimal string received from a request before
+sending it to metadata to retrieve a particular version.
+
+The format of a ``version_id`` is: ``ts`` ``rep_group_id`` ``seq_id``,
+where:
+
+- ``ts``: the combination of epoch and an increasing number
+- ``rep_group_id``: the name of the deployment(s) considered one unit
+  used for replication
+- ``seq_id``: a unique value based on metadata information.
+
+The format of a key in metadata for a version is:
+``object_name separator version_id``, where:
+
+- ``object_name``: the key of the object in metadata
+- ``separator``: we use the ``null`` character (``0x00`` or ``\0``) as
+  the separator between the ``object_name`` and the ``version_id`` of a
+  key
+- ``version_id``: the version identifier; this encodes the ordering
+  information in the format described above, as metadata orders keys
+  alphabetically
+
+An example of a key in metadata:
+``foo\01234567890000777PARIS 1234.123456``, indicating that this specific
+version of ``foo`` was the ``000777``\ th entry created during the epoch
+``1234567890`` in the replication group ``PARIS`` with ``1234.123456``
+as ``seq_id``.
````
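To make the key format concrete, here is a small sketch of composing a version key as described above; the helper name and the example `version_id` are invented for illustration.

```javascript
// Sketch: building a metadata key for a version. The separator is the
// documented null character; the version_id value is an invented example.
const VID_SEPARATOR = '\u0000';

function formatVersionKey(objectName, versionId) {
    return `${objectName}${VID_SEPARATOR}${versionId}`;
}

// 'foo' + '\0' + '1234567890000777PARIS 1234.123456'
const key = formatVersionKey('foo', '1234567890000777PARIS 1234.123456');
console.log(key.length); // objectName + 1 separator byte + versionId
```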
````diff
+Master Version
+~~~~~~~~~~~~~~
+
+We store a copy of the latest version of an object's metadata using
+``object_name`` as the key; this version is called the master version.
+The master version of each object facilitates the standard GET
+operation, which would otherwise need to scan among the list of versions
+of an object for its latest version.
+
+The following table shows the layout of all versions of ``foo`` in the
+first example stored in the metadata (with dot ``.`` representing the
+null separator):
+
++----------+---------+
+| key      | value   |
++==========+=========+
+| foo      | B       |
++----------+---------+
+| foo.v2   | B       |
++----------+---------+
+| foo.v1   | A       |
++----------+---------+
+
+Metadata Versioning Options
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+S3 Server sends instructions to the metadata engine about whether to
+create a new version or overwrite, retrieve, or delete a specific
+version by sending values for special options in PUT, GET, or DELETE
+calls to metadata. The metadata engine can also list versions in the
+database, which is used by S3 to list object versions.
+
+These only describe the basic CRUD operations that the metadata engine
+can handle. How these options are used by the S3 API to generate and
+update versions is described more comprehensively in `"Implementation of
+Bucket Versioning in
+API" <#implementation-of-bucket-versioning-in-api>`__.
+
+Note: all operations (PUT and DELETE) that generate a new version of an
+object will return the ``version_id`` of the new version to the API.
````
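Before the per-operation options, a short sketch of the two read paths the master version enables — `metadata.get` stands in for the metadata backend's key-value interface and is not a real API from this compare:

```javascript
// Sketch only: the two read paths the options below distinguish.
function getLatestVersion(metadata, objectName, cb) {
    // the master version lives at the bare object name, so a plain GET
    // reads one key instead of listing all version keys
    return metadata.get(objectName, cb);
}

function getSpecificVersion(metadata, objectName, versionId, cb) {
    // a version-specific GET appends the null separator and version ID
    return metadata.get(`${objectName}\u0000${versionId}`, cb);
}
```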
PUT
|
||||||
|
^^^
|
||||||
|
|
||||||
|
- no options: original PUT operation, will update the master version
|
||||||
|
- ``versioning: true`` create a new version of the object, then update
|
||||||
|
the master version with this version.
|
||||||
|
- ``versionId: <versionId>`` update a specific version (for updating
|
||||||
|
version's ACL or tags, or remote updates in geo-replication
|
||||||
|
|
||||||
|
- if the version identified by ``versionId`` happens to be the latest
|
||||||
|
version, the master version will be updated as well
|
||||||
|
- note that with ``versionId`` set to an empty string ``''``, it will
|
||||||
|
overwrite the master version only (same as no options, but the master
|
||||||
|
version will have a ``versionId`` property set in its metadata like
|
||||||
|
any other version). The ``versionId`` will never be exposed to an
|
||||||
|
external user, but setting this internal-only ``versionID`` enables
|
||||||
|
S3 to find this version later if it is no longer the master. This
|
||||||
|
option of ``versionId`` set to ``''`` is used for creating null
|
||||||
|
versions once versioning has been suspended, which is discussed in
|
||||||
|
`"Null Version Management" <#null-version-management>`__.
|
||||||
|
|
||||||
|
Only one option is used at a time. ``versionId: <versionId>`` does not
|
||||||
|
have to be used with ``versioning: true`` set to work, nor should they
|
||||||
|
both be set. If both are used at once, the metadata engine will return
|
||||||
|
an error.
|
||||||
|
|
||||||
|
To summarize the valid combinations of versioning options:
|
||||||
|
|
||||||
|
- ``!versioning && !versionId``: normal non-versioning PUT
|
||||||
|
- ``versioning && !versionId``: create a new version, update the master
|
||||||
|
version
|
||||||
|
- ``!versioning && versionId``: update (PUT/DELETE) an existing version
|
||||||
|
- if ``versionId === ''`` update master version
|
||||||
|
|
||||||
|
Other cases are invalid and the metadata engine returns the error
|
||||||
|
``BadRequest``.
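
The following sketch condenses this decision table. It is illustrative
only; the function name and return shape are hypothetical, not the
metadata engine's actual interface:

.. code:: javascript

    // Hypothetical validation of the versioning options of a PUT call,
    // following the combinations listed above.
    function resolvePutMode(options) {
        const { versioning, versionId } = options || {};
        if (versioning && versionId !== undefined) {
            // both options at once is invalid
            return { error: 'BadRequest' };
        }
        if (versionId !== undefined) {
            // versionId === '' targets the master version only
            return versionId === '' ?
                { mode: 'update-master' } :
                { mode: 'update-specific-version', versionId };
        }
        return versioning ?
            { mode: 'create-new-version' } :
            { mode: 'non-versioning-put' };
    }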

DELETE
^^^^^^

- no options: original DELETE operation, will delete the master version
- ``versionId: <versionId>``: delete a specific version

A deletion targeting the latest version of an object has to:

- delete the specified version identified by ``versionId``
- replace the master version with a version that is a placeholder for
  deletion

  - this version contains a special keyword, 'isPHD', to indicate the
    master version was deleted and needs to be updated

- initiate a repair operation to update the value of the master
  version:

  - this involves listing the versions of the object and getting the
    latest version to replace the placeholder delete version
  - if no more versions exist, metadata deletes the master version,
    removing the key from metadata

Note: all of this happens before responding to S3, and only when the
metadata engine is instructed by S3 to delete a specific version or the
master version. See section `"Delete Markers" <#delete-markers>`__ for a
description of what happens when a Delete Object request is sent to the
S3 API.
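
A condensed sketch of that repair flow is shown below. The helper names
(``listVersions``, ``putKey``, ``deleteKey``) are hypothetical and only
stand in for the corresponding metadata engine internals:

.. code:: javascript

    // Hypothetical repair of the master version after the latest
    // version has been deleted and the master holds an 'isPHD'
    // placeholder.
    async function repairMaster(objectName) {
        const versions = await listVersions(objectName);
        if (versions.length === 0) {
            // no versions remain: remove the master key entirely
            return deleteKey(objectName);
        }
        // versions are ordered latest-first by the key encoding, so
        // promote the first remaining version to master
        return putKey(objectName, versions[0].value);
    }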

GET
^^^

- no options: original GET operation, will get the master version
- ``versionId: <versionId>``: retrieve a specific version

The implementation of a GET operation does not change compared to the
standard version. A standard GET without versioning information would
get the master version of a key. A version-specific GET would retrieve
the specific version identified by the key for that version.

LIST
^^^^

For a standard LIST on a bucket, metadata iterates through the keys by
using the separator (``\0``, represented by ``.`` in examples) as an
extra delimiter. For a listing of all versions of a bucket, there is no
change compared to the original listing function. Instead, the API
component returns all the keys in a List Object Versions call and
filters for just the keys of the master versions in a List Objects
call.

For example, a standard LIST operation against the keys in the table
below would return from metadata the list of
``[ foo/bar, bar, qux/quz, quz ]``.

+--------------+
| key          |
+==============+
| foo/bar      |
+--------------+
| foo/bar.v2   |
+--------------+
| foo/bar.v1   |
+--------------+
| bar          |
+--------------+
| qux/quz      |
+--------------+
| qux/quz.v2   |
+--------------+
| qux/quz.v1   |
+--------------+
| quz          |
+--------------+
| quz.v2       |
+--------------+
| quz.v1       |
+--------------+
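
Because master keys contain no separator, filtering them out of a raw
listing is a simple predicate, as in this illustrative sketch:

.. code:: javascript

    // The '\0' separator convention makes master keys easy to detect:
    // they are the only keys without a separator.
    const VID_SEP = '\0';
    const isMasterKey = key => !key.includes(VID_SEP);

    const keys = ['foo/bar', 'foo/bar\u0000v2', 'foo/bar\u0000v1', 'bar',
                  'qux/quz', 'qux/quz\u0000v2', 'qux/quz\u0000v1', 'quz',
                  'quz\u0000v2', 'quz\u0000v1'];
    console.log(keys.filter(isMasterKey));
    // => [ 'foo/bar', 'bar', 'qux/quz', 'quz' ]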

Implementation of Bucket Versioning in API
------------------------------------------

Object Metadata Versioning Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To access all the information needed to properly handle all cases that
may exist in versioned operations, the API stores certain
versioning-related information in the metadata attributes of each
version's object metadata.

These are the versioning-related metadata properties:

- ``isNull``: whether the version being stored is a null version.
- ``nullVersionId``: the unencoded version ID of the latest null
  version that existed before storing a non-null version.
- ``isDeleteMarker``: whether the version being stored is a delete
  marker.

The metadata engine also sets one additional metadata property when
creating the version:

- ``versionId``: the unencoded version ID of the version being stored.

Null versions and delete markers are described in further detail in
their own subsections.
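
Put together, the versioning-related part of a version's object
metadata looks roughly like the following; all values are illustrative
placeholders, not real IDs:

.. code:: javascript

    const objectMD = {
        // ...standard object metadata (ACLs, content-length, etc.)...
        isNull: false,                                // set by the API
        nullVersionId: '<unencoded null version id>', // set by the API
        isDeleteMarker: false,                        // set by the API
        versionId: '<unencoded version id>', // set by the metadata engine
    };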

Creation of New Versions
~~~~~~~~~~~~~~~~~~~~~~~~

When versioning is enabled in a bucket, APIs which normally result in
the creation of objects, such as Put Object, Complete Multipart Upload
and Copy Object, will generate new versions of objects.

S3 creates a new version and updates the master version using the
``versioning: true`` option in PUT calls to the metadata engine. As an
example, when two consecutive Put Object requests are sent to the S3
Server for a versioning-enabled bucket with the same key names, there
are two corresponding metadata PUT calls with the ``versioning`` option
set to true.

The PUT calls to metadata and resulting keys are shown below:

(1) PUT foo (first put), versioning: ``true``

+----------+---------+
| key      | value   |
+==========+=========+
| foo      | A       |
+----------+---------+
| foo.v1   | A       |
+----------+---------+

(2) PUT foo (second put), versioning: ``true``

+----------+---------+
| key      | value   |
+==========+=========+
| foo      | B       |
+----------+---------+
| foo.v2   | B       |
+----------+---------+
| foo.v1   | A       |
+----------+---------+

Null Version Management
^^^^^^^^^^^^^^^^^^^^^^^

In a bucket without versioning, or when versioning is suspended, putting
an object with the same name twice should result in the previous object
being overwritten. This is managed with null versions.

Only one null version should exist at any given time, and it is
identified in S3 requests and responses with the version id "null".

Case 1: Putting Null Versions
'''''''''''''''''''''''''''''

With respect to metadata, since the null version is overwritten by
subsequent null versions, the null version is initially stored in the
master key alone, as opposed to being stored in the master key and a new
version. S3 checks if versioning is suspended or has never been
configured, and sets the ``versionId`` option to ``''`` in PUT calls to
the metadata engine when creating a new null version.

The tables below show the keys resulting from PUT calls to metadata if
we put an object 'foo' twice, when versioning has not been enabled or is
suspended.

(1) PUT foo (first put), versionId: ``''``

+--------------+---------+
| key          | value   |
+==============+=========+
| foo (null)   | A       |
+--------------+---------+

(2) PUT foo (second put), versionId: ``''``

+--------------+---------+
| key          | value   |
+==============+=========+
| foo (null)   | B       |
+--------------+---------+

The S3 API also sets the ``isNull`` attribute to ``true`` in the version
metadata before storing the metadata for these null versions.
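
In code, the API-side preparation of such a null-version PUT could look
like the following sketch; ``putObjectMD`` is a hypothetical stand-in
for the actual metadata client call:

.. code:: javascript

    // Versioning suspended or never configured: mark the metadata as a
    // null version and overwrite the master key only.
    objectMD.isNull = true;
    putObjectMD(bucketName, objectName, objectMD,
        { versionId: '' },  // '' targets the master key, but still
                            // records an internal versionId property
        err => { /* handle error / respond to the client */ });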

Case 2: Preserving Existing Null Versions in Versioning-Enabled Bucket
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

Null versions are preserved when new non-null versions are created after
versioning has been enabled or re-enabled.

If the master version is the null version, the S3 API preserves the
current null version by storing it as a new key ``(3A)`` in a separate
PUT call to metadata, prior to overwriting the master version ``(3B)``.
This implies the null version may not necessarily be the latest or
master version.

To determine whether the master version is a null version, the S3 API
checks if the master version's ``isNull`` property is set to ``true``,
or if the ``versionId`` attribute of the master version is undefined
(indicating it is a null version that was put before bucket versioning
was configured).

Continuing the example from Case 1, if we enabled versioning and put
another object, the calls to metadata and resulting keys would resemble
the following:

(3A) PUT foo, versionId: ``<versionId of master version>`` if defined or
``<non-versioned object id>``

+-----------------+---------+
| key             | value   |
+=================+=========+
| foo             | B       |
+-----------------+---------+
| foo.v1 (null)   | B       |
+-----------------+---------+

(3B) PUT foo, versioning: ``true``

+-----------------+---------+
| key             | value   |
+=================+=========+
| foo             | C       |
+-----------------+---------+
| foo.v2          | C       |
+-----------------+---------+
| foo.v1 (null)   | B       |
+-----------------+---------+

To prevent issues with concurrent requests, S3 ensures the null version
is stored with the same version ID by using the ``versionId`` option. S3
sets the ``versionId`` option to the master version's ``versionId``
metadata attribute value during the PUT. This creates a new version with
the same version ID as the existing null master version.

The null version's ``versionId`` attribute may be undefined because it
was generated before the bucket versioning was configured. In that case,
a version ID is generated using the maximum epoch and sequence values
possible, so that the null version will be properly ordered as the last
entry in a metadata listing. This value ("non-versioned object id") is
used in the PUT call with the ``versionId`` option.
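
The 3A/3B sequence can be sketched as follows. Here ``masterMD`` and
``newMD`` denote the existing null master metadata and the new version's
metadata, while ``putObjectMD`` and ``NON_VERSIONED_OBJECT_ID`` are
hypothetical stand-ins for the metadata client call and the maximum
epoch/sequence value described above:

.. code:: javascript

    // Preserve the existing null master version (3A), then write the
    // new non-null version (3B).
    const nullVersionId = masterMD.versionId !== undefined ?
        masterMD.versionId :        // reuse the null version's own ID
        NON_VERSIONED_OBJECT_ID;    // sorts after every real version

    // (3A) re-put the null version under its own versioned key
    await putObjectMD(bucketName, objectName, masterMD,
        { versionId: nullVersionId });

    // (3B) create the new version, recording where the null version is
    newMD.nullVersionId = nullVersionId;
    await putObjectMD(bucketName, objectName, newMD,
        { versioning: true });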

Case 3: Overwriting a Null Version That is Not Latest Version
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

Normally when versioning is suspended, S3 uses the ``versionId: ''``
option in a PUT to metadata to create a null version. This also
overwrites an existing null version if it is the master version.

However, if there is a null version that is not the latest version, S3
cannot rely on the ``versionId: ''`` option, as it will not overwrite
the existing null version. Instead, before creating a new null version,
the S3 API must send a separate DELETE call to metadata specifying the
version id of the current null version to delete.

To do this, when storing a null version (3A above) before storing a new
non-null version, S3 records the version's ID in the ``nullVersionId``
attribute of the non-null version. For steps 3A and 3B above, these are
the values stored in the ``nullVersionId`` of each version's metadata:

(3A) PUT foo, versionId: ``<versionId of master version>`` if defined or
``<non-versioned object id>``

+-----------------+---------+-----------------------+
| key             | value   | value.nullVersionId   |
+=================+=========+=======================+
| foo             | B       | undefined             |
+-----------------+---------+-----------------------+
| foo.v1 (null)   | B       | undefined             |
+-----------------+---------+-----------------------+

(3B) PUT foo, versioning: ``true``

+-----------------+---------+-----------------------+
| key             | value   | value.nullVersionId   |
+=================+=========+=======================+
| foo             | C       | v1                    |
+-----------------+---------+-----------------------+
| foo.v2          | C       | v1                    |
+-----------------+---------+-----------------------+
| foo.v1 (null)   | B       | undefined             |
+-----------------+---------+-----------------------+

If defined, the ``nullVersionId`` of the master version is used with the
``versionId`` option in a DELETE call to metadata if a Put Object
request is received when versioning is suspended in a bucket.

(4A) DELETE foo, versionId: ``<nullVersionId of master version>`` (v1)

+----------+---------+
| key      | value   |
+==========+=========+
| foo      | C       |
+----------+---------+
| foo.v2   | C       |
+----------+---------+

Then the master version is overwritten with the new null version:

(4B) PUT foo, versionId: ``''``

+--------------+---------+
| key          | value   |
+==============+=========+
| foo (null)   | D       |
+--------------+---------+
| foo.v2       | C       |
+--------------+---------+

The ``nullVersionId`` attribute is also used to retrieve the correct
version when the version ID "null" is specified in certain object-level
APIs, described further in the section `"Null Version
Mapping" <#null-version-mapping>`__.

Specifying Versions in APIs for Putting Versions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Since S3 does not allow an overwrite of existing version data, Put
Object, Complete Multipart Upload and Copy Object return
``400 InvalidArgument`` if a specific version ID is specified in the
request query, e.g. for a ``PUT /foo?versionId=v1`` request.

PUT Example
~~~~~~~~~~~

When S3 receives a request to PUT an object:

- It first checks whether versioning has been configured
- If it has not been configured, S3 puts the new data, puts the
  metadata by overwriting the master version, and deletes any
  pre-existing data

If versioning has been configured, S3 checks the following:

Versioning Enabled
^^^^^^^^^^^^^^^^^^

If versioning is enabled and there is existing object metadata:

- If the master version is a null version (``isNull: true``) or has no
  version ID (put before versioning was configured):

  - store the null version metadata as a new version
  - create a new version and overwrite the master version

    - set ``nullVersionId``: version ID of the null version that was
      stored

If versioning is enabled and the master version is not null, or there is
no existing object metadata:

- create a new version and store it, and overwrite the master version

Versioning Suspended
^^^^^^^^^^^^^^^^^^^^

If versioning is suspended and there is existing object metadata:

- If the master version is a null version or has no version ID:

  - overwrite the master version with the new metadata
  - delete previous object data

- If the master version is not a null version and ``nullVersionId`` is
  defined in the object's metadata:

  - delete the current null version metadata and data
  - overwrite the master version with the new metadata

If there is no existing object metadata, create the new null version as
the master version.

In each of the above cases, set the ``isNull`` metadata attribute to
``true`` when creating the new null version.
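
The decision tree above boils down to choosing which metadata options
accompany the PUT. A compact, illustrative sketch follows; the function
and its arguments are hypothetical:

.. code:: javascript

    // Map the bucket's versioning state and the current master version
    // metadata to the options of the metadata PUT call.
    function getPutOptions(versioningState, masterMD) {
        if (versioningState === undefined) {
            return {};                   // plain overwrite of the master
        }
        if (versioningState === 'Enabled') {
            return { versioning: true }; // always create a new version
        }
        // 'Suspended': write a null version into the master key; the
        // caller must first preserve or delete any existing null version
        // as described above, using masterMD.isNull and
        // masterMD.nullVersionId.
        return { versionId: '' };
    }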

Behavior of Object-Targeting APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

API methods which can target existing objects or versions, such as Get
Object, Head Object, Get Object ACL, Put Object ACL, Copy Object and
Copy Part, will perform the action on the latest version of an object if
no version ID is specified in the request query or relevant request
header (``x-amz-copy-source-version-id`` for the Copy Object and Copy
Part APIs).

Two exceptions are the Delete Object and Multi-Object Delete APIs, which
will instead attempt to create delete markers, described in the
following section, if no version ID is specified.

No versioning options are necessary to retrieve the latest version from
metadata, since the master version is stored in a key with the name of
the object. However, when updating the latest version, such as with the
Put Object ACL API, S3 sets the ``versionId`` option in the PUT call to
metadata to the value stored in the object metadata's ``versionId``
attribute. This is done in order to update the metadata both in the
master version and the version itself, if it is not a null version.

When a version id is specified in the request query for these APIs, e.g.
``GET /foo?versionId=v1``, S3 will attempt to decode the version ID and
perform the action on the appropriate version. To do so, the API sets
the value of the ``versionId`` option to the decoded version ID in the
metadata call.
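
Recall that version IDs travel hex-encoded in requests and responses. A
minimal sketch of the decoding step follows; the function name is
hypothetical, and error handling is elided:

.. code:: javascript

    // Decode the hex-encoded version ID from the request query into
    // the unencoded form used in metadata calls.
    function decodeVersionId(query) {
        if (!query || query.versionId === undefined) {
            return undefined;  // no version specified: target the master
        }
        if (query.versionId === 'null') {
            return 'null';     // resolved via null version mapping
        }
        return Buffer.from(query.versionId, 'hex').toString();
    }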

Delete Markers
^^^^^^^^^^^^^^

If versioning has not been configured for a bucket, the Delete Object
and Multi-Object Delete APIs behave as the standard non-versioned APIs
do.

If versioning has been configured, S3 deletes object or version data
only if a specific version ID is provided in the request query, e.g.
``DELETE /foo?versionId=v1``.

If no version ID is provided, S3 creates a delete marker by creating a
0-byte version with the metadata attribute ``isDeleteMarker: true``. The
S3 API will return a ``404 NoSuchKey`` error in response to requests
getting or heading an object whose latest version is a delete marker.

To restore a previous version as the latest version of an object, the
delete marker must be deleted, by the same process as deleting any other
version.

The response varies when targeting an object whose latest version is a
delete marker for other object-level APIs that can target existing
objects and versions, without specifying the version ID:

- Get Object, Head Object, Get Object ACL, Object Copy and Copy Part
  return ``404 NoSuchKey``.
- Put Object ACL and Put Object Tagging return
  ``405 MethodNotAllowed``.

These APIs respond to requests specifying the version ID of a delete
marker with the error ``405 MethodNotAllowed``, in general. Copy Part
and Copy Object respond with ``400 InvalidRequest``.

See section `"DELETE Example" <#delete-example>`__ for a summary.

Null Version Mapping
^^^^^^^^^^^^^^^^^^^^

When the null version is specified in a request with the version ID
"null", the S3 API must use the ``nullVersionId`` stored in the latest
version to retrieve the current null version, if the null version is not
the latest version.

Thus, getting the null version is a two-step process:

1. Get the latest version of the object from metadata. If the latest
   version's ``isNull`` property is ``true``, then use the latest
   version's metadata. Otherwise,
2. Get the null version of the object from metadata, using the internal
   version ID of the current null version stored in the latest version's
   ``nullVersionId`` metadata attribute.
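
As a sketch, with ``getObjectMD`` standing in as a hypothetical
metadata GET call:

.. code:: javascript

    // Two-step lookup for a request targeting version ID "null".
    async function getNullVersionMD(bucketName, objectName) {
        // step 1: the latest (master) version
        const masterMD = await getObjectMD(bucketName, objectName, {});
        if (masterMD.isNull || masterMD.versionId === undefined) {
            return masterMD;   // the latest version is the null version
        }
        // step 2: follow the pointer kept in the latest version
        return getObjectMD(bucketName, objectName,
            { versionId: masterMD.nullVersionId });
    }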

DELETE Example
~~~~~~~~~~~~~~

The following steps are used in the delete logic for delete marker
creation (see the sketch after this section):

- If versioning has not been configured: attempt to delete the object
- If the request is a version-specific delete request: attempt to delete
  the version
- Otherwise, if it is not a version-specific delete request and
  versioning has been configured:

  - create a new 0-byte content-length version
  - in the version's metadata, set the ``isDeleteMarker`` property to
    true

- Return the version ID of any version deleted or any delete marker
  created
- Set the response header ``x-amz-delete-marker`` to true if a delete
  marker was deleted or created

The Multi-Object Delete API follows the same logic for each of the
objects or versions listed in an xml request. Note that a delete request
can result in the creation of a delete marker even if the object
requested to delete does not exist in the first place.
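
A condensed sketch of this logic; all helper names are hypothetical:

.. code:: javascript

    // Delete logic with delete marker creation, as listed above.
    async function deleteObject(bucketName, objectName, reqVersionId,
                                versioningConfigured) {
        if (!versioningConfigured) {
            return deleteMaster(bucketName, objectName);
        }
        if (reqVersionId !== undefined) {
            // version-specific delete
            return deleteVersion(bucketName, objectName, reqVersionId);
        }
        // no version specified: create a 0-byte delete marker instead
        const markerMD = { 'content-length': 0, isDeleteMarker: true };
        return putVersion(bucketName, objectName, markerMD,
            { versioning: true });
    }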

Object-level APIs which can target existing objects and versions perform
the following checks regarding delete markers:

- If it is not a version-specific request and versioning has been
  configured, check the metadata of the latest version

  - If the ``isDeleteMarker`` property is set to true, return
    ``404 NoSuchKey`` or ``405 MethodNotAllowed``

- If it is a version-specific request, check the object metadata of the
  requested version

  - If the ``isDeleteMarker`` property is set to true, return
    ``405 MethodNotAllowed`` or ``400 InvalidRequest``

Data-metadata daemon Architecture and Operational guide
=======================================================

This document presents the architecture of the data-metadata daemon
(dmd) used for the community edition of S3 server. It also provides a
guide on how to operate it.

The dmd is responsible for storing and retrieving S3 data and metadata,
and is accessed by S3 connectors through socket.io (metadata) and REST
(data) APIs.

It has been designed such that more than one S3 connector can access the
same buckets by communicating with the dmd. It also means that the dmd
can be hosted on a separate container or machine.

Operation
---------

Startup
~~~~~~~

The simplest deployment is still to launch with ``npm start``; this will
start one instance of the S3 connector and will listen on the locally
bound dmd ports 9990 and 9991 (by default, see below).

The dmd can be started independently from the S3 server by running this
command in the S3 directory:

::

    npm run start_dmd

This will open two ports:

- one is based on socket.io and is used for metadata transfers (9990 by
  default)

- the other is a REST interface used for data transfers (9991 by
  default)

Then, one or more instances of S3 server without the dmd can be started
elsewhere with:

::

    npm run start_s3server

Configuration
~~~~~~~~~~~~~

Most configuration happens in ``config.json`` for the S3 server. Local
storage paths can be changed where the dmd is started, using the same
environment variables as before: ``S3DATAPATH`` and ``S3METADATAPATH``.

In ``config.json``, the following sections are used to configure access
to the dmd through separate configuration of the data and metadata
access:

::

    "metadataClient": {
        "host": "localhost",
        "port": 9990
    },
    "dataClient": {
        "host": "localhost",
        "port": 9991
    },

To run a remote dmd, you have to do the following (see the example
configuration after this list):

- change both ``"host"`` attributes to the IP or host name where the
  dmd is run.

- modify the ``"bindAddress"`` attributes in the ``"metadataDaemon"``
  and ``"dataDaemon"`` sections where the dmd is run, so that it accepts
  remote connections (e.g. ``"::"``)
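
For instance, the client sections on the S3 connector side could point
at the dmd host, while the daemon sections on the dmd side bind to all
addresses. This is only an illustration: ``dmd.example.com`` is a
placeholder, and fields other than ``"host"``, ``"port"`` and
``"bindAddress"`` may differ in your version of ``config.json``:

::

    // on the S3 connector(s)
    "metadataClient": {
        "host": "dmd.example.com",
        "port": 9990
    },
    "dataClient": {
        "host": "dmd.example.com",
        "port": 9991
    },

    // on the dmd host
    "metadataDaemon": {
        "bindAddress": "::",
        "port": 9990
    },
    "dataDaemon": {
        "bindAddress": "::",
        "port": 9991
    },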

Architecture
------------

This section gives a bit more insight into how the dmd works internally.

.. figure:: ./images/data_metadata_daemon_arch.png
   :alt: Architecture diagram

Metadata on socket.io
~~~~~~~~~~~~~~~~~~~~~

This communication is based on an RPC system built on socket.io events
sent by S3 connectors, received by the DMD and acknowledged back to the
S3 connector.

The actual payload sent through socket.io is a JSON-serialized form of
the RPC call name and parameters, along with some additional information
like the request UIDs, and the sub-level information, sent as object
attributes in the JSON request.

With the introduction of versioning support, updates are now gathered in
the dmd for at most a few milliseconds before being batched as a single
write to the database. This batching is done server-side, so the API
still sends individual updates.

Four RPC commands are available to clients: ``put``, ``get``, ``del``
and ``createReadStream``. They more or less map to the parameters
accepted by the corresponding calls in the LevelUp implementation of
LevelDB. They differ in the following:

- The ``sync`` option is ignored (under the hood, puts are gathered
  into batches which have their ``sync`` property enforced when they
  are committed to the storage)

- Some additional versioning-specific options are supported

- ``createReadStream`` becomes asynchronous, takes an additional
  callback argument and returns the stream in the second callback
  parameter (see the sketch after this list)
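
For example, a listing via ``createReadStream`` could look like this
sketch; the ``client`` object and the range parameters are assumptions
for illustration:

.. code:: javascript

    // Asynchronous form: the stream arrives in the callback instead of
    // being returned directly.
    client.createReadStream({ gte: 'foo', lt: 'fop' }, (err, stream) => {
        if (err) {
            return console.error('listing failed', err);
        }
        stream.on('data', entry => console.log(entry.key));
        stream.on('end', () => console.log('listing complete'));
    });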

Debugging the socket.io exchanges can be achieved by running the daemon
with the ``DEBUG='socket.io*'`` environment variable set.

One parameter controls the timeout after which a pending RPC command
ends with a timeout error. It can be changed either:

- via the ``DEFAULT_CALL_TIMEOUT_MS`` option in
  ``lib/network/rpc/rpc.js``

- or in the constructor call of the ``MetadataFileClient`` object (in
  ``lib/metadata/bucketfile/backend.js``) as ``callTimeoutMs``.

The default value is 30000.

A specific implementation deals with streams, currently used for listing
a bucket. Streams emit ``"stream-data"`` events that pack one or more
items in the listing, and a special ``"stream-end"`` event when done.
Flow control is achieved by allowing a certain number of "in flight"
packets that have not received an ack yet (5 by default). Two options
can tune the behavior (for better throughput, or for making it more
robust on weak networks); they have to be set in the ``mdserver.js``
file directly, as there is no support in ``config.json`` for those
options for now:

- ``streamMaxPendingAck``: max number of pending ack events not yet
  received (default is 5)

- ``streamAckTimeoutMs``: timeout for receiving an ack after an output
  stream packet is sent to the client (default is 5000)

Data exchange through the REST data port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Data is read and written with REST semantics.

The web server recognizes a base path in the URL of ``/DataFile`` to be
a request to the data storage service.

PUT
^^^

A PUT on the ``/DataFile`` URL with the contents passed in the request
body will write a new object to the storage.

On success, a ``201 Created`` response is returned and the new URL to
the object is returned via the ``Location`` header (e.g.
``Location: /DataFile/50165db76eecea293abfd31103746dadb73a2074``). The
raw key can then be extracted simply by removing the leading
``/DataFile`` service information from the returned URL.
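
Assuming the default data port 9991, such an exchange could look like
the following; the key and sizes are illustrative:

::

    PUT /DataFile HTTP/1.1
    Host: localhost:9991
    Content-Length: 3

    abc

    HTTP/1.1 201 Created
    Location: /DataFile/50165db76eecea293abfd31103746dadb73a2074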

GET
^^^

A GET is simply issued with REST semantics, e.g.:

::

    GET /DataFile/50165db76eecea293abfd31103746dadb73a2074 HTTP/1.1

A GET request can ask for a specific range. Range support is complete
except for multiple byte ranges.

DELETE
^^^^^^

DELETE is similar to GET, except that a ``204 No Content`` response is
returned on success.

Listing
=======

Listing Types
-------------

We use three different types of metadata listing for various operations.
Here are the scenarios we use each for (summarized in the sketch after
this list):

- 'Delimiter' - when no versions are possible in the bucket since it is
  an internal-use-only bucket which is not exposed to a user. Namely,

  1. to list objects in the "user's bucket" to respond to a GET SERVICE
     request and
  2. to do internal listings on an MPU shadow bucket to complete
     multipart upload operations.

- 'DelimiterVersion' - to list all versions in a bucket
- 'DelimiterMaster' - to list just the master versions of objects in a
  bucket
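
An illustrative mapping (not actual code) of operations to listing
types:

.. code:: javascript

    // Which listing type serves which operation, per the list above.
    const listingTypeFor = {
        getService: 'Delimiter',      // internal, non-versioned listing
        completeMPU: 'Delimiter',     // MPU shadow bucket listing
        listObjectVersions: 'DelimiterVersion',
        listObjects: 'DelimiterMaster',
    };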

Algorithms
----------

The algorithms for each listing type can be found in the open-source
`scality/Arsenal <https://github.com/scality/Arsenal>`__ repository, in
`lib/algos/list <https://github.com/scality/Arsenal/tree/master/lib/algos/list>`__.

Clients
=======

List of applications that have been tested with S3 Server.

GUI
~~~

`Cyberduck <https://cyberduck.io/?l=en>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- https://www.youtube.com/watch?v=-n2MCt4ukUg
- https://www.youtube.com/watch?v=IyXHcu4uqgU

`Cloud Explorer <https://www.linux-toys.com/?p=945>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- https://www.youtube.com/watch?v=2hhtBtmBSxE

`CloudBerry Lab <http://www.cloudberrylab.com>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

- https://youtu.be/IjIx8g\_o0gY

Command Line Tools
~~~~~~~~~~~~~~~~~~

`s3curl <https://github.com/rtdp/s3curl>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

https://github.com/scality/S3/blob/master/tests/functional/s3curl/s3curl.pl

`aws-cli <http://docs.aws.amazon.com/cli/latest/reference/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``~/.aws/credentials`` on Linux, OS X, or Unix or
``C:\Users\USERNAME\.aws\credentials`` on Windows

.. code:: shell

    [default]
    aws_access_key_id = accessKey1
    aws_secret_access_key = verySecretKey1

``~/.aws/config`` on Linux, OS X, or Unix or
``C:\Users\USERNAME\.aws\config`` on Windows

.. code:: shell

    [default]
    region = us-east-1

Note: ``us-east-1`` is the default region, but you can specify any
region.

See all buckets:

.. code:: shell

    aws s3 ls --endpoint-url=http://localhost:8000

Create bucket:

.. code:: shell

    aws --endpoint-url=http://localhost:8000 s3 mb s3://mybucket

`s3cmd <http://s3tools.org/s3cmd>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If using s3cmd as a client to S3, be aware that the v4 signature format
is buggy in s3cmd versions < 1.6.1.

``~/.s3cfg`` on Linux, OS X, or Unix or ``C:\Users\USERNAME\.s3cfg`` on
Windows

.. code:: shell

    [default]
    access_key = accessKey1
    secret_key = verySecretKey1
    host_base = localhost:8000
    host_bucket = %(bucket).localhost:8000
    signature_v2 = False
    use_https = False

See all buckets:

.. code:: shell

    s3cmd ls

`rclone <http://rclone.org/s3/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

``~/.rclone.conf`` on Linux, OS X, or Unix or
``C:\Users\USERNAME\.rclone.conf`` on Windows

.. code:: shell

    [remote]
    type = s3
    env_auth = false
    access_key_id = accessKey1
    secret_access_key = verySecretKey1
    region = other-v2-signature
    endpoint = http://localhost:8000
    location_constraint =
    acl = private
    server_side_encryption =
    storage_class =

See all buckets:

.. code:: shell

    rclone lsd remote:

JavaScript
~~~~~~~~~~

`AWS JavaScript SDK <http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: javascript

    const AWS = require('aws-sdk');

    const s3 = new AWS.S3({
        accessKeyId: 'accessKey1',
        secretAccessKey: 'verySecretKey1',
        endpoint: 'localhost:8000',
        sslEnabled: false,
        s3ForcePathStyle: true,
    });

JAVA
~~~~

`AWS JAVA SDK <http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Client.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: java

    import com.amazonaws.auth.AWSCredentials;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3Client;
    import com.amazonaws.services.s3.S3ClientOptions;
    import com.amazonaws.services.s3.model.Bucket;

    public class S3 {

        public static void main(String[] args) {

            AWSCredentials credentials = new BasicAWSCredentials("accessKey1",
                "verySecretKey1");

            // Create a client connection based on credentials
            AmazonS3 s3client = new AmazonS3Client(credentials);
            s3client.setEndpoint("http://localhost:8000");
            // Use path-style requests
            // (deprecated) s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
            s3client.setS3ClientOptions(S3ClientOptions.builder()
                .setPathStyleAccess(true).build());

            // Create a bucket
            String bucketName = "javabucket";
            s3client.createBucket(bucketName);

            // List all buckets
            for (Bucket bucket : s3client.listBuckets()) {
                System.out.println(" - " + bucket.getName());
            }
        }
    }

Ruby
~~~~

`AWS SDK for Ruby - Version 2 <http://docs.aws.amazon.com/sdkforruby/api/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: ruby

    require 'aws-sdk'

    s3 = Aws::S3::Client.new(
        :access_key_id => 'accessKey1',
        :secret_access_key => 'verySecretKey1',
        :endpoint => 'http://localhost:8000',
        :force_path_style => true
    )

    resp = s3.list_buckets

`fog <http://fog.io/storage/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: ruby

    require "fog"

    connection = Fog::Storage.new(
    {
        :provider => "AWS",
        :aws_access_key_id => 'accessKey1',
        :aws_secret_access_key => 'verySecretKey1',
        :endpoint => 'http://localhost:8000',
        :path_style => true,
        :scheme => 'http',
    })

Python
~~~~~~

`boto2 <http://boto.cloudhackers.com/en/latest/ref/s3.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: python

    import boto
    from boto.s3.connection import S3Connection, OrdinaryCallingFormat

    connection = S3Connection(
        aws_access_key_id='accessKey1',
        aws_secret_access_key='verySecretKey1',
        is_secure=False,
        port=8000,
        calling_format=OrdinaryCallingFormat(),
        host='localhost'
    )

    connection.create_bucket('mybucket')

`boto3 <http://boto3.readthedocs.io/en/latest/index.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: python

    import boto3

    client = boto3.client(
        's3',
        aws_access_key_id='accessKey1',
        aws_secret_access_key='verySecretKey1',
        endpoint_url='http://localhost:8000'
    )

    lists = client.list_buckets()

PHP
~~~

Use the v3 SDK rather than v2, because v2 creates virtual-hosted-style
URLs while v3 generates path-style URLs.

`AWS PHP SDK v3 <https://docs.aws.amazon.com/aws-sdk-php/v3/guide>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: php

    use Aws\S3\S3Client;

    $client = S3Client::factory([
        'region' => 'us-east-1',
        'version' => 'latest',
        'endpoint' => 'http://localhost:8000',
        'credentials' => [
            'key' => 'accessKey1',
            'secret' => 'verySecretKey1'
        ]
    ]);

    $client->createBucket(array(
        'Bucket' => 'bucketphp',
    ));

Contributing
============

Need help?
----------

We're always glad to help out. Simply open a
`GitHub issue <https://github.com/scality/S3/issues>`__ and we'll give you
insight. If what you want is not available, and if you're willing to help us
out, we'll be happy to welcome you in the team, whether for a small fix or for
a larger feature development. Thanks for your interest!

Got an idea? Get started!
-------------------------

In order to contribute, please follow the `Contributing
Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__.
If anything is unclear to you, reach out to us on
`slack <https://zenko-io.slack.com/>`__ or via a GitHub issue.

Don't write code? There are other ways to help!
-----------------------------------------------

We're always eager to learn about our users' stories. If you can't contribute
code, but would love to help us, please shoot us an email at zenko@scality.com,
and tell us what our software enables you to do! Thanks for your time!

Docker
======

- `For continuous integration with
  Docker <#for-continuous-integration-with-docker>`__

  - `Environment Variables <#environment-variables>`__

- `In production with Docker <#in-production-with-docker>`__

  - `Using Docker Volume in
    production <#using-docker-volume-in-production>`__
  - `Adding modifying or deleting accounts or users
    credentials <#adding-modifying-or-deleting-accounts-or-users-credentials>`__
  - `Specifying your own host name <#specifying-your-own-host-name>`__
  - `Running as an unprivileged
    user <#running-as-an-unprivileged-user>`__

For continuous integration with Docker
--------------------------------------

When you start the Docker Scality S3 server image, you can adjust the
configuration of the Scality S3 server instance by passing one or more
environment variables on the ``docker run`` command line.

Environment Variables
~~~~~~~~~~~~~~~~~~~~~

S3DATA=multiple
^^^^^^^^^^^^^^^

This runs Scality S3 server with multiple data backends. `More
info <https://github.com/scality/S3#run-it-with-multiple-data-backends>`__

.. code:: shell

    docker run -d --name s3server -p 8000:8000 -e S3DATA=multiple scality/s3server

HOST\_NAME
^^^^^^^^^^

This variable specifies a host name. If you have a domain such as
new.host.com, by specifying that here, you and your users can direct S3
server requests to new.host.com.

.. code:: shell

    docker run -d --name s3server -p 8000:8000 -e HOST_NAME=new.host.com scality/s3server

Note: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
permissions, make sure to associate 127.0.0.1 with ``new.host.com``.

SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

These variables specify authentication credentials for an account named
"CustomAccount".

You can set credentials for many accounts by editing
``conf/authdata.json`` (see below for further info), but if you just
want to specify one set of your own, you can use these environment
variables.

.. code:: shell

    docker run -d --name s3server -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey
    -e SCALITY_SECRET_ACCESS_KEY=newSecretKey scality/s3server

Note: Anything in the ``authdata.json`` file will be ignored.

Note: The old ``ACCESS_KEY`` and ``SECRET_KEY`` environment variables
are now deprecated.

LOG\_LEVEL
^^^^^^^^^^

This variable allows you to change the log level: info, debug or trace.
The default is info. Debug will give you more detailed logs and trace
will give you the most detailed.

.. code:: shell

    docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server

SSL
^^^

This variable specifies the Common Name ``<DOMAIN_NAME>`` used to create
the Certificate Signing Request using OpenSSL. This allows you to run S3
with SSL.

**Note**: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
permissions, make sure to associate 127.0.0.1 with
``<SUBDOMAIN>.<DOMAIN_NAME>``.

**Warning**: These certs, being self-signed (and the CA being generated
inside the container), will be untrusted by any clients, and could
disappear on a container upgrade. That's ok as long as it's for quick
testing. For anything beyond testing, best security practice would be to
use an extra container such as haproxy/nginx/stunnel to do SSL/TLS
termination, limiting what an exploit on either component could expose,
and to use certificates stored in a mounted volume.

.. code:: shell

    docker run -d --name s3server -p 8000:8000 -e SSL=<DOMAIN_NAME> -e HOST_NAME=<SUBDOMAIN>.<DOMAIN_NAME>
    scality/s3server

More information about how to use S3 server with SSL is available
`here <https://s3.scality.com/v1.0/page/scality-with-ssl>`__.

LISTEN\_ADDR
^^^^^^^^^^^^

This variable instructs the S3 server, and its data and metadata
components, to listen on the specified address. This allows starting the
data or metadata servers as standalone services, for example.

.. code:: shell

    docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0
    scality/s3server npm run start_dataserver

DATA\_HOST and METADATA\_HOST
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

These variables configure the data and metadata servers to use,
typically when they run on another host and only the stateless S3 REST
server is started locally.

.. code:: shell

    docker run -d --name s3server -e DATA_HOST=s3server-data
    -e METADATA_HOST=s3server-metadata scality/s3server npm run start_s3server

REDIS\_HOST
^^^^^^^^^^^

Use this variable to connect to a Redis cache server on a host other
than localhost.

.. code:: shell

    docker run -d --name s3server -p 8000:8000
    -e REDIS_HOST=my-redis-server.example.com scality/s3server

REDIS\_PORT
^^^^^^^^^^^

Use this variable to connect to a Redis cache server on a port other
than the default 6379.

.. code:: shell

    docker run -d --name s3server -p 8000:8000
    -e REDIS_PORT=6379 scality/s3server

In production with Docker
-------------------------

Using Docker Volume in production
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

S3 server runs with a file backend by default, so the data is stored
inside your S3 server Docker container.

However, if you want your data and metadata to persist, you **MUST** use
Docker volumes to host your data and metadata outside your S3 server
Docker container. Otherwise, the data and metadata will be destroyed
when you erase the container.

.. code:: shell

    docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
    -p 8000:8000 -d scality/s3server

This command mounts the host directory ``./data`` into the container
at ``/usr/src/app/localData`` and the host directory ``./metadata`` into
the container at ``/usr/src/app/localMetadata``. It can also be any host
mount point, like ``/mnt/data`` and ``/mnt/metadata``.

Adding modifying or deleting accounts or users credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1. Create locally a customized ``authdata.json``.

2. Use `Docker
   Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__
   to override the default ``authdata.json`` through a docker file
   mapping. For example:

.. code:: shell

    docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
    scality/s3server

Specifying your own host name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To specify a host name (e.g. s3.domain.name), you can provide your own
`config.json <https://github.com/scality/S3/blob/master/config.json>`__
using `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__.

First add a new key-value pair in the restEndpoints section of your
config.json. The key in the key-value pair should be the host name you
would like to add and the value is the default location\_constraint for
this endpoint.

For example, ``s3.example.com`` is mapped to ``us-east-1`` which is one
of the ``location_constraints`` listed in your locationConfig.json file
`here <https://github.com/scality/S3/blob/master/locationConfig.json>`__.

More information about location configuration is available
`here <https://github.com/scality/S3/blob/master/README.md#location-configuration>`__.

.. code:: json

    "restEndpoints": {
        "localhost": "file",
        "127.0.0.1": "file",
        ...
        "s3.example.com": "us-east-1"
    },

Then, run your Scality S3 Server using `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__:

.. code:: shell

    docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server

Your local ``config.json`` file will override the default one through a
docker file mapping.

Running as an unprivileged user
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

S3 Server runs as root by default.

You can change that by modifying the Dockerfile and specifying a user
before the entrypoint.

The user needs to exist within the container, and own the folder
**/usr/src/app** for Scality S3 Server to run properly.

For instance, you can modify these lines in the Dockerfile:

.. code:: shell

    ...
    && groupadd -r -g 1001 scality \
    && useradd -u 1001 -g 1001 -d /usr/src/app -r scality \
    && chown -R scality:scality /usr/src/app

    ...

    USER scality
    ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]

Getting Started
===============

.. figure:: ../res/Scality-S3-Server-Logo-Large.png
   :alt: S3 Server logo

|CircleCI| |Scality CI|

Installation
------------

Dependencies
~~~~~~~~~~~~

Building and running the Scality S3 Server requires node.js 6.9.5 and
npm v3. Up-to-date versions can be found at
`Nodesource <https://github.com/nodesource/distributions>`__.

Clone source code
~~~~~~~~~~~~~~~~~

.. code:: shell

    git clone https://github.com/scality/S3.git

Install js dependencies
~~~~~~~~~~~~~~~~~~~~~~~

Go to the ./S3 folder,

.. code:: shell

    npm install

Run it with a file backend
--------------------------

.. code:: shell

    npm start

This starts an S3 server on port 8000. Two additional ports, 9990 and
9991, are also open locally for internal transfer of metadata and data,
respectively.

The default access key is accessKey1 with a secret key of
verySecretKey1.

By default the metadata files will be saved in the localMetadata
directory and the data files will be saved in the localData directory
within the ./S3 directory on your machine. These directories have been
pre-created within the repository. If you would like to save the data or
metadata in different locations of your choice, you must specify them
with absolute paths. So, when starting the server:

.. code:: shell

    mkdir -m 700 $(pwd)/myFavoriteDataPath
    mkdir -m 700 $(pwd)/myFavoriteMetadataPath
    export S3DATAPATH="$(pwd)/myFavoriteDataPath"
    export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
    npm start
Run it with multiple data backends
|
||||||
|
----------------------------------
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
export S3DATA='multiple'
|
||||||
|
npm start
|
||||||
|
|
||||||
|
This starts an S3 server on port 8000. The default access key is
|
||||||
|
accessKey1 with a secret key of verySecretKey1.
|
||||||
|
|
||||||
|
With multiple backends, you have the ability to choose where each object
|
||||||
|
will be saved by setting the following header with a locationConstraint
|
||||||
|
on a PUT request:
|
||||||
|
|
||||||
|
.. code:: shell
|
||||||
|
|
||||||
|
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
|
||||||
|
|
||||||
|
If no header is sent with a PUT object request, the location constraint
|
||||||
|
of the bucket will determine where the data is saved. If the bucket has
|
||||||
|
no location constraint, the endpoint of the PUT request will be used to
|
||||||
|
determine location.
|
||||||
|
|
||||||
|
See the Configuration section below to learn how to set location
|
||||||
|
constraints.
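
As a sketch of what this looks like from a client, here is how you could
send that header with the AWS SDK for JavaScript (the bucket name, key,
and location constraint below are placeholders; the SDK sends each
``Metadata`` entry as an ``x-amz-meta-*`` header):

.. code:: js

    const AWS = require('aws-sdk');

    const s3 = new AWS.S3({
        endpoint: 'http://127.0.0.1:8000',
        accessKeyId: 'accessKey1',
        secretAccessKey: 'verySecretKey1',
        s3ForcePathStyle: true,
    });

    // 'scal-location-constraint' below is transmitted as the
    // 'x-amz-meta-scal-location-constraint' header on the PUT request
    s3.putObject({
        Bucket: 'mybucket',
        Key: 'mykey',
        Body: 'some data',
        Metadata: { 'scal-location-constraint': 'myLocationConstraint' },
    }, (err, data) => {
        if (err) {
            return console.log('err putObject', err);
        }
        return console.log('stored with ETag', data.ETag);
    });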

Run it with an in-memory backend
--------------------------------

.. code:: shell

    npm run mem_backend

This starts an S3 server on port 8000. The default access key is
accessKey1 with a secret key of verySecretKey1.

Run it for continuous integration testing or in production with Docker
----------------------------------------------------------------------

`DOCKER.rst <DOCKER.rst>`__

Testing
-------

You can run the unit tests with the following command:

.. code:: shell

    npm test

You can run the multiple backend unit tests with:

.. code:: shell

    npm run multiple_backend_test

You can run the linter with:

.. code:: shell

    npm run lint

Running functional tests locally:

The test suite requires additional tools, **s3cmd** and **Redis**,
installed in the environment the tests are running in.

- Install `s3cmd <http://s3tools.org/download>`__
- Install `redis <https://redis.io/download>`__ and start Redis.
- Add localCache section to your ``config.json``:

::

    "localCache": {
        "host": REDIS_HOST,
        "port": REDIS_PORT
    }

where ``REDIS_HOST`` is your Redis instance IP address (``"127.0.0.1"``
if your Redis is running locally) and ``REDIS_PORT`` is your Redis
instance port (``6379`` by default).
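
For a Redis running locally on the default port, that section would
simply read:

::

    "localCache": {
        "host": "127.0.0.1",
        "port": 6379
    }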

- Add the following to the ``/etc/hosts`` file on your machine:

.. code:: shell

    127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com

- Start the S3 server in memory and run the functional tests:

.. code:: shell

    npm run mem_backend
    npm run ft_test

Configuration
-------------

There are three configuration files for your Scality S3 Server:

1. ``conf/authdata.json``, described above for authentication

2. ``locationConfig.json``, to set up configuration options for
   where data will be saved

3. ``config.json``, for general configuration options

Location Configuration
~~~~~~~~~~~~~~~~~~~~~~

You must specify at least one locationConstraint in your
locationConfig.json (or leave as pre-configured).

You must also specify 'us-east-1' as a locationConstraint, so if you
define only one locationConstraint, it must be 'us-east-1'. If you put
a bucket to an unknown endpoint and do not specify a locationConstraint
in the put bucket call, us-east-1 will be used.

For instance, the following locationConstraint will save data sent to
``myLocationConstraint`` to the file backend:

.. code:: json

    "myLocationConstraint": {
        "type": "file",
        "legacyAwsBehavior": false,
        "details": {}
    },

Each locationConstraint must include the ``type``,
``legacyAwsBehavior``, and ``details`` keys. ``type`` indicates which
backend will be used for that region. Currently, mem, file, and scality
are the supported backends. ``legacyAwsBehavior`` indicates whether the
region will have the same behavior as the AWS S3 'us-east-1' region. If
the locationConstraint type is scality, ``details`` should contain
connector information for sproxyd. If the locationConstraint type is mem
or file, ``details`` should be empty.
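
As a sketch, a scality-type locationConstraint could look like the
following (the constraint name, bootstrap addresses, and sproxyd path
are placeholders to adapt to your deployment):

.. code:: json

    "myScalityConstraint": {
        "type": "scality",
        "legacyAwsBehavior": false,
        "details": {
            "connector": {
                "sproxyd": {
                    "bootstrap": ["10.0.0.1:8181", "10.0.0.2:8181"],
                    "path": "/proxy/arc"
                }
            }
        }
    },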

Once you have your locationConstraints in your locationConfig.json, you
can specify a default locationConstraint for each of your endpoints.

For instance, the following sets the ``localhost`` endpoint to the
``myLocationConstraint`` data backend defined above:

.. code:: json

    "restEndpoints": {
        "localhost": "myLocationConstraint"
    },

If you would like to use an endpoint other than localhost for your
Scality S3 Server, that endpoint MUST be listed in your
``restEndpoints``. Otherwise, if your server is running with a:

- **file backend**: your default location constraint will be ``file``

- **memory backend**: your default location constraint will be ``mem``
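
For example, to serve requests addressed to a hostname of your own (the
name below is a placeholder), you would list it in ``restEndpoints``
next to the defaults:

.. code:: json

    "restEndpoints": {
        "localhost": "myLocationConstraint",
        "s3.mydomain.com": "myLocationConstraint"
    },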

Endpoints
~~~~~~~~~

Note that our S3server supports both:

- path-style: http://myhostname.com/mybucket
- hosted-style: http://mybucket.myhostname.com

However, hosted-style requests will not hit the server if you are using
an IP address for your host. So, make sure you are using path-style
requests in that case. For instance, if you are using the AWS SDK for
JavaScript, you would instantiate your client like this:

.. code:: js

    const s3 = new aws.S3({
        endpoint: 'http://127.0.0.1:8000',
        s3ForcePathStyle: true,
    });

Setting your own access key and secret key pairs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can set credentials for many accounts by editing
``conf/authdata.json`` but if you want to specify one set of your own
credentials, you can use ``SCALITY_ACCESS_KEY_ID`` and
``SCALITY_SECRET_ACCESS_KEY`` environment variables.

SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

These variables specify authentication credentials for an account named
"CustomAccount".

Note: Anything in the ``authdata.json`` file will be ignored.

.. code:: shell

    SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey npm start

Scality with SSL
~~~~~~~~~~~~~~~~~~~~~~

If you wish to use https with your local S3 Server, you need to set up
SSL certificates. Here is a simple guide of how to do it.

Deploying S3 Server
^^^^^^^^^^^^^^^^^^^

First, you need to deploy **S3 Server**. This can be done very easily
via `our **DockerHub**
page <https://hub.docker.com/r/scality/s3server/>`__ (you want to run it
with a file backend).

*Note:* *- If you don't have docker installed on your machine, here
are the `instructions to install it for your
distribution <https://docs.docker.com/engine/installation/>`__*

Updating your S3 Server container's config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You're going to add your certificates to your container. In order to do
so, you need to exec inside your s3 server container. Run a
``$> docker ps`` and find your container's id (the corresponding image
name should be ``scality/s3server``). Copy the corresponding container
id (here we'll use ``894aee038c5e``) and run:

.. code:: sh

    $> docker exec -it 894aee038c5e bash

You're now inside your container, using an interactive terminal :)

Generate SSL key and certificates
**********************************

There are 5 steps to this generation. The paths where the different
files are stored are defined after the ``-out`` option in each command.

.. code:: sh

    # Generate a private key for your CSR
    $> openssl genrsa -out ca.key 2048
    # Generate a self signed certificate for your local Certificate Authority
    $> openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=scality.test"

    # Generate a key for S3 Server
    $> openssl genrsa -out test.key 2048
    # Generate a Certificate Signing Request for S3 Server
    $> openssl req -new -key test.key -out test.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.scality.test"
    # Generate a local-CA-signed certificate for S3 Server
    $> openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 99999 -sha256

Update S3Server ``config.json``
**********************************

Add a ``certFilePaths`` section to ``./config.json`` with the
appropriate paths:

.. code:: json

    "certFilePaths": {
        "key": "./test.key",
        "cert": "./test.crt",
        "ca": "./ca.crt"
    }

Run your container with the new config
****************************************

First, you need to exit your container. Simply run ``$> exit``. Then,
you need to restart your container. Normally, a simple
``$> docker restart s3server`` should do the trick.

Update your host config
^^^^^^^^^^^^^^^^^^^^^^^

Associate local IP addresses with hostname
*******************************************

In your ``/etc/hosts`` file on Linux, OS X, or Unix (with root
permissions), edit the line of localhost so it looks like this:

::

    127.0.0.1      localhost s3.scality.test

Copy the local certificate authority from your container
*********************************************************

In the above commands, it's the file named ``ca.crt``. Choose the path
you want to save this file at (here we chose ``/root/ca.crt``), and run
something like:

.. code:: sh

    $> docker cp 894aee038c5e:/usr/src/app/ca.crt /root/ca.crt

Test your config
^^^^^^^^^^^^^^^^^

If you do not have aws-sdk installed, run ``$> npm install aws-sdk``. In
a ``test.js`` file, paste the following script:

.. code:: js

    const AWS = require('aws-sdk');
    const fs = require('fs');
    const https = require('https');

    const httpOptions = {
        agent: new https.Agent({
            // path on your host of the self-signed certificate
            ca: fs.readFileSync('./ca.crt', 'ascii'),
        }),
    };

    const s3 = new AWS.S3({
        httpOptions,
        accessKeyId: 'accessKey1',
        secretAccessKey: 'verySecretKey1',
        // The endpoint must be s3.scality.test, else SSL will not work
        endpoint: 'https://s3.scality.test:8000',
        sslEnabled: true,
        // With this setup, you must use path-style bucket access
        s3ForcePathStyle: true,
    });

    const bucket = 'cocoriko';

    s3.createBucket({ Bucket: bucket }, err => {
        if (err) {
            return console.log('err createBucket', err);
        }
        return s3.deleteBucket({ Bucket: bucket }, err => {
            if (err) {
                return console.log('err deleteBucket', err);
            }
            return console.log('SSL is cool!');
        });
    });

Now run that script with ``$> nodejs test.js``. If all goes well, it
should output ``SSL is cool!``. Enjoy that added security!


.. |CircleCI| image:: https://circleci.com/gh/scality/S3.svg?style=svg
   :target: https://circleci.com/gh/scality/S3
.. |Scality CI| image:: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
   :target: http://ci.ironmann.io/gh/scality/S3

@ -0,0 +1,642 @@

Integrations
++++++++++++

High Availability
=================

`Docker swarm <https://docs.docker.com/engine/swarm/>`__ is a
clustering tool developed by Docker and ready to use with its
containers. It lets you start a service, which we define and use as a
means to ensure s3server's continuous availability to the end user.
Indeed, a swarm defines a manager and n workers among n+1 servers. We
will do a basic setup in this tutorial, with just 3 servers, which
already provides a strong service resiliency, whilst remaining easy to
do as an individual. We will use NFS through docker to share data and
metadata between the different servers.

You will see that the steps of this tutorial are defined as **On
Server**, **On Clients**, **On All Machines**. This refers respectively
to NFS Server, NFS Clients, or NFS Server and Clients. In our example,
the IP of the Server will be **10.200.15.113**, while the IPs of the
Clients will be **10.200.15.96 and 10.200.15.97**.

Installing docker
-----------------

Any version from docker 1.12.6 onwards should work; we used Docker
17.03.0-ce for this tutorial.

On All Machines
~~~~~~~~~~~~~~~

On Ubuntu 14.04
^^^^^^^^^^^^^^^

The docker website has `solid
documentation <https://docs.docker.com/engine/installation/linux/ubuntu/>`__.
We have chosen to install the aufs dependency, as recommended by Docker.
Here are the required commands:

.. code:: sh

    $> sudo apt-get update
    $> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
    $> sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
    $> curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    $> sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    $> sudo apt-get update
    $> sudo apt-get install docker-ce

On CentOS 7
^^^^^^^^^^^

The docker website has `solid
documentation <https://docs.docker.com/engine/installation/linux/centos/>`__.
Here are the required commands:

.. code:: sh

    $> sudo yum install -y yum-utils
    $> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    $> sudo yum makecache fast
    $> sudo yum install docker-ce
    $> sudo systemctl start docker

Configure NFS
-------------

On Clients
~~~~~~~~~~

Your NFS Clients will mount Docker volumes over your NFS Server's shared
folders. Hence, you don't have to mount anything manually, you just have
to install the NFS commons:

On Ubuntu 14.04
^^^^^^^^^^^^^^^

Simply install the NFS commons:

.. code:: sh

    $> sudo apt-get install nfs-common

On CentOS 7
^^^^^^^^^^^

Install the NFS utils, and then start the required services:

.. code:: sh

    $> yum install nfs-utils
    $> sudo systemctl enable rpcbind
    $> sudo systemctl enable nfs-server
    $> sudo systemctl enable nfs-lock
    $> sudo systemctl enable nfs-idmap
    $> sudo systemctl start rpcbind
    $> sudo systemctl start nfs-server
    $> sudo systemctl start nfs-lock
    $> sudo systemctl start nfs-idmap

On Server
~~~~~~~~~

Your NFS Server will be the machine to physically host the data and
metadata. The packages we will install on it are slightly different
from the ones we installed on the clients.

On Ubuntu 14.04
^^^^^^^^^^^^^^^

Install the NFS server specific package and the NFS commons:

.. code:: sh

    $> sudo apt-get install nfs-kernel-server nfs-common

On CentOS 7
^^^^^^^^^^^

Same steps as with the client: install the NFS utils and start the
required services:

.. code:: sh

    $> yum install nfs-utils
    $> sudo systemctl enable rpcbind
    $> sudo systemctl enable nfs-server
    $> sudo systemctl enable nfs-lock
    $> sudo systemctl enable nfs-idmap
    $> sudo systemctl start rpcbind
    $> sudo systemctl start nfs-server
    $> sudo systemctl start nfs-lock
    $> sudo systemctl start nfs-idmap

On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Choose where your shared data and metadata from your local `S3
Server <http://www.scality.com/scality-s3-server/>`__ will be stored.
We chose to go with /var/nfs/data and /var/nfs/metadata. You also need
to set proper sharing permissions for these folders as they'll be shared
over NFS:

.. code:: sh

    $> mkdir -p /var/nfs/data /var/nfs/metadata
    $> chmod -R 777 /var/nfs/

Now you need to update your **/etc/exports** file. This is the file that
configures network permissions and rwx permissions for NFS access. By
default, Ubuntu applies the no\_subtree\_check option, so we declared
both folders with the same permissions, even though they're in the same
tree:

.. code:: sh

    $> sudo vim /etc/exports

In this file, add the following lines:

.. code:: sh

    /var/nfs/data        10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
    /var/nfs/metadata    10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)

Export this new NFS table:

.. code:: sh

    $> sudo exportfs -a

Finally, you need to allow NFS mounts from Docker volumes on other
machines. You need to change the Docker config in
**/lib/systemd/system/docker.service**:

.. code:: sh

    $> sudo vim /lib/systemd/system/docker.service

In this file, change the **MountFlags** option:

.. code:: sh

    MountFlags=shared

Now you just need to restart the NFS server and docker daemons so your
changes apply.

On Ubuntu 14.04
^^^^^^^^^^^^^^^

Restart your NFS Server and docker services:

.. code:: sh

    $> sudo service nfs-kernel-server restart
    $> sudo service docker restart

On CentOS 7
^^^^^^^^^^^

Restart your NFS Server and docker daemons:

.. code:: sh

    $> sudo systemctl restart nfs-server
    $> sudo systemctl daemon-reload
    $> sudo systemctl restart docker

Set up your Docker Swarm service
--------------------------------

On All Machines
~~~~~~~~~~~~~~~

On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We will now set up the Docker volumes that will be mounted to the NFS
Server and serve as data and metadata storage for S3 Server. These two
commands have to be replicated on all machines:

.. code:: sh

    $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
    $> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata

There is no need to "docker exec" these volumes to mount them: the
Docker Swarm manager will do it when the Docker service is started.
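
As a quick check, you can list the volumes you just declared (output
abridged; the exact columns depend on your Docker version):

.. code:: sh

    $> docker volume ls
    DRIVER              VOLUME NAME
    local               data
    local               metadata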

On Server
^^^^^^^^^

To start a Docker service on a Docker Swarm cluster, you first have to
initialize that cluster (i.e.: define a manager), then have the
workers/nodes join in, and then start the service. Initialize the swarm
cluster, and look at the response:

.. code:: sh

    $> docker swarm init --advertise-addr 10.200.15.113

    Swarm initialized: current node (db2aqfu3bzfzzs9b1kfeaglmq) is now a manager.

    To add a worker to this swarm, run the following command:

        docker swarm join \
        --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka \
        10.200.15.113:2377

    To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

On Clients
^^^^^^^^^^

Simply copy/paste the command provided by your docker swarm init. When
all goes well, you'll get something like this:

.. code:: sh

    $> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377

    This node joined a swarm as a worker.

On Server
^^^^^^^^^

Start the service on your swarm cluster!

.. code:: sh

    $> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/s3server

If you run ``docker service ls``, you should have the following output:

.. code:: sh

    $> docker service ls
    ID            NAME  MODE        REPLICAS  IMAGE
    ocmggza412ft  s3    replicated  1/1       scality/s3server:latest

If your service won't start, consider disabling apparmor/SELinux.
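
On CentOS, for instance, switching SELinux to permissive mode is a quick
way to test that hypothesis (remember to revert it once you have
diagnosed the problem):

.. code:: sh

    $> sudo setenforce 0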

Testing your High Availability S3Server
---------------------------------------

On All Machines
~~~~~~~~~~~~~~~

On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Try to find out where your Scality S3 Server is actually running using
the **docker ps** command. It can be on any node of the swarm cluster,
manager or worker. When you find it, you can kill it with **docker stop
<container id>**, and you'll see it respawn on a different node of the
swarm cluster. Now you see that if one of your servers fails, or if
docker stops unexpectedly, your end user will still be able to access
your local S3 Server.
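
A minimal sketch of that experiment, run on whichever node currently
hosts the container (the container id below is just an example):

.. code:: sh

    # Find the s3server container running on this node, if any
    $> docker ps --filter ancestor=scality/s3server -q
    894aee038c5e
    # Stop it and watch the swarm reschedule the service elsewhere
    $> docker stop 894aee038c5e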

Troubleshooting
---------------

To troubleshoot the service you can run:

.. code:: sh

    $> docker service ps s3
    ID                         NAME     IMAGE             NODE                               DESIRED STATE  CURRENT STATE       ERROR
    0ar81cw4lvv8chafm8pw48wbc  s3.1     scality/s3server  localhost.localdomain.localdomain  Running        Running 7 days ago
    cvmf3j3bz8w6r4h0lf3pxo6eu  \_ s3.1  scality/s3server  localhost.localdomain.localdomain  Shutdown       Failed 7 days ago   "task: non-zero exit (137)"

If the error is truncated, it is possible to have a more detailed view of
the error by inspecting the docker task ID:

.. code:: sh

    $> docker inspect cvmf3j3bz8w6r4h0lf3pxo6eu

Off you go!
-----------

Let us know what you use this functionality for, and if you'd like any
specific developments around it. Or, even better: come and contribute to
our `Github repository <https://github.com/scality/s3/>`__! We look
forward to meeting you!


S3FS
====

Export your buckets as a filesystem with s3fs on top of s3server

`s3fs <https://github.com/s3fs-fuse/s3fs-fuse>`__ is an open source
tool that allows you to mount an S3 bucket on a filesystem-like backend.
It is available both on Debian and RedHat distributions. For this
tutorial, we used an Ubuntu 14.04 host to deploy and use s3fs over
Scality's S3 Server.

Deploying S3 Server with SSL
----------------------------

First, you need to deploy **S3 Server**. This can be done very easily
via `our DockerHub
page <https://hub.docker.com/r/scality/s3server/>`__ (you want to run it
with a file backend).

*Note:* *- If you don't have docker installed on your machine, here
are the `instructions to install it for your
distribution <https://docs.docker.com/engine/installation/>`__*

You also have to set up SSL with S3Server to use s3fs. We have a nice
`tutorial <https://s3.scality.com/v1.0/page/scality-with-ssl>`__ to help
you do it.

s3fs setup
----------

Installing s3fs
~~~~~~~~~~~~~~~

s3fs has quite a few dependencies. As explained in their
`README <https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation>`__,
the following commands should install everything for Ubuntu 14.04:

.. code:: sh

    $> sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev
    $> sudo apt-get install libfuse-dev libssl-dev libxml2-dev make pkg-config

Now you want to install s3fs per se:

.. code:: sh

    $> git clone https://github.com/s3fs-fuse/s3fs-fuse.git
    $> cd s3fs-fuse
    $> ./autogen.sh
    $> ./configure
    $> make
    $> sudo make install

Check that s3fs is properly installed by checking its version. It should
answer as below:

.. code:: sh

    $> s3fs --version

    Amazon Simple Storage Service File System V1.80(commit:d40da2c) with OpenSSL

Configuring s3fs
~~~~~~~~~~~~~~~~

s3fs expects you to provide it with a password file. Our file is
``/etc/passwd-s3fs``. The structure for this file is
``ACCESSKEYID:SECRETKEYID``, so, for S3Server, you can run:

.. code:: sh

    $> echo 'accessKey1:verySecretKey1' > /etc/passwd-s3fs
    $> chmod 600 /etc/passwd-s3fs

Using S3Server with s3fs
------------------------

First, you're going to need a mountpoint; we chose ``/mnt/tests3fs``:

.. code:: sh

    $> mkdir /mnt/tests3fs

Then, you want to create a bucket on your local S3Server; we named it
``tests3fs``:

.. code:: sh

    $> s3cmd mb s3://tests3fs

*Note:* *- If you've never used s3cmd with our S3Server, our README
provides you with a `recommended
config <https://github.com/scality/S3/blob/master/README.md#s3cmd>`__*

Now you can mount your bucket to your mountpoint with s3fs:

.. code:: sh

    $> s3fs tests3fs /mnt/tests3fs -o passwd_file=/etc/passwd-s3fs -o url="https://s3.scality.test:8000/" -o use_path_request_style

*If you're curious, the structure of this command is*
``s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS``\ *, and the
options are mandatory and serve the following purposes:*

- ``passwd_file``\ *: specify path to password file;*
- ``url``\ *: specify the hostname used by your SSL provider;*
- ``use_path_request_style``\ *: force path style (by default, s3fs
  uses subdomains (DNS style)).*

| From now on, you can either add files to your mountpoint, or add
  objects to your bucket, and they'll show in the other.
| For example, let's create two files, and then a directory with a file
  in our mountpoint:

.. code:: sh

    $> touch /mnt/tests3fs/file1 /mnt/tests3fs/file2
    $> mkdir /mnt/tests3fs/dir1
    $> touch /mnt/tests3fs/dir1/file3

Now, I can use s3cmd to show me what is actually in S3Server:

.. code:: sh

    $> s3cmd ls -r s3://tests3fs

    2017-02-28 17:28         0   s3://tests3fs/dir1/
    2017-02-28 17:29         0   s3://tests3fs/dir1/file3
    2017-02-28 17:28         0   s3://tests3fs/file1
    2017-02-28 17:28         0   s3://tests3fs/file2

Now you can enjoy a filesystem view on your local S3Server!


Duplicity
=========

How to back up your files with S3 Server.

Installing
-----------

Installing Duplicity and its dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To install `Duplicity <http://duplicity.nongnu.org/index.html>`__,
you have to download `this
tarball <https://code.launchpad.net/duplicity/0.7-series/0.7.11/+download/duplicity-0.7.11.tar.gz>`__,
decompress it, and then check out the README inside, which will give you
a list of dependencies to install. If you're using Ubuntu 14.04, this is
your lucky day: here is a lazy step-by-step install.

.. code:: sh

    $> apt-get install librsync-dev gnupg
    $> apt-get install python-dev python-pip python-lockfile
    $> pip install -U boto

Then you want to actually install Duplicity:

.. code:: sh

    $> tar zxvf duplicity-0.7.11.tar.gz
    $> cd duplicity-0.7.11
    $> python setup.py install

Using
------

Testing your installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~

First, we're just going to quickly check that S3 Server is actually
running. To do so, simply run ``$> docker ps``. You should see one
container named ``scality/s3server``. If that is not the case, try
``$> docker start s3server``, and check again.

Secondly, as you probably know, Duplicity uses a module called **Boto**
to send requests to S3. Boto requires a configuration file located in
**``/etc/boto.cfg``** to have your credentials and preferences. Here is
a minimalistic config `that you can fine-tune following these
instructions <http://boto.cloudhackers.com/en/latest/getting_started.html>`__.

::

    [Credentials]
    aws_access_key_id = accessKey1
    aws_secret_access_key = verySecretKey1

    [Boto]
    # If using SSL, set to True
    is_secure = False
    # If using SSL, unmute and provide absolute path to local CA certificate
    # ca_certificates_file = /absolute/path/to/ca.crt

*Note:* *If you want to set up SSL with S3 Server, check out our
`tutorial <http://link/to/SSL/tutorial>`__*

At this point, we've met all the requirements to start running S3 Server
as a backend to Duplicity. So we should be able to back up a local
folder/file to local S3. Let's try with the duplicity decompressed
folder:

.. code:: sh

    $> duplicity duplicity-0.7.11 "s3://127.0.0.1:8000/testbucket/"

*Note:* *Duplicity will prompt you for a symmetric encryption
passphrase. Save it somewhere as you will need it to recover your
data. Alternatively, you can also add the ``--no-encryption`` flag
and the data will be stored plain.*

If this command is successful, you will get an output looking like this:

::

    --------------[ Backup Statistics ]--------------
    StartTime 1486486547.13 (Tue Feb  7 16:55:47 2017)
    EndTime 1486486547.40 (Tue Feb  7 16:55:47 2017)
    ElapsedTime 0.27 (0.27 seconds)
    SourceFiles 388
    SourceFileSize 6634529 (6.33 MB)
    NewFiles 388
    NewFileSize 6634529 (6.33 MB)
    DeletedFiles 0
    ChangedFiles 0
    ChangedFileSize 0 (0 bytes)
    ChangedDeltaSize 0 (0 bytes)
    DeltaEntries 388
    RawDeltaSize 6392865 (6.10 MB)
    TotalDestinationSizeChange 2003677 (1.91 MB)
    Errors 0
    -------------------------------------------------

Congratulations! You can now back up to your local S3 through duplicity
:)
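
To verify the round trip, you can restore that backup into an empty
folder (the target path below is just an example); duplicity will prompt
for the same passphrase:

.. code:: sh

    $> duplicity restore "s3://127.0.0.1:8000/testbucket/" /tmp/restored-duplicity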

Automating backups
~~~~~~~~~~~~~~~~~~~

Now you probably want to back up your files periodically. The easiest
way to do this is to write a bash script and add it to your crontab.
Here is my suggestion for such a file:

.. code:: sh

    #!/bin/bash

    # Export your passphrase so you don't have to type anything
    export PASSPHRASE="mypassphrase"

    # If you want to use a GPG Key, put it here and unmute the line below
    #GPG_KEY=

    # Define your backup bucket, with localhost specified
    DEST="s3://127.0.0.1:8000/testbuckets3server/"

    # Define the absolute path to the folder you want to backup
    SOURCE=/root/testfolder

    # Set to "full" for full backups, and "incremental" for incremental backups
    # Warning: you have to perform one full backup before you can perform
    # incremental ones on top of it
    FULL=incremental

    # How long to keep backups for; if you don't want to delete old
    # backups, keep empty; otherwise, syntax is "1Y" for one year, "1M"
    # for one month, "1D" for one day
    OLDER_THAN="1Y"

    # is_running checks whether duplicity is currently completing a task
    is_running=$(ps -ef | grep duplicity | grep python | wc -l)

    # If duplicity is already completing a task, this will simply not run
    if [ $is_running -eq 0 ]; then
        echo "Backup for ${SOURCE} started"

        # If you want to delete backups older than a certain time, we do it here
        if [ "$OLDER_THAN" != "" ]; then
            echo "Removing backups older than ${OLDER_THAN}"
            duplicity remove-older-than ${OLDER_THAN} ${DEST}
        fi

        # This is where the actual backup takes place
        echo "Backing up ${SOURCE}..."
        duplicity ${FULL} \
            ${SOURCE} ${DEST}
        # If you're using GPG, paste this in the command above
        # --encrypt-key=${GPG_KEY} --sign-key=${GPG_KEY} \
        # If you want to exclude a subfolder/file, put it below and
        # paste this in the command above
        # --exclude=/${SOURCE}/path_to_exclude \

        echo "Backup for ${SOURCE} complete"
        echo "------------------------------------"
    fi
    # Forget the passphrase...
    unset PASSPHRASE

So let's say you put this file in ``/usr/local/sbin/backup.sh``. Next
you want to run ``crontab -e`` and paste your configuration in the file
that opens. If you're unfamiliar with Cron, here is a good `How
To <https://help.ubuntu.com/community/CronHowto>`__. The folder I'm
backing up is a folder I modify constantly during my workday, so I want
incremental backups every 5 minutes from 8AM to 9PM Monday to Friday.
Here is the line I will paste in my crontab:

.. code:: cron

    */5 8-20 * * 1-5 /usr/local/sbin/backup.sh

Now I can try and add / remove files from the folder I'm backing up, and
I will see incremental backups in my bucket.
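
For instance, with the s3cmd setup from the S3FS section, listing the
backup bucket should show duplicity's manifest, signature, and volume
files accumulating over time:

.. code:: sh

    $> s3cmd ls s3://testbuckets3server/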

@ -0,0 +1,161 @@

# -*- coding: utf-8 -*-
#
# Zope docs documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 20 16:22:03 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# import sys
# import os

# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('.'))

# General configuration
# ---------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'scality-s3-server'
copyright = u'Apache License Version 2.0, 2004 http://www.apache.org/licenses/'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.0.0'
# The full version, including alpha/beta/rc tags.
release = '7.0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for
# all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# Options for HTML output
# -----------------------

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'css/default.css'

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../res/Scality-S3-Server-Logo-Large.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_use_modindex = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, the reST sources are included in the HTML build as _sources/<name>.
# html_copy_source = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'ScalityS3doc'
(two binary image files added, 42 KiB and 45 KiB, not shown; one oversized diff suppressed)

@ -0,0 +1,15 @@

Scality S3 Server
==================

.. _user-docs:

.. toctree::
   :maxdepth: 2
   :caption: Documentation

   CONTRIBUTING
   GETTING_STARTED
   CLIENTS
   DOCKER
   INTEGRATIONS
   ARCHITECTURE

@ -0,0 +1,4 @@

# http://www.mkdocs.org/user-guide/configuration/
# https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes

site_name: Scality S3 documentation
index.js (3 changed lines)

@ -1,4 +1,3 @@
 'use strict'; // eslint-disable-line strict

-require('babel-core/register')();
-require('./lib/server.js')();
+require('./lib/server.js').default();
init.js (83 changed lines)

@ -1,83 +0,0 @@
-'use strict'; // eslint-disable-line strict
-require('babel-core/register');
-
-const assert = require('assert');
-const fs = require('fs');
-const os = require('os');
-
-const async = require('async');
-const constants = require('./constants').default;
-const config = require('./lib/Config.js').default;
-const logger = require('./lib/utilities/logger.js').logger;
-
-let ioctl;
-try {
-    ioctl = require('ioctl');
-} catch (err) {
-    logger.warn('ioctl dependency is unavailable. skipping...');
-}
-
-function _setDirSyncFlag(path) {
-    const GETFLAGS = 2148034049;
-    const SETFLAGS = 1074292226;
-    const FS_DIRSYNC_FL = 65536;
-    const buffer = Buffer.alloc(8, 0);
-    const pathFD = fs.openSync(path, 'r');
-    const status = ioctl(pathFD, GETFLAGS, buffer);
-    assert.strictEqual(status, 0);
-    const currentFlags = buffer.readUIntLE(0, 8);
-    const flags = currentFlags | FS_DIRSYNC_FL;
-    buffer.writeUIntLE(flags, 0, 8);
-    const status2 = ioctl(pathFD, SETFLAGS, buffer);
-    assert.strictEqual(status2, 0);
-    fs.closeSync(pathFD);
-    const pathFD2 = fs.openSync(path, 'r');
-    const confirmBuffer = Buffer.alloc(8, 0);
-    ioctl(pathFD2, GETFLAGS, confirmBuffer);
-    assert.strictEqual(confirmBuffer.readUIntLE(0, 8),
-        currentFlags | FS_DIRSYNC_FL, 'FS_DIRSYNC_FL not set');
-    logger.info('FS_DIRSYNC_FL set');
-    fs.closeSync(pathFD2);
-}
-
-if (config.backends.data !== 'file' && config.backends.metadata !== 'file') {
-    logger.info('No init required. Go forth and store data.');
-    process.exit(0);
-}
-
-const dataPath = config.filePaths.dataPath;
-const metadataPath = config.filePaths.metadataPath;
-
-fs.accessSync(dataPath, fs.F_OK | fs.R_OK | fs.W_OK);
-fs.accessSync(metadataPath, fs.F_OK | fs.R_OK | fs.W_OK);
-const warning = 'WARNING: Synchronization directory updates are not ' +
-    'supported on this platform. Newly written data could be lost ' +
-    'if your system crashes before the operating system is able to ' +
-    'write directory updates.';
-if (os.type() === 'Linux' && os.endianness() === 'LE' && ioctl) {
-    try {
-        _setDirSyncFlag(dataPath);
-        _setDirSyncFlag(metadataPath);
-    } catch (err) {
-        logger.warn(warning, { error: err.stack });
-    }
-} else {
-    logger.warn(warning);
-}
-
-// Create 3511 subdirectories for the data file backend
-const subDirs = Array.from({ length: constants.folderHash },
-    (v, k) => (k).toString());
-async.eachSeries(subDirs, (subDirName, next) => {
-    fs.mkdir(`${dataPath}/${subDirName}`, err => {
-        // If already exists, move on
-        if (err && err.code !== 'EEXIST') {
-            return next(err);
-        }
-        return next();
-    });
-},
-err => {
-    assert.strictEqual(err, null, `Error creating data files ${err}`);
-    logger.info('Init complete. Go forth and store data.');
-});
349
lib/Config.js
349
lib/Config.js
|
@ -1,13 +1,97 @@
|
||||||
import assert from 'assert';
|
const assert = require('assert');
|
||||||
import fs from 'fs';
|
const fs = require('fs');
|
||||||
import path from 'path';
|
const path = require('path');
|
||||||
|
|
||||||
import authDataChecker from './auth/in_memory/checker';
|
const authDataChecker = require('./auth/in_memory/checker');
|
||||||
|
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
|
||||||
|
|
||||||
// whitelist IP, CIDR for health checks
|
// whitelist IP, CIDR for health checks
|
||||||
const defaultHealthChecks = { allowFrom: ['127.0.0.1/8', '::1'] };
|
const defaultHealthChecks = { allowFrom: ['127.0.0.1/8', '::1'] };
|
||||||
|
|
||||||
const defaultLocalCache = { host: '127.0.0.1', port: 6379 };
|
const defaultLocalCache = { host: '127.0.0.1', port: 6379 };
|
||||||
|
|
||||||
|
function sproxydAssert(configSproxyd) {
|
||||||
|
const sproxydFields = [];
|
||||||
|
if (configSproxyd.bootstrap !== undefined) {
|
||||||
|
assert(Array.isArray(configSproxyd.bootstrap)
|
||||||
|
&& configSproxyd.bootstrap
|
||||||
|
.every(e => typeof e === 'string'),
|
||||||
|
'bad config: sproxyd.bootstrap must be an array of strings');
|
||||||
|
assert(configSproxyd.bootstrap.length > 0,
|
||||||
|
'bad config: sproxyd bootstrap list is empty');
|
||||||
|
sproxydFields.push('bootstrap');
|
||||||
|
}
|
||||||
|
if (configSproxyd.chordCos !== undefined) {
|
||||||
|
assert(typeof configSproxyd.chordCos === 'string',
|
||||||
|
'bad config: sproxyd.chordCos must be a string');
|
||||||
|
assert(configSproxyd.chordCos.match(/^[0-6]{1}$/),
|
||||||
|
'bad config: sproxyd.chordCos must be a digit smaller than 7');
|
||||||
|
sproxydFields.push('chordCos');
|
||||||
|
}
|
||||||
|
if (configSproxyd.path !== undefined) {
|
||||||
|
assert(typeof configSproxyd.path === 'string',
|
||||||
|
'bad config: sproxyd.path must be a string');
|
||||||
|
sproxydFields.push('path');
|
||||||
|
}
|
||||||
|
return sproxydFields;
|
||||||
|
}
|
||||||
|
|
||||||
|
function locationConstraintAssert(locationConstraints) {
|
||||||
|
const supportedBackends = ['mem', 'file', 'scality', 'aws_s3'];
|
||||||
|
|
||||||
|
assert(typeof locationConstraints === 'object',
|
||||||
|
'bad config: locationConstraints must be an object');
|
||||||
|
Object.keys(locationConstraints).forEach(l => {
|
||||||
|
assert(typeof locationConstraints[l] === 'object',
|
||||||
|
'bad config: locationConstraints[region] must be an object');
|
||||||
|
assert(typeof locationConstraints[l].type === 'string',
|
||||||
|
'bad config: locationConstraints[region].type is ' +
|
||||||
|
'mandatory and must be a string');
|
||||||
|
assert(supportedBackends.indexOf(locationConstraints[l].type) > -1,
|
||||||
|
'bad config: locationConstraints[region].type must ' +
|
||||||
|
`be one of ${supportedBackends}`);
|
||||||
|
assert(typeof locationConstraints[l].legacyAwsBehavior
|
||||||
|
=== 'boolean',
|
||||||
|
'bad config: locationConstraints[region]' +
|
||||||
|
'.legacyAwsBehavior is mandatory and must be a boolean');
|
||||||
|
assert(typeof locationConstraints[l].details
|
||||||
|
=== 'object',
|
||||||
|
'bad config: locationConstraints[region].details is ' +
|
||||||
|
'mandatory and must be an object');
|
||||||
|
const details = locationConstraints[l].details;
|
||||||
|
const stringFields = [
|
||||||
|
'awsEndpoint',
|
||||||
|
'bucketName',
|
||||||
|
'credentialsProfile',
|
||||||
|
];
|
||||||
|
stringFields.forEach(field => {
|
||||||
|
if (details[field] !== undefined) {
|
||||||
|
assert(typeof details[field] === 'string',
|
||||||
|
`bad config: ${field} must be a string`);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
if (details.bucketMatch !== undefined) {
|
||||||
|
assert(typeof details.bucketMatch === 'boolean',
|
||||||
|
'bad config: details.bucketMatch must be a boolean');
|
||||||
|
}
|
||||||
|
if (details.credentials !== undefined) {
|
||||||
|
assert(typeof details.credentials === 'object',
|
||||||
|
'bad config: details.credentials must be an object');
|
||||||
|
assert(typeof details.credentials.accessKey === 'string',
|
||||||
|
'bad config: credentials must include accessKey as string');
|
||||||
|
assert(typeof details.credentials.secretKey === 'string',
|
||||||
|
'bad config: credentials must include secretKey as string');
|
||||||
|
}
|
||||||
|
});
|
||||||
|
assert(Object.keys(locationConstraints)
|
||||||
|
.includes('us-east-1'), 'bad locationConfig: must ' +
|
||||||
|
'include us-east-1 as a locationConstraint');
|
||||||
|
}
|
||||||
|
|
||||||
|
function cosParse(chordCos) {
|
||||||
|
// Cos number should only be first digit of config value
|
||||||
|
return Number.parseInt(chordCos, 10);
|
||||||
|
}
|
||||||
 /**
  * Reads from a config file and returns the content as a config object
  */
@@ -16,21 +100,87 @@ class Config {
         /*
          * By default, the config file is "config.json" at the root.
          * It can be overridden using the S3_CONFIG_FILE environment var.
+         * By default, the location config file is "locationConfig.json" at
+         * the root.
+         * It can be overridden using the S3_LOCATION_FILE environment var.
          */
         this._basePath = path.join(__dirname, '..');
-        this.path = path.join(__dirname, '../config.json');
+        this.configPath = path.join(__dirname, '../config.json');
         if (process.env.S3_CONFIG_FILE !== undefined) {
-            this.path = process.env.S3_CONFIG_FILE;
+            this.configPath = process.env.S3_CONFIG_FILE;
         }
+        this.locationConfigPath = path.join(__dirname,
+            '../locationConfig.json');
+        if (process.env.S3_LOCATION_FILE !== undefined) {
+            this.locationConfigPath = process.env.S3_LOCATION_FILE;
+        }

         // Read config automatically
         this._getConfig();
+        this._getLocationConfig();
+        this._configureBackends();
     }

+    _getLocationConfig() {
+        let locationConfig;
+        try {
+            const data = fs.readFileSync(this.locationConfigPath,
+                { encoding: 'utf-8' });
+            locationConfig = JSON.parse(data);
+        } catch (err) {
+            throw new Error(
+                `could not parse location config file: ${err.message}`);
+        }
+
+        this.locationConstraints = {};
+        locationConstraintAssert(locationConfig);
+        this.locationConstraints = locationConfig;
+        Object.keys(locationConfig).forEach(l => {
+            const details = this.locationConstraints[l].details;
+            if (locationConfig[l].details.connector !== undefined) {
+                assert(typeof locationConfig[l].details.connector ===
+                    'object', 'bad config: connector must be an object');
+                if (locationConfig[l].details.connector.sproxyd !==
+                    undefined) {
+                    details.connector.sproxyd =
+                        locationConfig[l].details.connector.sproxyd;
+                    const fields = sproxydAssert(
+                        locationConfig[l].details.connector.sproxyd);
+                    if (fields.indexOf('bootstrap') > -1) {
+                        details.connector.sproxyd.bootstrap =
+                            locationConfig[l].details.connector.sproxyd
+                                .bootstrap;
+                        assert(Array.isArray(
+                            details.connector.sproxyd.bootstrap) &&
+                            details.connector.sproxyd.bootstrap.every(e =>
+                                typeof e === 'string'),
+                            'assignment error: sproxyd.bootstrap must be ' +
+                            'an array of strings');
+                    }
+                    if (fields.indexOf('chordCos') > -1) {
+                        details.connector.sproxyd.chordCos =
+                            cosParse(locationConfig[l].details.connector
+                                .sproxyd.chordCos);
+                        assert(typeof details.connector.sproxyd.chordCos ===
+                            'number', 'assignment error: chordCos must be a ' +
+                            'number');
+                    }
+                    if (fields.indexOf('path') > -1) {
+                        // note: the original hunk assigned and asserted
+                        // `chordCos` here; `path` is clearly what was meant
+                        details.connector.sproxyd.path =
+                            locationConfig[l].details.connector.sproxyd.path;
+                        assert(typeof details.connector.sproxyd.path ===
+                            'string', 'assignment error: sproxyd path must ' +
+                            'be a string');
+                    }
+                }
+            }
+        });
+    }
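A quick usage sketch of the two override variables handled above; the file paths and the lib/Config.js module path are hypothetical.

    // Hypothetical: point S3 at custom config files before the Config
    // singleton is first required (paths are illustrative).
    process.env.S3_CONFIG_FILE = '/etc/s3/config.json';
    process.env.S3_LOCATION_FILE = '/etc/s3/locationConfig.json';
    const { config } = require('./lib/Config');
    console.log(Object.keys(config.locationConstraints));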
     _getConfig() {
         let config;
         try {
-            const data = fs.readFileSync(this.path, { encoding: 'utf-8' });
+            const data = fs.readFileSync(this.configPath,
+                { encoding: 'utf-8' });
             config = JSON.parse(data);
         } catch (err) {
             throw new Error(`could not parse config file: ${err.message}`);
@@ -63,13 +213,33 @@ class Config {
             });
         }

-        assert(typeof config.regions === 'object',
-            'bad config: the list of regions is mandatory');
-        assert(Object.keys(config.regions).every(
-            r => typeof r === 'string' && config.regions[r] instanceof Array
-            && config.regions[r].every(e => typeof e === 'string')),
-            'bad config: regions must be a set of {region: [endpoints]}');
-        this.regions = config.regions;
+        if (config.replicationGroupId) {
+            assert(typeof config.replicationGroupId === 'string',
+                'bad config: replicationGroupId must be a string');
+            this.replicationGroupId = config.replicationGroupId;
+        } else {
+            this.replicationGroupId = 'RG001';
+        }
+
+        // legacy
+        if (config.regions !== undefined) {
+            throw new Error('bad config: regions key is deprecated. ' +
+                'Please use restEndpoints and locationConfig');
+        }
+
+        if (config.restEndpoints !== undefined) {
+            this.restEndpoints = {};
+            assert(typeof config.restEndpoints === 'object',
+                'bad config: restEndpoints must be an object of endpoints');
+            assert(Object.keys(config.restEndpoints).every(
+                r => typeof config.restEndpoints[r] === 'string'),
+                'bad config: each endpoint must be a string');
+            this.restEndpoints = config.restEndpoints;
+        }
+
+        if (!config.restEndpoints) {
+            throw new Error('bad config: config must include restEndpoints');
+        }

         this.websiteEndpoints = [];
         if (config.websiteEndpoints !== undefined) {
@@ -86,30 +256,14 @@ class Config {
             this.clusters = config.clusters;
         }
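A config.json fragment that passes the new checks might look like this; the hostnames and location name are illustrative, and each endpoint value must name a location constraint defined in locationConfig.json.

    {
        "restEndpoints": {
            "localhost": "us-east-1",
            "s3.example.com": "scality-ring"
        },
        "replicationGroupId": "RG001"
    }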
-        this.usEastBehavior = false;
         if (config.usEastBehavior !== undefined) {
-            assert(typeof config.usEastBehavior === 'boolean');
-            this.usEastBehavior = config.usEastBehavior;
+            throw new Error('bad config: usEastBehavior key is deprecated. ' +
+                'Please use restEndpoints and locationConfig');
         }
-        this.sproxyd = { bootstrap: [] };
+        // legacy
         if (config.sproxyd !== undefined) {
-            if (config.sproxyd.bootstrap !== undefined) {
-                assert(Array.isArray(config.sproxyd.bootstrap)
-                    && config.sproxyd.bootstrap
-                    .every(e => typeof e === 'string'),
-                    'bad config: sproxyd.bootstrap must be a list of strings');
-                assert(config.sproxyd.bootstrap.length > 0,
-                    'sproxyd bootstrap list is empty');
-                this.sproxyd.bootstrap = config.sproxyd.bootstrap;
-            }
-            if (config.sproxyd.chordCos !== undefined) {
-                assert(typeof config.sproxyd.chordCos === 'string',
-                    'bad config: sproxyd.chordCos must be a string');
-                assert(config.sproxyd.chordCos.match(/^[0-9a-fA-F]{2}$/),
-                    'bad config: sproxyd.chordCos must be a 2hex-chars string');
-                this.sproxyd.chordCos =
-                    Number.parseInt(config.sproxyd.chordCos, 16);
-            }
+            throw new Error('bad config: sproxyd key is deprecated. ' +
+                'Please use restEndpoints and locationConfig');
         }

         this.bucketd = { bootstrap: [] };
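Migration note implied by the two deprecations above: settings that used to live at the top level of config.json now belong under a location's details.connector in locationConfig.json. A sketch, with illustrative values:

    // Before (now throws at startup):
    //   config.json: { "sproxyd": { "bootstrap": ["localhost:8181"] } }
    // After:
    //   locationConfig.json:
    //   { "scality-ring": { "type": "scality", "legacyAwsBehavior": false,
    //       "details": { "connector": { "sproxyd":
    //           { "bootstrap": ["localhost:8181"] } } } } }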
@@ -137,6 +291,81 @@ class Config {
             }
         }

+        if (config.dataClient) {
+            this.dataClient = {};
+            assert.strictEqual(typeof config.dataClient.host, 'string',
+                'bad config: data client host must be ' +
+                'a string');
+            this.dataClient.host = config.dataClient.host;
+
+            assert(Number.isInteger(config.dataClient.port)
+                && config.dataClient.port > 0,
+                'bad config: dataClient port must be a positive ' +
+                'integer');
+            this.dataClient.port = config.dataClient.port;
+        }
+
+        if (config.metadataClient) {
+            this.metadataClient = {};
+            assert.strictEqual(
+                typeof config.metadataClient.host, 'string',
+                'bad config: metadata client host must be a string');
+            this.metadataClient.host = config.metadataClient.host;
+
+            assert(Number.isInteger(config.metadataClient.port)
+                && config.metadataClient.port > 0,
+                'bad config: metadata client port must be a ' +
+                'positive integer');
+            this.metadataClient.port = config.metadataClient.port;
+        }
+
+        if (config.dataDaemon) {
+            this.dataDaemon = {};
+            assert.strictEqual(
+                typeof config.dataDaemon.bindAddress, 'string',
+                'bad config: data daemon bind address must be a string');
+            this.dataDaemon.bindAddress = config.dataDaemon.bindAddress;
+
+            assert(Number.isInteger(config.dataDaemon.port)
+                && config.dataDaemon.port > 0,
+                'bad config: data daemon port must be a positive ' +
+                'integer');
+            this.dataDaemon.port = config.dataDaemon.port;
+
+            /**
+             * Configure the file paths for data if using the file
+             * backend. If no path provided, uses data at the root of
+             * the S3 project directory.
+             */
+            this.dataDaemon.dataPath =
+                process.env.S3DATAPATH ?
+                process.env.S3DATAPATH : `${__dirname}/../localData`;
+        }
+
+        if (config.metadataDaemon) {
+            this.metadataDaemon = {};
+            assert.strictEqual(
+                typeof config.metadataDaemon.bindAddress, 'string',
+                'bad config: metadata daemon bind address must be a string');
+            this.metadataDaemon.bindAddress =
+                config.metadataDaemon.bindAddress;
+
+            assert(Number.isInteger(config.metadataDaemon.port)
+                && config.metadataDaemon.port > 0,
+                'bad config: metadata daemon port must be a ' +
+                'positive integer');
+            this.metadataDaemon.port = config.metadataDaemon.port;
+
+            /**
+             * Configure the file path for metadata if using the file
+             * backend. If no path provided, uses data and metadata at
+             * the root of the S3 project directory.
+             */
+            this.metadataDaemon.metadataPath =
+                process.env.S3METADATAPATH ?
+                process.env.S3METADATAPATH : `${__dirname}/../localMetadata`;
+        }
+
         if (process.env.ENABLE_LOCAL_CACHE) {
             this.localCache = defaultLocalCache;
         }
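For reference, a config.json fragment exercising the new daemon sections; the addresses and ports are illustrative.

    {
        "dataDaemon": { "bindAddress": "127.0.0.1", "port": 9991 },
        "metadataDaemon": { "bindAddress": "127.0.0.1", "port": 9990 }
    }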
@@ -328,6 +557,9 @@ class Config {
             throw new Error('bad config: both certFilePaths.key and ' +
                 'certFilePaths.cert must be defined');
         }
+    }
+
+    _configureBackends() {
         /**
          * Configure the backends for Authentication, Data and Metadata.
          */
@@ -356,15 +588,34 @@ class Config {
             if (process.env.S3AUTH_CONFIG) {
                 authfile = process.env.S3AUTH_CONFIG;
             }
-            const authData = require(authfile);
+            let authData = require(authfile);
+            if (process.env.SCALITY_ACCESS_KEY_ID &&
+                process.env.SCALITY_SECRET_ACCESS_KEY) {
+                authData = buildAuthDataAccount(
+                    process.env.SCALITY_ACCESS_KEY_ID,
+                    process.env.SCALITY_SECRET_ACCESS_KEY);
+            }
             if (authDataChecker(authData)) {
                 throw new Error('bad config: invalid auth config file.');
             }
             this.authData = authData;
         }
-        if (process.env.S3SPROXYD) {
-            data = process.env.S3SPROXYD;
+        if (process.env.S3DATA) {
+            const validData = ['mem', 'file', 'scality', 'multiple'];
+            assert(validData.indexOf(process.env.S3DATA) > -1,
+                'bad environment variable: S3DATA environment variable ' +
+                'should be one of mem/file/scality/multiple'
+            );
+            data = process.env.S3DATA;
         }
+        if (data === 'scality' || data === 'multiple') {
+            data = 'multiple';
+        }
+        assert(this.locationConstraints !== undefined &&
+            this.restEndpoints !== undefined,
+            'bad config: locationConstraints and restEndpoints must be set'
+        );
+
         if (process.env.S3METADATA) {
             metadata = process.env.S3METADATA;
         }
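A sketch of the new environment-driven account override added above; the key values are placeholders.

    // Hypothetical: override the account credentials from the environment
    // instead of editing the auth config file (values are placeholders).
    process.env.SCALITY_ACCESS_KEY_ID = 'newAccessKey';
    process.env.SCALITY_SECRET_ACCESS_KEY = 'newSecretKey';
    // buildAuthDataAccount() then synthesizes the authData object that
    // authDataChecker() validates at startup.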
@@ -377,21 +628,6 @@ class Config {
             metadata,
             kms,
         };
-
-        /**
-         * Configure the file paths for data and metadata
-         * if using the file backend. If no path provided,
-         * uses data and metadata at the root of the S3 project directory
-         */
-        const dataPath = process.env.S3DATAPATH ?
-            process.env.S3DATAPATH : `${__dirname}/../localData`;
-        const metadataPath = process.env.S3METADATAPATH ?
-            process.env.S3METADATAPATH : `${__dirname}/../localMetadata`;
-        this.filePaths = {
-            dataPath,
-            metadataPath,
-        };
-        return config;
     }

     _verifyRedisPassword(password) {
@@ -399,4 +635,9 @@ class Config {
     }
 }

-export default new Config();
+module.exports = {
+    sproxydAssert,
+    locationConstraintAssert,
+    cosParse,
+    config: new Config(),
+};
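Consumers now destructure the export rather than importing a default. A sketch of the new consumption pattern:

    // Previously: `import Config from './Config'`
    const { config } = require('./Config');
    // The validators are also exported for reuse and testing:
    const { sproxydAssert, locationConstraintAssert, cosParse } =
        require('./Config');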
@@ -1,8 +1,8 @@
-import Redis from 'ioredis';
+const Redis = require('ioredis');

-import { logger } from './utilities/logger';
+const logger = require('./utilities/logger');

-export default class RedisClient {
+module.exports = class RedisClient {
     /**
      * @constructor
      * @param {string} host - Redis host
@@ -53,4 +53,4 @@ export default class RedisClient {
     clear(cb) {
         return this._client.flushDb(cb);
     }
-}
+};
@@ -1,6 +1,6 @@
-import async from 'async';
+const async = require('async');

-export default class StatsClient {
+class StatsClient {

     /**
      * @constructor
@@ -152,3 +152,5 @@ export default class StatsClient {
         });
     }
 }
+
+module.exports = StatsClient;
187 lib/api/api.js
@@ -1,73 +1,72 @@
-import querystring from 'querystring';
-
-import { auth, errors } from 'arsenal';
-
-import bucketDelete from './bucketDelete';
-import bucketDeleteCors from './bucketDeleteCors';
-import bucketDeleteWebsite from './bucketDeleteWebsite';
-import bucketGet from './bucketGet';
-import bucketGetACL from './bucketGetACL';
-import bucketGetCors from './bucketGetCors';
-import bucketGetVersioning from './bucketGetVersioning';
-import bucketGetWebsite from './bucketGetWebsite';
-import bucketHead from './bucketHead';
-import bucketPut from './bucketPut';
-import bucketPutACL from './bucketPutACL';
-import bucketPutCors from './bucketPutCors';
-import bucketPutVersioning from './bucketPutVersioning';
-import bucketPutWebsite from './bucketPutWebsite';
-import corsPreflight from './corsPreflight';
-import completeMultipartUpload from './completeMultipartUpload';
-import initiateMultipartUpload from './initiateMultipartUpload';
-import listMultipartUploads from './listMultipartUploads';
-import listParts from './listParts';
-import multiObjectDelete from './multiObjectDelete';
-import multipartDelete from './multipartDelete';
-import objectCopy from './objectCopy';
-import objectDelete from './objectDelete';
-import objectGet from './objectGet';
-import objectGetACL from './objectGetACL';
-import objectHead from './objectHead';
-import objectPut from './objectPut';
-import objectPutACL from './objectPutACL';
-import objectPutPart from './objectPutPart';
-import objectPutCopyPart from './objectPutCopyPart';
-import prepareRequestContexts from
-    './apiUtils/authorization/prepareRequestContexts';
-import serviceGet from './serviceGet';
-import vault from '../auth/vault';
-import websiteGet from './websiteGet';
-import websiteHead from './websiteHead';
+const { auth, errors } = require('arsenal');
+
+const bucketDelete = require('./bucketDelete');
+const bucketDeleteCors = require('./bucketDeleteCors');
+const bucketDeleteWebsite = require('./bucketDeleteWebsite');
+const bucketGet = require('./bucketGet');
+const bucketGetACL = require('./bucketGetACL');
+const bucketGetCors = require('./bucketGetCors');
+const bucketGetVersioning = require('./bucketGetVersioning');
+const bucketGetWebsite = require('./bucketGetWebsite');
+const bucketGetLocation = require('./bucketGetLocation');
+const bucketHead = require('./bucketHead');
+const { bucketPut } = require('./bucketPut');
+const bucketPutACL = require('./bucketPutACL');
+const bucketPutCors = require('./bucketPutCors');
+const bucketPutVersioning = require('./bucketPutVersioning');
+const bucketPutWebsite = require('./bucketPutWebsite');
+const bucketPutReplication = require('./bucketPutReplication');
+const corsPreflight = require('./corsPreflight');
+const completeMultipartUpload = require('./completeMultipartUpload');
+const initiateMultipartUpload = require('./initiateMultipartUpload');
+const listMultipartUploads = require('./listMultipartUploads');
+const listParts = require('./listParts');
+const { multiObjectDelete } = require('./multiObjectDelete');
+const multipartDelete = require('./multipartDelete');
+const objectCopy = require('./objectCopy');
+const objectDelete = require('./objectDelete');
+const objectDeleteTagging = require('./objectDeleteTagging');
+const objectGet = require('./objectGet');
+const objectGetACL = require('./objectGetACL');
+const objectGetTagging = require('./objectGetTagging');
+const objectHead = require('./objectHead');
+const objectPut = require('./objectPut');
+const objectPutACL = require('./objectPutACL');
+const objectPutTagging = require('./objectPutTagging');
+const objectPutPart = require('./objectPutPart');
+const objectPutCopyPart = require('./objectPutCopyPart');
+const prepareRequestContexts
+    = require('./apiUtils/authorization/prepareRequestContexts');
+const serviceGet = require('./serviceGet');
+const vault = require('../auth/vault');
+const websiteGet = require('./websiteGet');
+const websiteHead = require('./websiteHead');
+const writeContinue = require('../utilities/writeContinue');
+const parseCopySource = require('./apiUtils/object/parseCopySource');

 auth.setHandler(vault);

+/* eslint-disable no-param-reassign */
 const api = {
-    callApiMethod(apiMethod, request, log, callback, locationConstraint) {
+    callApiMethod(apiMethod, request, response, log, callback) {
+        let returnTagCount = true;
         // no need to check auth on website or cors preflight requests
         if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
             apiMethod === 'corsPreflight') {
             return this[apiMethod](request, log, callback);
         }
-        let sourceBucket;
-        let sourceObject;
-        if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
-            let source =
-                querystring.unescape(request.headers['x-amz-copy-source']);
-            // If client sends the source bucket/object with a leading /,
-            // remove it
-            if (source[0] === '/') {
-                source = source.slice(1);
-            }
-            const slashSeparator = source.indexOf('/');
-            if (slashSeparator === -1) {
-                return callback(errors.InvalidArgument);
-            }
-            // Pull the source bucket and source object separated by /
-            sourceBucket = source.slice(0, slashSeparator);
-            sourceObject = source.slice(slashSeparator + 1);
-        }
-        const requestContexts = prepareRequestContexts(apiMethod,
-            request, locationConstraint, sourceBucket, sourceObject);
+        const { sourceBucket, sourceObject, sourceVersionId, parsingError } =
+            parseCopySource(apiMethod, request.headers['x-amz-copy-source']);
+        if (parsingError) {
+            log.debug('error parsing copy source', {
+                error: parsingError,
+            });
+            return callback(parsingError);
+        }
+
+        const requestContexts = prepareRequestContexts(apiMethod, request,
+            sourceBucket, sourceObject, sourceVersionId);
+
         return auth.server.doAuth(request, log, (err, userInfo,
             authorizationResults, streamingV4Params) => {
             if (err) {
@@ -75,6 +74,19 @@ const api = {
                 return callback(err);
             }
             if (authorizationResults) {
+                if (apiMethod === 'objectGet') {
+                    // first item checks s3:GetObject(Version) action
+                    if (!authorizationResults[0].isAllowed) {
+                        log.trace('get object authorization denial from Vault');
+                        return callback(errors.AccessDenied);
+                    }
+                    // second item checks s3:GetObject(Version)Tagging action
+                    if (!authorizationResults[1].isAllowed) {
+                        log.trace('get tagging authorization denial ' +
+                            'from Vault');
+                        returnTagCount = false;
+                    }
+                } else {
                     for (let i = 0; i < authorizationResults.length; i++) {
                         if (!authorizationResults[i].isAllowed) {
                             log.trace('authorization denial from Vault');
@@ -82,20 +94,54 @@ const api = {
                         }
                     }
+                }
             }
-            if (apiMethod === 'bucketPut') {
-                return bucketPut(userInfo, request, locationConstraint,
-                    log, callback);
-            }
-            if (apiMethod === 'objectCopy' ||
-                apiMethod === 'objectPutCopyPart') {
-                return this[apiMethod](userInfo, request, sourceBucket,
-                    sourceObject, log, callback);
-            }
+            // issue 100 Continue to the client
+            writeContinue(request, response);
             if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
                 return this[apiMethod](userInfo, request, streamingV4Params,
                     log, callback);
             }
+            const MAX_POST_LENGTH = request.method.toUpperCase() === 'POST' ?
+                1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
+            const post = [];
+            let postLength = 0;
+            request.on('data', chunk => {
+                postLength += chunk.length;
+                // Sanity check on post length
+                if (postLength <= MAX_POST_LENGTH) {
+                    post.push(chunk);
+                }
+                return undefined;
+            });
+
+            request.on('error', err => {
+                log.trace('error receiving request', {
+                    error: err,
+                });
+                return callback(errors.InternalError);
+            });
+
+            request.on('end', () => {
+                if (postLength > MAX_POST_LENGTH) {
+                    log.error('body length is too long for request type',
+                        { postLength });
+                    return callback(errors.InvalidRequest);
+                }
+                // Convert array of post buffers into one string
+                request.post = Buffer.concat(post, postLength).toString();
+
+                if (apiMethod === 'objectCopy' ||
+                    apiMethod === 'objectPutCopyPart') {
+                    return this[apiMethod](userInfo, request, sourceBucket,
+                        sourceObject, sourceVersionId, log, callback);
+                }
+                if (apiMethod === 'objectGet') {
+                    return this[apiMethod](userInfo, request,
+                        returnTagCount, log, callback);
+                }
                 return this[apiMethod](userInfo, request, log, callback);
+            });
+            return undefined;
         }, 's3', requestContexts);
     },
     bucketDelete,
@@ -106,12 +152,14 @@ const api = {
     bucketGetCors,
     bucketGetVersioning,
     bucketGetWebsite,
+    bucketGetLocation,
     bucketHead,
     bucketPut,
     bucketPutACL,
     bucketPutCors,
     bucketPutVersioning,
     bucketPutWebsite,
+    bucketPutReplication,
     corsPreflight,
     completeMultipartUpload,
     initiateMultipartUpload,
@@ -120,12 +168,15 @@ const api = {
     multiObjectDelete,
     multipartDelete,
     objectDelete,
+    objectDeleteTagging,
     objectGet,
     objectGetACL,
+    objectGetTagging,
     objectCopy,
     objectHead,
     objectPut,
     objectPutACL,
+    objectPutTagging,
     objectPutPart,
     objectPutCopyPart,
     serviceGet,
@@ -133,4 +184,4 @@ const api = {
     websiteHead,
 };

-export default api;
+module.exports = api;
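parseCopySource itself is not shown in this compare view; judging from the destructuring at its call site, it presumably packages the old inline parsing. A rough sketch only, not the actual helper (the real one would return arsenal errors, not plain Error):

    const querystring = require('querystring');

    // Sketch only: return shape inferred from the call site above.
    function parseCopySourceSketch(apiMethod, copySourceHeader) {
        if (apiMethod !== 'objectCopy' && apiMethod !== 'objectPutCopyPart') {
            return {};
        }
        const [path, query] = copySourceHeader.split('?');
        let source = querystring.unescape(path);
        // Tolerate a leading slash on the bucket/object pair
        if (source[0] === '/') {
            source = source.slice(1);
        }
        const slash = source.indexOf('/');
        if (slash === -1) {
            return { parsingError: new Error('InvalidArgument') };
        }
        const versionMatch = query && /versionId=([^&]+)/.exec(query);
        return {
            sourceBucket: source.slice(0, slash),
            sourceObject: source.slice(slash + 1),
            sourceVersionId: versionMatch ? versionMatch[1] : undefined,
        };
    }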
@@ -1,6 +1,6 @@
-import constants from '../../../../constants';
+const constants = require('../../../../constants');

-export function isBucketAuthorized(bucket, requestType, canonicalID) {
+function isBucketAuthorized(bucket, requestType, canonicalID) {
     // Check to see if user is authorized to perform a
     // particular action on bucket based on ACLs.
     // TODO: Add IAM checks and bucket policy checks.
@@ -61,7 +61,7 @@ export function isBucketAuthorized(bucket, requestType, canonicalID) {
         requestType === 'objectGet' || requestType === 'objectHead');
 }

-export function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
+function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
     const bucketOwner = bucket.getOwner();
     if (!objectMD) {
         return false;
@@ -69,6 +69,12 @@ export function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
     if (objectMD['owner-id'] === canonicalID) {
         return true;
     }
+    // account is authorized if:
+    // - requesttype is "bucketOwnerAction" (example: for objectTagging) and
+    // - account is the bucket owner
+    if (requestType === 'bucketOwnerAction' && bucketOwner === canonicalID) {
+        return true;
+    }
     if (requestType === 'objectGet' || requestType === 'objectHead') {
         if (objectMD.acl.Canned === 'public-read'
             || objectMD.acl.Canned === 'public-read-write'
@@ -111,3 +117,8 @@ export function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
     }
     return false;
 }
+
+module.exports = {
+    isBucketAuthorized,
+    isObjAuthorized,
+};
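An illustrative call for the new branch; the variable names are hypothetical. The point of the addition is that tagging APIs are authorized for the bucket owner even when the object belongs to another account:

    // true when bucket.getOwner() === bucketOwnerCanonicalID,
    // regardless of who owns the object metadata
    const allowed = isObjAuthorized(bucket, objectMD,
        'bucketOwnerAction', bucketOwnerCanonicalID);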
@@ -1,43 +1,117 @@
-import { policies } from 'arsenal';
+const { policies } = require('arsenal');
+
 const RequestContext = policies.RequestContext;

+let apiMethodAfterVersionCheck;
+const apiMethodWithVersion = { objectGetACL: true, objectPutACL: true,
+    objectGet: true, objectDelete: true, objectPutTagging: true,
+    objectGetTagging: true, objectDeleteTagging: true };
+
+function isHeaderAcl(headers) {
+    return headers['x-amz-grant-read'] || headers['x-amz-grant-read-acp'] ||
+    headers['x-amz-grant-write-acp'] || headers['x-amz-grant-full-control'] ||
+    headers['x-amz-acl'];
+}
+
 /**
  * Prepares the requestContexts array to send to Vault for authorization
  * @param {string} apiMethod - api being called
  * @param {object} request - request object
- * @param {string} locationConstraint - locationConstraint if bucket put
- * operation
  * @param {string} sourceBucket - name of sourceBucket if copy request
  * @param {string} sourceObject - name of sourceObject if copy request
+ * @param {string} sourceVersionId - value of sourceVersionId if copy request
  * @return {RequestContext []} array of requestContexts
  */
-export default function prepareRequestContexts(apiMethod, request,
-    locationConstraint, sourceBucket, sourceObject) {
+function prepareRequestContexts(apiMethod, request, sourceBucket,
+    sourceObject, sourceVersionId) {
     // if multiObjectDelete request, we want to authenticate
     // before parsing the post body and creating multiple requestContexts
     // so send null as requestContexts to Vault to avoid authorization
     // checks at this point
-    if (apiMethod === 'multiObjectDelete') {
+    //
+    // If bucketPut request, we want to do the authorization check in the API
+    // itself (once we parse the locationConstraint from the xml body) so send
+    // null as the requestContext to Vault so it will only do an authentication
+    // check.
+
+    function generateRequestContext(apiMethod) {
+        return new RequestContext(request.headers,
+            request.query, request.bucketName, request.objectKey,
+            request.socket.remoteAddress, request.connection.encrypted,
+            apiMethod, 's3');
+    }
+
+    if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
         return null;
     }
-    const requestContexts = [];
-    if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
-        const getRequestContext = new RequestContext(request.headers,
-            request.query, sourceBucket, sourceObject,
-            request.socket.remoteAddress, request.connection.encrypted,
-            'objectGet', 's3', locationConstraint);
-        const putRequestContext = new RequestContext(request.headers,
-            request.query, request.bucketName, request.objectKey,
-            request.socket.remoteAddress, request.connection.encrypted,
-            'objectPut', 's3', locationConstraint);
-        requestContexts.push(getRequestContext, putRequestContext);
+
+    if (apiMethodWithVersion[apiMethod] && request.query &&
+        request.query.versionId) {
+        apiMethodAfterVersionCheck = `${apiMethod}Version`;
     } else {
-        const requestContext = new RequestContext(request.headers,
-            request.query, request.bucketName, request.objectKey,
-            request.socket.remoteAddress, request.connection.encrypted,
-            apiMethod, 's3', locationConstraint);
+        apiMethodAfterVersionCheck = apiMethod;
+    }
+
+    const requestContexts = [];
+
+    if (apiMethodAfterVersionCheck === 'objectCopy'
+        || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
+        const objectGetAction = sourceVersionId ? 'objectGetVersion' :
+            'objectGet';
+        const reqQuery = Object.assign({}, request.query,
+            { versionId: sourceVersionId });
+        const getRequestContext = new RequestContext(request.headers,
+            reqQuery, sourceBucket, sourceObject,
+            request.socket.remoteAddress, request.connection.encrypted,
+            objectGetAction, 's3');
+        const putRequestContext = generateRequestContext('objectPut');
+        requestContexts.push(getRequestContext, putRequestContext);
+        if (apiMethodAfterVersionCheck === 'objectCopy') {
+            // if tagging directive is COPY, "s3:PutObjectTagging" don't need
+            // to be included in the list of permitted actions in IAM policy
+            if (request.headers['x-amz-tagging'] &&
+                request.headers['x-amz-tagging-directive'] === 'REPLACE') {
+                const putTaggingRequestContext =
+                    generateRequestContext('objectPutTagging');
+                requestContexts.push(putTaggingRequestContext);
+            }
+            if (isHeaderAcl(request.headers)) {
+                const putAclRequestContext =
+                    generateRequestContext('objectPutACL');
+                requestContexts.push(putAclRequestContext);
+            }
+        }
+    } else if (apiMethodAfterVersionCheck === 'objectGet'
+        || apiMethodAfterVersionCheck === 'objectGetVersion') {
+        const objectGetTaggingAction = (request.query &&
+            request.query.versionId) ? 'objectGetTaggingVersion' :
+            'objectGetTagging';
+        const getRequestContext =
+            generateRequestContext(apiMethodAfterVersionCheck);
+        const getTaggingRequestContext =
+            generateRequestContext(objectGetTaggingAction);
+        requestContexts.push(getRequestContext, getTaggingRequestContext);
+    } else if (apiMethodAfterVersionCheck === 'objectPut') {
+        const putRequestContext =
+            generateRequestContext(apiMethodAfterVersionCheck);
+        requestContexts.push(putRequestContext);
+        // if put object (versioning) with tag set
+        if (request.headers['x-amz-tagging']) {
+            const putTaggingRequestContext =
+                generateRequestContext('objectPutTagging');
+            requestContexts.push(putTaggingRequestContext);
+        }
+        // if put object (versioning) with ACL
+        if (isHeaderAcl(request.headers)) {
+            const putAclRequestContext =
+                generateRequestContext('objectPutACL');
+            requestContexts.push(putAclRequestContext);
+        }
+    } else {
+        const requestContext =
+            generateRequestContext(apiMethodAfterVersionCheck);
         requestContexts.push(requestContext);
     }
     return requestContexts;
 }
+
+module.exports = prepareRequestContexts;
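To make the branching concrete, a sketch of the contexts produced for one request shape; the header values are illustrative.

    // An objectCopy with a tag set and a REPLACE directive yields three
    // contexts for Vault to authorize:
    //   request.headers['x-amz-tagging'] = 'k=v'
    //   request.headers['x-amz-tagging-directive'] = 'REPLACE'
    // prepareRequestContexts('objectCopy', request, 'src-bucket',
    //     'src-key', undefined)
    // -> [objectGet on src-bucket/src-key,
    //     objectPut on request.bucketName/request.objectKey,
    //     objectPutTagging on request.bucketName/request.objectKey]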
@@ -1,6 +1,7 @@
-import escapeForXml from '../../../utilities/escapeForXML';
-import { errors } from 'arsenal';
-import { parseString } from 'xml2js';
+const { parseString } = require('xml2js');
+const { errors } = require('arsenal');
+
+const escapeForXml = require('../../../utilities/escapeForXML');

 /*
    Format of xml request:
@@ -32,7 +33,7 @@ const customizedErrs = {
 };

 // Helper validation methods
-export const _validator = {
+const _validator = {
     /** _validator.validateNumberWildcards - check if string has multiple
     * wildcards
     @param {string} string - string to check for multiple wildcards
@@ -286,7 +287,7 @@ function _validateCorsXml(rules) {
 * @return {undefined} - calls callback with cors object on success, error on
 * failure
 */
-export function parseCorsXml(xml, log, cb) {
+function parseCorsXml(xml, log, cb) {
     parseString(xml, (err, result) => {
         if (err) {
             log.trace('xml parsing failed', {
@@ -319,7 +320,7 @@ export function parseCorsXml(xml, log, cb) {
     });
 }

-export function convertToXml(arrayRules) {
+function convertToXml(arrayRules) {
     const xml = [];
     xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
         '<CORSConfiguration>');
@@ -347,3 +348,9 @@ export function convertToXml(arrayRules) {
     xml.push('</CORSConfiguration>');
     return xml.join('');
 }
+
+module.exports = {
+    _validator,
+    parseCorsXml,
+    convertToXml,
+};
@@ -1,13 +1,14 @@
-import async from 'async';
-import assert from 'assert';
-import { errors } from 'arsenal';
+const assert = require('assert');
+const async = require('async');
+const { errors } = require('arsenal');

-import acl from '../../../metadata/acl';
-import BucketInfo from '../../../metadata/BucketInfo';
-import constants from '../../../../constants';
-import createKeyForUserBucket from './createKeyForUserBucket';
-import metadata from '../../../metadata/wrapper';
-import kms from '../../../kms/wrapper';
+const acl = require('../../../metadata/acl');
+const BucketInfo = require('../../../metadata/BucketInfo');
+const constants = require('../../../../constants');
+const createKeyForUserBucket = require('./createKeyForUserBucket');
+const metadata = require('../../../metadata/wrapper');
+const kms = require('../../../kms/wrapper');
+const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');

 const usersBucket = constants.usersBucket;
 const oldUsersBucket = constants.oldUsersBucket;
@@ -33,7 +34,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
         const usersBucketBeingCalled = usersBucketAttrs ?
             usersBucket : oldUsersBucket;
         return metadata.putObjectMD(usersBucketBeingCalled, key,
-            omVal, log, err => {
+            omVal, {}, log, err => {
                 if (err && err.NoSuchBucket) {
                     // There must be no usersBucket so createBucket
                     // one using the new format
@@ -66,7 +67,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
                             // Finally put the key in the new format
                             // usersBucket
                             return metadata.putObjectMD(usersBucket,
-                                key, omVal, log, cb);
+                                key, omVal, {}, log, cb);
                         });
                 }
                 return cb(err);
@@ -111,7 +112,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
 * @param {function} callback - callback with error or null as arguments
 * @return {undefined}
 */
-export function cleanUpBucket(bucketMD, canonicalID, log, callback) {
+function cleanUpBucket(bucketMD, canonicalID, log, callback) {
     const bucketName = bucketMD.getName();
     return addToUsersBucket(canonicalID, bucketName, log, err => {
         if (err) {
@@ -152,14 +153,12 @@ function bucketLevelServerSideEncryption(bucketName, headers, log, cb) {
 * @param {object} headers - request headers
 * @param {string} locationConstraint - locationConstraint provided in
 * request body xml (if provided)
- * @param {boolean} usEastBehavior - whether s3 is set up with a usEastBehavior
- * config option
 * @param {function} log - Werelogs logger
 * @param {function} cb - callback to bucketPut
 * @return {undefined}
 */
-export function createBucket(authInfo, bucketName, headers,
-    locationConstraint, usEastBehavior, log, cb) {
+function createBucket(authInfo, bucketName, headers,
+    locationConstraint, log, cb) {
     log.trace('Creating bucket');
     assert.strictEqual(typeof bucketName, 'string');
     const canonicalID = authInfo.getCanonicalID();
@@ -172,8 +171,6 @@ export function createBucket(authInfo, bucketName, headers,

     if (locationConstraint !== undefined) {
         bucket.setLocationConstraint(locationConstraint);
-    } else if (usEastBehavior) {
-        bucket.setLocationConstraint('us-east-1');
     }
     const parseAclParams = {
         headers,
@@ -238,20 +235,21 @@ export function createBucket(authInfo, bucketName, headers,
             log.trace('bucket has transient flag or deleted flag. cleaning up');
             return cleanUpBucket(newBucketMD, canonicalID, log, cb);
         }
-        // If bucket exists in non-transient and non-deleted
-        // state and owned by requester then return BucketAlreadyOwnedByYou
+        // If bucket already exists in non-transient and non-deleted
+        // state and owned by requester, then return BucketAlreadyOwnedByYou
         // error unless old AWS behavior (us-east-1)
-        // For old behavior:
-        // 1) new locationConstraint should either be undefined or not us-east-1
-        // 2) the existing locationConstraint must be us-east-1 or undefined
-        // 3) the s3 being hit must be set up to have usEastBehavior
-        if ((!locationConstraint || locationConstraint === 'us-east-1') &&
-            (!existingBucketMD.getLocationConstraint() ||
-            existingBucketMD.getLocationConstraint() === 'us-east-1') &&
-            usEastBehavior) {
+        // Existing locationConstraint must have legacyAwsBehavior === true
+        // New locationConstraint should have legacyAwsBehavior === true
+        if (isLegacyAWSBehavior(locationConstraint) &&
+            isLegacyAWSBehavior(existingBucketMD.getLocationConstraint())) {
             log.trace('returning 200 instead of 409 to mirror us-east-1');
             return cb(null, existingBucketMD);
         }
         return cb(errors.BucketAlreadyOwnedByYou, existingBucketMD);
     });
 }
+
+module.exports = {
+    cleanUpBucket,
+    createBucket,
+};
@@ -1,13 +1,13 @@
-import { errors } from 'arsenal';
-import assert from 'assert';
-import async from 'async';
+const assert = require('assert');
+const async = require('async');
+const { errors } = require('arsenal');

-import { logger } from '../../../utilities/logger';
+const logger = require('../../../utilities/logger');

-import constants from '../../../../constants';
-import createKeyForUserBucket from './createKeyForUserBucket';
-import metadata from '../../../metadata/wrapper';
-import kms from '../../../kms/wrapper';
+const constants = require('../../../../constants');
+const createKeyForUserBucket = require('./createKeyForUserBucket');
+const metadata = require('../../../metadata/wrapper');
+const kms = require('../../../kms/wrapper');

 const usersBucket = constants.usersBucket;
 const oldUsersBucket = constants.oldUsersBucket;
@@ -18,7 +18,7 @@ function _deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
         '_deleteUserBucketEntry' });
     const keyForUserBucket = createKeyForUserBucket(canonicalID,
         constants.splitter, bucketName);
-    metadata.deleteObjectMD(usersBucket, keyForUserBucket, log, error => {
+    metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
         // If the object representing the bucket is not in the
         // users bucket just continue
         if (error && error.NoSuchKey) {
@@ -29,7 +29,7 @@ function _deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
             const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
                 constants.oldSplitter, bucketName);
             return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
-                log, error => {
+                {}, log, error => {
                     if (error && !error.NoSuchKey) {
                         log.error('from metadata while deleting user bucket',
                             { error });
@@ -71,7 +71,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
 * @param {string} canonicalID - bucket owner's canonicalID
 * @return {undefined}
 */
-export function invisiblyDelete(bucketName, canonicalID) {
+function invisiblyDelete(bucketName, canonicalID) {
     const log = logger.newRequestLogger();
     log.trace('deleting bucket with deleted flag invisibly', { bucketName });
     return _deleteUserBucketEntry(bucketName, canonicalID, log, err => {
@@ -104,20 +104,28 @@ export function invisiblyDelete(bucketName, canonicalID) {
 * @param {function} cb - callback from async.waterfall in bucketDelete
 * @return {undefined}
 */
-export function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
+function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
     log.trace('deleting bucket from metadata');
     assert.strictEqual(typeof bucketName, 'string');
     assert.strictEqual(typeof canonicalID, 'string');

     return async.waterfall([
         function checkForObjectsStep(next) {
-            return metadata.listObject(bucketName, { maxKeys: 1 }, log,
-                (err, objectsListRes) => {
+            const params = { maxKeys: 1, listingType: 'DelimiterVersions' };
+            // We list all the versions as we want to return BucketNotEmpty
+            // error if there are any versions or delete markers in the bucket.
+            // Works for non-versioned buckets as well since listing versions
+            // includes null (non-versioned) objects in the result.
+            return metadata.listObject(bucketName, params, log,
+                (err, list) => {
                     if (err) {
                         log.error('error from metadata', { error: err });
                         return next(err);
                     }
-                    if (objectsListRes.Contents.length) {
+                    const length = (list.Versions ? list.Versions.length : 0) +
+                        (list.DeleteMarkers ? list.DeleteMarkers.length : 0);
+                    log.debug('listing result', { length });
+                    if (length) {
                         log.debug('bucket delete failed',
                             { error: errors.BucketNotEmpty });
                         return next(errors.BucketNotEmpty);
@@ -188,3 +196,8 @@ export function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
         });
     });
 }
+
+module.exports = {
+    invisiblyDelete,
+    deleteBucket,
+};
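A sketch of the new emptiness computation on a versions listing; the listing shape is taken from the code above.

    // One version or delete marker is enough to refuse the delete.
    const list = { Versions: [{ key: 'a', versionId: 'v1' }],
        DeleteMarkers: [] };
    const length = (list.Versions ? list.Versions.length : 0) +
        (list.DeleteMarkers ? list.DeleteMarkers.length : 0);
    // length === 1 -> deleteBucket returns BucketNotEmpty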
@@ -1,4 +1,4 @@
-import { invisiblyDelete } from './bucketDeletion';
+const { invisiblyDelete } = require('./bucketDeletion');

 /**
  * Checks whether to proceed with a request based on the bucket flags
@@ -7,7 +7,7 @@ import { invisiblyDelete } from './bucketDeletion';
 * @param {string} requestType - type of api request
 * @return {boolean} true if the bucket should be shielded, false otherwise
 */
-export default function (bucket, requestType) {
+function bucketShield(bucket, requestType) {
     const invisiblyDeleteRequests = ['bucketGet', 'bucketHead',
         'bucketGetACL', 'bucketOwnerAction', 'objectGet', 'objectGetACL',
         'objectHead', 'objectPutACL', 'objectDelete'];
@@ -27,3 +27,5 @@ export default function (bucket, requestType) {
     }
     return false;
 }
+
+module.exports = bucketShield;
@@ -1,10 +1,9 @@
-import { errors } from 'arsenal';
-import { parseString } from 'xml2js';
+const { parseString } = require('xml2js');
+const { errors } = require('arsenal');

-import escapeForXml from '../../../utilities/escapeForXML';
-import {
-    WebsiteConfiguration,
-} from '../../../metadata/WebsiteConfiguration';
+const escapeForXml = require('../../../utilities/escapeForXML');
+const { WebsiteConfiguration }
+    = require('../../../metadata/WebsiteConfiguration');
 /*
    Format of xml request:

@@ -55,7 +54,7 @@ function _isValidElem(elem) {
 * @param {boolean} [validateParent] - validate format of parent element
 * @return {boolean} true / false - if parsed xml element contains child
 */
-export function xmlContainsElem(parent, requiredElem, options) {
+function xmlContainsElem(parent, requiredElem, options) {
     // Non-top level xml is parsed into object in the following manner.

     // Example: <Parent><requiredElem>value</requiredElem>
@@ -332,7 +331,7 @@ function _validateWebsiteConfigXml(parsingResult) {
     return websiteConfig;
 }

-export function parseWebsiteConfigXml(xml, log, cb) {
+function parseWebsiteConfigXml(xml, log, cb) {
     parseString(xml, (err, result) => {
         if (err) {
             log.trace('xml parsing failed', {
@@ -364,7 +363,7 @@ export function parseWebsiteConfigXml(xml, log, cb) {
     });
 }

-export function convertToXml(config) {
+function convertToXml(config) {
     const xml = [];
     const indexDocument = config.getIndexDocument();
     const errorDocument = config.getErrorDocument();
@@ -428,3 +427,9 @@ export function convertToXml(config) {
     xml.push('</WebsiteConfiguration>');
     return xml.join('');
 }
+
+module.exports = {
+    xmlContainsElem,
+    parseWebsiteConfigXml,
+    convertToXml,
+};
@@ -1,5 +1,6 @@
-export default function createKeyForUserBucket(canonicalID,
+function createKeyForUserBucket(canonicalID,
     splitter, bucketName) {
     return `${canonicalID}${splitter}${bucketName}`;
 }
+
+module.exports = createKeyForUserBucket;
@@ -0,0 +1,16 @@
+const parseXML = require('../../../utilities/parseXML');
+const ReplicationConfiguration = require('./models/ReplicationConfiguration');
+
+// Handle the steps for returning a valid replication configuration object.
+function getReplicationConfiguration(xml, log, cb) {
+    return parseXML(xml, log, (err, result) => {
+        if (err) {
+            return cb(err);
+        }
+        const validator = new ReplicationConfiguration(result, log);
+        const configErr = validator.parseConfiguration();
+        return cb(configErr || null, validator.getReplicationConfiguration());
+    });
+}
+
+module.exports = getReplicationConfiguration;
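For illustration (not part of the diff), a minimal usage sketch of this helper; the xml string, log, and callback below are placeholders:

    getReplicationConfiguration(xml, log, (err, config) => {
        if (err) {
            // parse or validation error (e.g. MalformedXML, InvalidRequest)
            return callback(err);
        }
        // config is the simplified { role, destination, rules } model
        // that gets stored in bucket metadata.
        return callback(null, config);
    });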
@@ -0,0 +1,340 @@
+const assert = require('assert');
+const UUID = require('node-uuid');
+
+const { errors } = require('arsenal');
+
+const MAX_RULES = 1000;
+const RULE_ID_LIMIT = 255;
+const validStorageClasses = [
+    undefined,
+    'STANDARD',
+    'STANDARD_IA',
+    'REDUCED_REDUNDANCY',
+];
+
+/**
+    Example XML request:
+
+    <ReplicationConfiguration>
+        <Role>IAM-role-ARN</Role>
+        <Rule>
+            <ID>Rule-1</ID>
+            <Status>rule-status</Status>
+            <Prefix>key-prefix</Prefix>
+            <Destination>
+                <Bucket>arn:aws:s3:::bucket-name</Bucket>
+                <StorageClass>
+                    optional-destination-storage-class-override
+                </StorageClass>
+            </Destination>
+        </Rule>
+        <Rule>
+            <ID>Rule-2</ID>
+            ...
+        </Rule>
+        ...
+    </ReplicationConfiguration>
+*/
+
+class ReplicationConfiguration {
+    /**
+     * Create a ReplicationConfiguration instance
+     * @param {string} xml - The parsed XML
+     * @param {object} log - Werelogs logger
+     * @return {object} - ReplicationConfiguration instance
+     */
+    constructor(xml, log) {
+        this._parsedXML = xml;
+        this._log = log;
+        this._configPrefixes = [];
+        this._configIDs = [];
+        // The bucket metadata model of replication config. Note there is a
+        // single `destination` property because we can replicate to only one
+        // other bucket. Thus each rule is simplified to these properties.
+        this._role = null;
+        this._destination = null;
+        this._rules = null;
+    }
+
+    /**
+     * Get the role of the bucket replication configuration
+     * @return {string|null} - The role if defined, otherwise `null`
+     */
+    getRole() {
+        return this._role;
+    }
+
+    /**
+     * The bucket to replicate data to
+     * @return {string|null} - The bucket if defined, otherwise `null`
+     */
+    getDestination() {
+        return this._destination;
+    }
+
+    /**
+     * The rules for replication configuration
+     * @return {string|null} - The rules if defined, otherwise `null`
+     */
+    getRules() {
+        return this._rules;
+    }
+
+    /**
+     * Get the replication configuration
+     * @return {object} - The replication configuration
+     */
+    getReplicationConfiguration() {
+        return {
+            role: this.getRole(),
+            destination: this.getDestination(),
+            rules: this.getRules(),
+        };
+    }
+
+    /**
+     * Build the rule object from the parsed XML of the given rule
+     * @param {object} rule - The rule object from this._parsedXML
+     * @return {object} - The rule object to push into the `Rules` array
+     */
+    _buildRuleObject(rule) {
+        const obj = {
+            prefix: rule.Prefix[0],
+            enabled: rule.Status[0] === 'Enabled',
+        };
+        // ID is an optional property, but create one if not provided or is ''.
+        // We generate a 48-character alphanumeric, unique ID for the rule.
+        obj.id = rule.ID && rule.ID[0] !== '' ? rule.ID[0] :
+            Buffer.from(UUID.v4()).toString('base64');
+        // StorageClass is an optional property.
+        if (rule.Destination[0].StorageClass) {
+            obj.storageClass = rule.Destination[0].StorageClass[0];
+        }
+        return obj;
+    }
+
+    /**
+     * Check that the `Role` property of the configuration is valid
+     * @return {undefined}
+     */
+    _parseRole() {
+        const Role = this._parsedXML.ReplicationConfiguration.Role;
+        if (!Role) {
+            return errors.MalformedXML;
+        }
+        // TODO: Update to validate role privileges after implemented in Vault.
+        // Role should be an IAM user name.
+        const arr = Role[0].split(':');
+        const isValidARN = arr.length === 7 ||
+            (arr.length === 6 && arr[5].split('/').length === 2);
+        if (!isValidARN) {
+            return errors.InvalidArgument.customizeDescription(
+                'Invalid Role specified in replication configuration');
+        }
+        this._role = Role[0];
+        return undefined;
+    }
+
+    /**
+     * Check that the `Rules` property array is valid
+     * @return {undefined}
+     */
+    _parseRules() {
+        // Note that the XML uses 'Rule' while the config object uses 'Rules'.
+        const { Rule } = this._parsedXML.ReplicationConfiguration;
+        if (!Rule || Rule.length < 1) {
+            return errors.MalformedXML;
+        }
+        if (Rule.length > MAX_RULES) {
+            return errors.InvalidRequest.customizeDescription(
+                'Number of defined replication rules cannot exceed 1000');
+        }
+        const err = this._parseEachRule(Rule);
+        if (err) {
+            return err;
+        }
+        return undefined;
+    }
+
+    /**
+     * Check that each rule in the `Rules` property array is valid
+     * @param {array} rules - The rule array from this._parsedXML
+     * @return {undefined}
+     */
+    _parseEachRule(rules) {
+        const rulesArr = [];
+        for (let i = 0; i < rules.length; i++) {
+            const err =
+                this._parseStatus(rules[i]) || this._parsePrefix(rules[i]) ||
+                this._parseID(rules[i]) || this._parseDestination(rules[i]);
+            if (err) {
+                return err;
+            }
+            rulesArr.push(this._buildRuleObject(rules[i]));
+        }
+        this._rules = rulesArr;
+        return undefined;
+    }
+
+    /**
+     * Check that the `Status` property is valid
+     * @param {object} rule - The rule object from this._parsedXML
+     * @return {undefined}
+     */
+    _parseStatus(rule) {
+        const status = rule.Status && rule.Status[0];
+        if (!status || !['Enabled', 'Disabled'].includes(status)) {
+            return errors.MalformedXML;
+        }
+        return undefined;
+    }
+
+    /**
+     * Check that the `Prefix` property is valid
+     * @param {object} rule - The rule object from this._parsedXML
+     * @return {undefined}
+     */
+    _parsePrefix(rule) {
+        const prefix = rule.Prefix && rule.Prefix[0];
+        // An empty string prefix should be allowed.
+        if (!prefix && prefix !== '') {
+            return errors.MalformedXML;
+        }
+        if (prefix.length > 1024) {
+            return errors.InvalidArgument.customizeDescription('Rule prefix ' +
+                'cannot be longer than maximum allowed key length of 1024');
+        }
+        // Each Prefix in a list of rules must not overlap. For example, two
+        // prefixes 'TaxDocs' and 'TaxDocs/2015' are overlapping. An empty
+        // string prefix is expected to overlap with any other prefix.
+        for (let i = 0; i < this._configPrefixes.length; i++) {
+            const used = this._configPrefixes[i];
+            if (prefix.startsWith(used) || used.startsWith(prefix)) {
+                return errors.InvalidRequest.customizeDescription('Found ' +
+                    `overlapping prefixes '${used}' and '${prefix}'`);
+            }
+        }
+        this._configPrefixes.push(prefix);
+        return undefined;
+    }
+
+    /**
+     * Check that the `ID` property is valid
+     * @param {object} rule - The rule object from this._parsedXML
+     * @return {undefined}
+     */
+    _parseID(rule) {
+        const id = rule.ID && rule.ID[0];
+        if (id && id.length > RULE_ID_LIMIT) {
+            return errors.InvalidArgument
+                .customizeDescription('Rule Id cannot be greater than 255');
+        }
+        // Each ID in a list of rules must be unique.
+        if (this._configIDs.includes(id)) {
+            return errors.InvalidRequest.customizeDescription(
+                'Rule Id must be unique');
+        }
+        this._configIDs.push(id);
+        return undefined;
+    }
+
+    /**
+     * Check that the `StorageClass` is a valid class
+     * @param {string} storageClass - The storage class to validate
+     * @return {boolean} `true` if valid, otherwise `false`
+     */
+    static _isValidStorageClass(storageClass) {
+        return validStorageClasses.includes(storageClass);
+    }
+
+    /**
+     * Check that the `StorageClass` property is valid
+     * @param {object} destination - The destination object from this._parsedXML
+     * @return {undefined}
+     */
+    _parseStorageClass(destination) {
+        const storageClass = destination.StorageClass &&
+            destination.StorageClass[0];
+        if (!ReplicationConfiguration._isValidStorageClass(storageClass)) {
+            return errors.MalformedXML;
+        }
+        return undefined;
+    }
+
+    /**
+     * Check that the `Bucket` property is valid
+     * @param {object} destination - The destination object from this._parsedXML
+     * @return {undefined}
+     */
+    _parseBucket(destination) {
+        const bucket = destination.Bucket && destination.Bucket[0];
+        if (!bucket) {
+            return errors.MalformedXML;
+        }
+        const isValidARN = bucket.split(':').length === 6;
+        if (!isValidARN) {
+            return errors.InvalidArgument
+                .customizeDescription('Invalid bucket ARN');
+        }
+        // We can replicate objects only to one destination bucket.
+        if (this._destination && this._destination !== bucket) {
+            return errors.InvalidRequest.customizeDescription(
+                'The destination bucket must be same for all rules');
+        }
+        this._destination = bucket;
+        return undefined;
+    }
+
+    /**
+     * Check that the `destination` property is valid
+     * @param {object} rule - The rule object from this._parsedXML
+     * @return {undefined}
+     */
+    _parseDestination(rule) {
+        const dest = rule.Destination && rule.Destination[0];
+        if (!dest) {
+            return errors.MalformedXML;
+        }
+        const err = this._parseBucket(dest) || this._parseStorageClass(dest);
+        if (err) {
+            return err;
+        }
+        return undefined;
+    }
+
+    /**
+     * Check that the request configuration is valid
+     * @return {undefined}
+     */
+    parseConfiguration() {
+        const err = this._parseRole() || this._parseRules();
+        if (err) {
+            return err;
+        }
+        return undefined;
+    }
+
+    /**
+     * Validate the bucket metadata replication configuration structure and
+     * value types
+     * @param {object} config - The replication configuration to validate
+     * @return {undefined}
+     */
+    static validateConfig(config) {
+        assert.strictEqual(typeof config, 'object');
+        const { role, rules, destination } = config;
+        assert.strictEqual(typeof role, 'string');
+        assert.strictEqual(typeof destination, 'string');
+        assert.strictEqual(Array.isArray(rules), true);
+        rules.forEach(rule => {
+            assert.strictEqual(typeof rule, 'object');
+            const { prefix, enabled, id, storageClass } = rule;
+            assert.strictEqual(typeof prefix, 'string');
+            assert.strictEqual(typeof enabled, 'boolean');
+            assert(id === undefined || typeof id === 'string');
+            assert(this._isValidStorageClass(storageClass) === true);
+        });
+    }
+}
+
+module.exports = ReplicationConfiguration;
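To make the XML-to-metadata mapping concrete, a sketch with illustrative values (the generated rule ID is abbreviated):

    // Given one Enabled rule with Prefix 'TaxDocs' and destination bucket
    // 'arn:aws:s3:::dest-bucket', getReplicationConfiguration() returns
    // roughly:
    // {
    //     role: 'arn:aws:iam::123456789012:role/replication',
    //     destination: 'arn:aws:s3:::dest-bucket',
    //     rules: [{ prefix: 'TaxDocs', enabled: true, id: '<base64 uuid>' }],
    // }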
@@ -0,0 +1,207 @@
+const { config } = require('../../../Config');
+const escapeForXML = require('../../../utilities/escapeForXML');
+
+class BackendInfo {
+    /**
+     * Represents the info necessary to evaluate which data backend to use
+     * on a data put call.
+     * @constructor
+     * @param {string | undefined} objectLocationConstraint - location constraint
+     * for object based on user meta header
+     * @param {string | undefined} bucketLocationConstraint - location
+     * constraint for bucket based on bucket metadata
+     * @param {string} requestEndpoint - endpoint to which request was made
+     */
+    constructor(objectLocationConstraint, bucketLocationConstraint,
+        requestEndpoint) {
+        this._objectLocationConstraint = objectLocationConstraint;
+        this._bucketLocationConstraint = bucketLocationConstraint;
+        this._requestEndpoint = requestEndpoint;
+        return this;
+    }
+
+    /**
+     * validate proposed location constraint against config
+     * @param {string | undefined} locationConstraint - value of user
+     * metadata location constraint header or bucket location constraint
+     * @param {object} log - werelogs logger
+     * @return {boolean} - true if valid, false if not
+     */
+    static isValidLocationConstraint(locationConstraint, log) {
+        if (Object.keys(config.locationConstraints).
+            indexOf(locationConstraint) < 0) {
+            log.trace('proposed locationConstraint is invalid',
+                { locationConstraint });
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * validate that request endpoint is listed in the restEndpoint config
+     * @param {string} requestEndpoint - request endpoint
+     * @param {object} log - werelogs logger
+     * @return {boolean} - true if present, false if not
+     */
+    static isRequestEndpointPresent(requestEndpoint, log) {
+        if (Object.keys(config.restEndpoints).indexOf(requestEndpoint) < 0) {
+            log.trace('requestEndpoint does not match config restEndpoints',
+                { requestEndpoint });
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * validate that locationConstraint for request Endpoint matches
+     * one config locationConstraint
+     * @param {string} requestEndpoint - request endpoint
+     * @param {object} log - werelogs logger
+     * @return {boolean} - true if matches, false if not
+     */
+    static isRequestEndpointValueValid(requestEndpoint, log) {
+        if (Object.keys(config.locationConstraints).indexOf(config
+            .restEndpoints[requestEndpoint]) < 0) {
+            log.trace('the default locationConstraint for request' +
+                'Endpoint does not match any config locationConstraint',
+                { requestEndpoint });
+            return false;
+        }
+        return true;
+    }
+
+    /**
+     * validate that s3 server is running with a file or memory backend
+     * @param {string} requestEndpoint - request endpoint
+     * @param {object} log - werelogs logger
+     * @return {boolean} - true if running with file/mem backend, false if not
+     */
+    static isMemOrFileBackend(requestEndpoint, log) {
+        if (config.backends.data === 'mem' ||
+            config.backends.data === 'file') {
+            log.trace('use data backend for the location', {
+                dataBackend: config.backends.data,
+                method: 'isMemOrFileBackend',
+            });
+            return true;
+        }
+        return false;
+    }
+
+    /**
+     * validate requestEndpoint against config or mem/file data backend
+     * - if there is no match for the request endpoint in the config
+     * restEndpoints and data backend is set to mem or file we will use this
+     * data backend for the location.
+     * - if locationConstraint for request Endpoint does not match
+     * any config locationConstraint, we will return an error
+     * @param {string} requestEndpoint - request endpoint
+     * @param {object} log - werelogs logger
+     * @return {boolean} - true if valid, false if not
+     */
+    static isValidRequestEndpointOrBackend(requestEndpoint, log) {
+        if (!BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
+            return BackendInfo.isMemOrFileBackend(requestEndpoint, log);
+        }
+        return BackendInfo.isRequestEndpointValueValid(requestEndpoint, log);
+    }
+
+    /**
+     * validate controlling BackendInfo Parameter
+     * @param {string | undefined} objectLocationConstraint - value of user
+     * metadata location constraint header
+     * @param {string | null} bucketLocationConstraint - location
+     * constraint from bucket metadata
+     * @param {string} requestEndpoint - endpoint of request
+     * @param {object} log - werelogs logger
+     * @return {object} - location constraint validity
+     */
+    static controllingBackendParam(objectLocationConstraint,
+        bucketLocationConstraint, requestEndpoint, log) {
+        if (objectLocationConstraint) {
+            if (BackendInfo.isValidLocationConstraint(objectLocationConstraint,
+                log)) {
+                log.trace('objectLocationConstraint is valid');
+                return { isValid: true };
+            }
+            log.trace('objectLocationConstraint is invalid');
+            return { isValid: false, description: 'Object Location Error - ' +
+                `Your object location "${escapeForXML(objectLocationConstraint)}"` +
+                ' is not in your location config - Please update.' };
+        }
+        if (bucketLocationConstraint) {
+            if (BackendInfo.isValidLocationConstraint(bucketLocationConstraint,
+                log)) {
+                log.trace('bucketLocationConstraint is valid');
+                return { isValid: true };
+            }
+            log.trace('bucketLocationConstraint is invalid');
+            return { isValid: false, description: 'Bucket Location Error - ' +
+                `Your bucket location "${escapeForXML(bucketLocationConstraint)}"` +
+                ' is not in your location config - Please update.' };
+        }
+        if (!BackendInfo.isValidRequestEndpointOrBackend(requestEndpoint,
+            log)) {
+            return { isValid: false, description: 'Endpoint Location Error - ' +
+                `Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
+                'in your config OR the default location constraint for request ' +
+                `endpoint "${escapeForXML(requestEndpoint)}" does not ` +
+                'match any config locationConstraint - Please update.' };
+        }
+        return { isValid: true };
+    }
+
+    /**
+     * Return objectLocationConstraint
+     * @return {string | undefined} objectLocationConstraint;
+     */
+    getObjectLocationConstraint() {
+        return this._objectLocationConstraint;
+    }
+
+    /**
+     * Return bucketLocationConstraint
+     * @return {string | undefined} bucketLocationConstraint;
+     */
+    getBucketLocationConstraint() {
+        return this._bucketLocationConstraint;
+    }
+
+    /**
+     * Return requestEndpoint
+     * @return {string} requestEndpoint;
+     */
+    getRequestEndpoint() {
+        return this._requestEndpoint;
+    }
+
+    /**
+     * Return locationConstraint that should be used with put request
+     * Order of priority is:
+     * (1) objectLocationConstraint,
+     * (2) bucketLocationConstraint,
+     * (3) default locationConstraint for requestEndpoint if requestEndpoint
+     * is listed in restEndpoints in config.json
+     * (4) default data backend
+     * @return {string} locationConstraint;
+     */
+    getControllingLocationConstraint() {
+        const objectLC = this.getObjectLocationConstraint();
+        const bucketLC = this.getBucketLocationConstraint();
+        const reqEndpoint = this.getRequestEndpoint();
+        if (objectLC) {
+            return objectLC;
+        }
+        if (bucketLC) {
+            return bucketLC;
+        }
+        if (config.restEndpoints[reqEndpoint]) {
+            return config.restEndpoints[reqEndpoint];
+        }
+        return config.backends.data;
+    }
+}
+
+module.exports = {
+    BackendInfo,
+};
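A sketch of the priority order in getControllingLocationConstraint; the constraint names and endpoint below are invented for illustration and assume matching entries in config:

    // Assume config.restEndpoints['s3.example.com'] === 'us-east-1'.
    new BackendInfo('aws-loc', 'eu-west-1', 's3.example.com')
        .getControllingLocationConstraint(); // 'aws-loc' (object header)
    new BackendInfo(undefined, 'eu-west-1', 's3.example.com')
        .getControllingLocationConstraint(); // 'eu-west-1' (bucket)
    new BackendInfo(undefined, undefined, 's3.example.com')
        .getControllingLocationConstraint(); // 'us-east-1' (endpoint default)
    // With no match at all, it falls back to config.backends.data.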
@@ -64,7 +64,7 @@ function _headersMatchRule(headers, allowedHeaders) {
  * in a preflight CORS request
  * @return {(null|object)} - matching rule if found; null if no match
  */
-export function findCorsRule(rules, origin, method, headers) {
+function findCorsRule(rules, origin, method, headers) {
     return rules.find(rule => {
         if (rule.allowedMethods.indexOf(method) === -1) {
             return false;
@@ -99,7 +99,7 @@ export function findCorsRule(rules, origin, method, headers) {
  * for a CORS preflight request
  * @return {object} resHeaders - headers to include in response
  */
-export function generateCorsResHeaders(rule, origin, method, headers,
+function generateCorsResHeaders(rule, origin, method, headers,
 isPreflight) {
     const resHeaders = {
         'access-control-max-age': rule.maxAgeSeconds,
@@ -130,3 +130,8 @@ isPreflight) {
     }
     return resHeaders;
 }
+
+module.exports = {
+    findCorsRule,
+    generateCorsResHeaders,
+};
@@ -0,0 +1,185 @@
+const async = require('async');
+const { errors, s3validators } = require('arsenal');
+const getMetaHeaders = s3validators.userMetadata.getMetaHeaders;
+
+const constants = require('../../../../constants');
+const data = require('../../../data/wrapper');
+const services = require('../../../services');
+const logger = require('../../../utilities/logger');
+const { dataStore } = require('./storeObject');
+const locationConstraintCheck = require('./locationConstraintCheck');
+const { versioningPreprocessing } = require('./versioning');
+const removeAWSChunked = require('./removeAWSChunked');
+const { config } = require('../../../Config');
+const validateWebsiteHeader = require('./websiteServing')
+    .validateWebsiteHeader;
+
+
+function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
+    metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
+    services.metadataStoreObject(bucketName, dataGetInfo,
+        cipherBundle, metadataStoreParams, (err, result) => {
+            if (err) {
+                return callback(err);
+            }
+            if (dataToDelete) {
+                const newDataStoreName = Array.isArray(dataGetInfo) ?
+                    dataGetInfo[0].dataStoreName : null;
+                data.batchDelete(dataToDelete, requestMethod,
+                    newDataStoreName, deleteLog);
+            }
+            return callback(null, result);
+        });
+}
+
+/** createAndStoreObject - store data, store metadata, and delete old data
+ * and old metadata as necessary
+ * @param {string} bucketName - name of bucket
+ * @param {BucketInfo} bucketMD - BucketInfo instance
+ * @param {string} objectKey - name of object
+ * @param {object} objMD - object metadata
+ * @param {AuthInfo} authInfo - AuthInfo instance with requester's info
+ * @param {string} canonicalID - user's canonical ID
+ * @param {object} cipherBundle - cipher bundle that encrypts the data
+ * @param {Request} request - http request object
+ * @param {boolean} [isDeleteMarker] - whether creating a delete marker
+ * @param {(object|null)} streamingV4Params - if v4 auth, object containing
+ * accessKey, signatureFromRequest, region, scopeDate, timestamp, and
+ * credentialScope (to be used for streaming v4 auth if applicable)
+ * @param {RequestLogger} log - logger instance
+ * @param {function} callback - callback function
+ * @return {undefined} and call callback with (err, result) -
+ * result.contentMD5 - content md5 of new object or version
+ * result.versionId - unencrypted versionId returned by metadata
+ */
+function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
+    canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
+    log, callback) {
+    const size = isDeleteMarker ? 0 : request.parsedContentLength;
+
+    const websiteRedirectHeader =
+        request.headers['x-amz-website-redirect-location'];
+    if (!validateWebsiteHeader(websiteRedirectHeader)) {
+        const err = errors.InvalidRedirectLocation;
+        log.debug('invalid x-amz-website-redirect-location ' +
+            `value ${websiteRedirectHeader}`, { error: err });
+        return callback(err);
+    }
+
+    const metaHeaders = isDeleteMarker ? [] :
+        getMetaHeaders(request.headers);
+    if (metaHeaders instanceof Error) {
+        log.debug('user metadata validation failed', {
+            error: metaHeaders,
+            method: 'createAndStoreObject',
+        });
+        return process.nextTick(() => callback(metaHeaders));
+    }
+    log.trace('meta headers', { metaHeaders, method: 'objectPut' });
+    const objectKeyContext = {
+        bucketName,
+        owner: canonicalID,
+        namespace: request.namespace,
+        objectKey,
+        metaHeaders,
+    };
+    // If the request was made with a pre-signed url, the x-amz-acl 'header'
+    // might be in the query string rather than the actual headers so include
+    // it here
+    const headers = request.headers;
+    if (request.query && request.query['x-amz-acl']) {
+        headers['x-amz-acl'] = request.query['x-amz-acl'];
+    }
+    const metadataStoreParams = {
+        objectKey,
+        authInfo,
+        metaHeaders,
+        size,
+        headers,
+        isDeleteMarker,
+        log,
+    };
+    if (!isDeleteMarker) {
+        metadataStoreParams.contentType = request.headers['content-type'];
+        metadataStoreParams.cacheControl = request.headers['cache-control'];
+        metadataStoreParams.contentDisposition =
+            request.headers['content-disposition'];
+        metadataStoreParams.contentEncoding =
+            removeAWSChunked(request.headers['content-encoding']);
+        metadataStoreParams.expires = request.headers.expires;
+        metadataStoreParams.tagging = request.headers['x-amz-tagging'];
+    }
+
+    const backendInfoObj =
+        locationConstraintCheck(request, null, bucketMD, log);
+    if (backendInfoObj.err) {
+        return process.nextTick(() => {
+            callback(backendInfoObj.err);
+        });
+    }
+    const backendInfo = backendInfoObj.backendInfo;
+    const location = backendInfo.getControllingLocationConstraint();
+    const locationType = config.locationConstraints[location].type;
+    /* eslint-disable camelcase */
+    const dontSkipBackend = { aws_s3: true };
+    /* eslint-enable camelcase */
+
+    const requestLogger =
+        logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
+    return async.waterfall([
+        function storeData(next) {
+            if (size === 0 && !dontSkipBackend[locationType]) {
+                metadataStoreParams.contentMD5 = constants.emptyFileMd5;
+                return next(null, null, null);
+            }
+            return dataStore(objectKeyContext, cipherBundle, request, size,
+                streamingV4Params, backendInfo, log, next);
+        },
+        function processDataResult(dataGetInfo, calculatedHash, next) {
+            if (dataGetInfo === null || dataGetInfo === undefined) {
+                return next(null, null);
+            }
+            // So that data retrieval information for MPU's and
+            // regular puts are stored in the same data structure,
+            // place the retrieval info here into a single element array
+            const { key, dataStoreName, dataStoreType, dataStoreETag } =
+                dataGetInfo;
+            const dataGetInfoArr = [{ key, size, start: 0, dataStoreName,
+                dataStoreType, dataStoreETag }];
+            if (cipherBundle) {
+                dataGetInfoArr[0].cryptoScheme = cipherBundle.cryptoScheme;
+                dataGetInfoArr[0].cipheredDataKey =
+                    cipherBundle.cipheredDataKey;
+            }
+            metadataStoreParams.contentMD5 = calculatedHash;
+            return next(null, dataGetInfoArr);
+        },
+        function getVersioningInfo(infoArr, next) {
+            return versioningPreprocessing(bucketName, bucketMD,
+                metadataStoreParams.objectKey, objMD, log, (err, options) => {
+                    if (err) {
+                        // TODO: check AWS error when user requested a specific
+                        // version before any versions have been put
+                        const logLvl = err === errors.BadRequest ?
+                            'debug' : 'error';
+                        log[logLvl]('error getting versioning info', {
+                            error: err,
+                            method: 'versioningPreprocessing',
+                        });
+                    }
+                    return next(err, options, infoArr);
+                });
+        },
+        function storeMDAndDeleteData(options, infoArr, next) {
+            metadataStoreParams.versionId = options.versionId;
+            metadataStoreParams.versioning = options.versioning;
+            metadataStoreParams.isNull = options.isNull;
+            metadataStoreParams.nullVersionId = options.nullVersionId;
+            return _storeInMDandDeleteData(bucketName, infoArr,
+                cipherBundle, metadataStoreParams,
+                options.dataToDelete, requestLogger, request.method, next);
+        },
+    ], callback);
+}
+
+module.exports = createAndStoreObject;
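A condensed view of the waterfall's data flow may help; stage names match the code above and values are illustrative:

    // storeData            -> (dataGetInfo, calculatedHash), or (null, null)
    //                         for a zero-byte object on a skippable backend
    // processDataResult    -> [{ key, size, start: 0, dataStoreName,
    //                           dataStoreType, dataStoreETag, ... }]
    // getVersioningInfo    -> options: { versionId, versioning, isNull,
    //                           nullVersionId, dataToDelete }
    // storeMDAndDeleteData -> stores metadata, then batch-deletes replaced
    //                         data locations if any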
@@ -0,0 +1,52 @@
+const { errors } = require('arsenal');
+
+const { BackendInfo } = require('./BackendInfo');
+const constants = require('../../../../constants');
+
+/**
+ * locationConstraintCheck - if new config, on object put, object copy,
+ * or initiate MPU request, gathers object location constraint,
+ * bucket location constraint, and request endpoint and checks their validity
+ * @param {request} request - normalized request object
+ * @param {object} metaHeaders - headers of metadata storage params used in
+ * objectCopy api
+ * @param {BucketInfo} bucket - metadata BucketInfo instance
+ * @param {object} log - Werelogs instance
+ * @return {object} - consists of three keys: error, controllingLC, and
+ * backendInfo. backendInfo only has value if new config
+ */
+function locationConstraintCheck(request, metaHeaders, bucket, log) {
+    let backendInfoObj = {};
+
+    let objectLocationConstraint;
+    if (metaHeaders) {
+        objectLocationConstraint =
+            metaHeaders[constants.objectLocationConstraintHeader];
+    } else {
+        objectLocationConstraint = request
+            .headers[constants.objectLocationConstraintHeader];
+    }
+    const bucketLocationConstraint = bucket.getLocationConstraint();
+    const requestEndpoint = request.parsedHost;
+
+    const controllingBackend = BackendInfo.controllingBackendParam(
+        objectLocationConstraint, bucketLocationConstraint,
+        requestEndpoint, log);
+    if (!controllingBackend.isValid) {
+        backendInfoObj = {
+            err: errors.InvalidArgument.customizeDescription(controllingBackend.
+                description),
+        };
+        return backendInfoObj;
+    }
+    const backendInfo = new BackendInfo(objectLocationConstraint,
+        bucketLocationConstraint, requestEndpoint);
+    backendInfoObj = {
+        err: null,
+        controllingLC: backendInfo.getControllingLocationConstraint(),
+        backendInfo,
+    };
+    return backendInfoObj;
+}
+
+module.exports = locationConstraintCheck;
@@ -0,0 +1,39 @@
+const url = require('url');
+const querystring = require('querystring');
+const { errors } = require('arsenal');
+
+const { decodeVersionId } = require('./versioning');
+
+/** parseCopySource - parse objectCopy or objectPutCopyPart copy source header
+ * @param {string} apiMethod - api method
+ * @param {string} copySourceHeader - 'x-amz-copy-source' request header
+ * @return {object} - sourceBucket, sourceObject, sourceVersionId, parsingError
+ */
+function parseCopySource(apiMethod, copySourceHeader) {
+    if (apiMethod !== 'objectCopy' && apiMethod !== 'objectPutCopyPart') {
+        return {};
+    }
+    const { pathname, query } = url.parse(copySourceHeader);
+    let source = querystring.unescape(pathname);
+    // If client sends the source bucket/object with a leading /, remove it
+    if (source[0] === '/') {
+        source = source.slice(1);
+    }
+    const slashSeparator = source.indexOf('/');
+    if (slashSeparator === -1) {
+        return { parsingError: errors.InvalidArgument };
+    }
+    // Pull the source bucket and source object separated by /
+    const sourceBucket = source.slice(0, slashSeparator);
+    const sourceObject = source.slice(slashSeparator + 1);
+    const sourceVersionId =
+        decodeVersionId(query ? querystring.parse(query) : undefined);
+    if (sourceVersionId instanceof Error) {
+        const err = sourceVersionId;
+        return { parsingError: err };
+    }
+
+    return { sourceBucket, sourceObject, sourceVersionId };
+}
+
+module.exports = parseCopySource;
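Hedged input/output examples for this parser (values illustrative; a real sourceVersionId would be an encoded version id):

    parseCopySource('objectCopy', '/srcbucket/folder/key.txt');
    // => { sourceBucket: 'srcbucket', sourceObject: 'folder/key.txt',
    //      sourceVersionId: undefined }
    parseCopySource('objectCopy', 'no-slash-here');
    // => { parsingError: errors.InvalidArgument }
    parseCopySource('objectGet', '/srcbucket/key');
    // => {} (only the two copy APIs are parsed)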
@@ -1,60 +0,0 @@
-import { errors } from 'arsenal';
-
-/**
- * parseRange - Validate and parse range request header
- * @param {string} rangeHeader - range header from request
- * which should be in form bytes=0-9
- * @param {number} totalLength - totalLength of object
- * @return {object} object containing range (array | undefined) and error if
- * range is invalid
- */
-export function parseRange(rangeHeader, totalLength) {
-    // If the range is invalid in any manner, AWS just returns the full object
-    // (end is inclusive so minus 1)
-    const maxEnd = totalLength - 1;
-    let range = undefined;
-    if (!rangeHeader.startsWith('bytes=')
-        || rangeHeader.indexOf('-') < 0
-        // Multiple ranges not supported
-        || rangeHeader.indexOf(',') > 0) {
-        return { range };
-    }
-    const rangePortion = rangeHeader.replace('bytes=', '').split('-');
-    if (rangePortion.length > 2) {
-        return { range };
-    }
-    let start;
-    let end;
-    // Handle incomplete specifier where just offset from end is given
-    if (rangePortion[0] === '') {
-        const offset = parseInt(rangePortion[1], 10);
-        if (Number.isNaN(offset)) {
-            return { range };
-        }
-        start = totalLength - offset;
-        end = maxEnd;
-    // Handle incomplete specifier where just starting place is given
-    // meaning range goes from start of range to end of object
-    } else if (rangePortion[1] === '') {
-        start = parseInt(rangePortion[0], 10);
-        end = maxEnd;
-    } else {
-        start = parseInt(rangePortion[0], 10);
-        end = parseInt(rangePortion[1], 10);
-    }
-    // InvalidRange when the resource being accessed does not cover
-    // the byte range
-    if (start >= totalLength && end >= totalLength) {
-        return { range, error: errors.InvalidRange };
-    }
-    end = Math.min(end, maxEnd);
-
-    if (Number.isNaN(start) || Number.isNaN(end) || start > end) {
-        return { range };
-    }
-    if (start < 0) {
-        start = 0;
-    }
-    range = [start, end];
-    return { range };
-}
@@ -5,9 +5,11 @@
  * @param {string} sourceHeader - Content-Encoding header from request headers
  * @return {string} new value w. 'aws-chunked'/'aws-chunked,' substring removed
  */
-export default function removeAWSChunked(sourceHeader) {
+function removeAWSChunked(sourceHeader) {
     if (sourceHeader === undefined) {
         return undefined;
     }
     return sourceHeader.replace(/aws-chunked,?/, '');
 }
+
+module.exports = removeAWSChunked;
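Behavior follows directly from the regex:

    removeAWSChunked('aws-chunked,gzip'); // => 'gzip'
    removeAWSChunked('aws-chunked');      // => ''
    removeAWSChunked(undefined);          // => undefined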
@@ -0,0 +1,90 @@
+/**
+ * @param {array} dataLocations - all data locations
+ * @param {array} outerRange - range from request
+ * @return {array} parsedLocations - dataLocations filtered for
+ * what needed and ranges added for particular parts as needed
+ */
+function setPartRanges(dataLocations, outerRange) {
+    const parsedLocations = [];
+
+    if (!outerRange) {
+        return dataLocations.slice();
+    }
+
+    const begin = outerRange[0];
+    const end = outerRange[1];
+    // If have single location, do not need to break up range among parts
+    // and might not have a start and size property
+    // on the dataLocation (because might be pre- md-model-version 2),
+    // so just set range as property
+    if (dataLocations.length === 1) {
+        const soleLocation = dataLocations[0];
+        soleLocation.range = [begin, end];
+        // If missing size, does not impact get range.
+        // We modify size here in case this function is used for
+        // object put part copy where will need size.
+        // If pre-md-model-version 2, object put part copy will not
+        // be allowed, so not an issue that size not modified here.
+        if (dataLocations[0].size) {
+            const partSize = parseInt(dataLocations[0].size, 10);
+            soleLocation.size =
+                Math.min(partSize, end - begin + 1).toString();
+        }
+        parsedLocations.push(soleLocation);
+        return parsedLocations;
+    }
+    // Range is inclusive of endpoint so need plus 1
+    const max = end - begin + 1;
+    let total = 0;
+    for (let i = 0; i < dataLocations.length; i++) {
+        if (total >= max) {
+            break;
+        }
+        const partStart = parseInt(dataLocations[i].start, 10);
+        const partSize = parseInt(dataLocations[i].size, 10);
+        if (partStart + partSize <= begin) {
+            continue;
+        }
+        if (partStart >= begin) {
+            // If the whole part is in the range, just include it
+            if (partSize + total <= max) {
+                const partWithoutRange = dataLocations[i];
+                partWithoutRange.size = partSize.toString();
+                parsedLocations.push(partWithoutRange);
+                total += partSize;
+            // Otherwise set a range limit on the part end
+            // and we're done
+            } else {
+                const partWithRange = dataLocations[i];
+                // Need to subtract one from endPart since range
+                // includes endPart in byte count
+                const endPart = Math.min(partSize - 1, max - total - 1);
+                partWithRange.range = [0, endPart];
+                // modify size to be stored for object put part copy
+                partWithRange.size = (endPart + 1).toString();
+                parsedLocations.push(dataLocations[i]);
+                break;
+            }
+        } else {
+            // Offset start (and end if necessary)
+            const partWithRange = dataLocations[i];
+            const startOffset = begin - partStart;
+            // Use full remaining part if remaining partSize is less
+            // than byte range we need to satisfy. Or use byte range
+            // we need to satisfy taking into account any startOffset
+            const endPart = Math.min(partSize - 1,
+                max - total + startOffset - 1);
+            partWithRange.range = [startOffset, endPart];
+            // modify size to be stored for object put part copy
+            partWithRange.size = (endPart - startOffset + 1).toString();
+            parsedLocations.push(partWithRange);
+            // Need to add byte back since with total we are counting
+            // number of bytes while the endPart and startOffset
+            // are in terms of range which include the endpoint
+            total += endPart - startOffset + 1;
+        }
+    }
+    return parsedLocations;
+}
+
+module.exports = setPartRanges;
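A worked example of the loop arithmetic (numbers invented for illustration):

    // Two 10-byte parts: part 'a' holds bytes 0-9, part 'b' bytes 10-19.
    const locations = [
        { key: 'a', start: 0, size: 10 },
        { key: 'b', start: 10, size: 10 },
    ];
    setPartRanges(locations, [5, 14]); // request bytes=5-14, so max = 10
    // part 'a': partStart 0 < begin 5, so startOffset = 5 and
    //   endPart = min(9, 10 - 0 + 5 - 1) = 9 -> range [5, 9], size '5'
    // part 'b': partStart 10 >= begin and partSize + total (15) > max, so
    //   endPart = min(9, 10 - 5 - 1) = 4 -> range [0, 4], size '5'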
@@ -1,8 +1,8 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
+const { parseRange } = require('arsenal/lib/network/http/utils');
 
-import constants from '../../../../constants';
-import routesUtils from '../../../routes/routesUtils';
-import { parseRange } from './parseRange';
+const constants = require('../../../../constants');
+const setPartRanges = require('./setPartRanges');
 
 /**
  * Uses the source object metadata and the requestHeaders
  * to determine the location of the data to be copied and the
@@ -13,7 +13,6 @@ import { parseRange } from './parseRange';
  * @return {object} object containing error if any or a dataLocator (array)
  * and objectSize (number) if no error
  */
-export default
 function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
     let dataLocator;
     // If 0 byte object just set dataLocator to empty array
@@ -40,8 +39,7 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
         parseInt(sourceObjMD['content-length'], 10);
     let copyObjectSize = sourceSize;
     if (rangeHeader) {
-        const { range, error } = parseRange(rangeHeader,
-            sourceSize);
+        const { range, error } = parseRange(rangeHeader, sourceSize);
         if (error) {
             return { error };
         }
@@ -60,10 +58,8 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
             };
         }
        if (range) {
-            dataLocator =
-                routesUtils.setPartRanges(dataLocator, range);
-            copyObjectSize = Math.min(sourceSize - range[0],
-                range[1] - range[0] + 1);
+            dataLocator = setPartRanges(dataLocator, range);
+            copyObjectSize = range[1] - range[0] + 1;
         }
     }
     if (copyObjectSize > constants.maximumAllowedPartSize) {
@@ -73,3 +69,5 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
     }
     return { dataLocator, copyObjectSize };
 }
+
+module.exports = setUpCopyLocator;
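The local parseRange module removed earlier in this diff is replaced by the arsenal utility required above. Assuming arsenal keeps the old module's semantics (not verified here), the call shape would be:

    const { parseRange } = require('arsenal/lib/network/http/utils');
    parseRange('bytes=0-9', 100);     // => { range: [0, 9] }
    parseRange('bytes=-10', 100);     // => { range: [90, 99] }
    parseRange('bytes=200-300', 100); // => { error: errors.InvalidRange }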
@@ -1,6 +1,7 @@
-import { errors } from 'arsenal';
-import V4Transform from '../../../auth/streamingV4/V4Transform';
-import data from '../../../data/wrapper';
+const { errors } = require('arsenal');
+
+const V4Transform = require('../../../auth/streamingV4/V4Transform');
+const data = require('../../../data/wrapper');
 
 /**
  * Prepares the stream if the chunks are sent in a v4 Auth request
@@ -45,7 +46,7 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) {
                 contentMD5,
             });
             log.trace('contentMD5 does not match, deleting data');
-            data.batchDelete(dataRetrievalInfo, log);
+            data.batchDelete(dataRetrievalInfo, null, null, log);
             return cb(errors.BadDigest);
         }
         if (completedHash) {
@@ -55,7 +56,7 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) {
 }
 
 /**
- * Stores object and responds back with location and storage type
+ * Stores object and responds back with key and storage type
  * @param {object} objectContext - object's keyContext for sproxyd Key
  * computation (put API)
  * @param {object} cipherBundle - cipher bundle that encrypts the data
@@ -64,14 +65,16 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) {
 * @param {object | null } streamingV4Params - if v4 auth, object containing
 * accessKey, signatureFromRequest, region, scopeDate, timestamp, and
 * credentialScope (to be used for streaming v4 auth if applicable)
+ * @param {BackendInfo} backendInfo - info to determine which data
+ * backend to use
 * @param {RequestLogger} log - the current stream logger
 * @param {function} cb - callback containing result for the next task
 * @return {undefined}
 */
-export function dataStore(objectContext, cipherBundle, stream, size,
-    streamingV4Params, log, cb) {
+function dataStore(objectContext, cipherBundle, stream, size,
+    streamingV4Params, backendInfo, log, cb) {
     const dataStream = prepareStream(stream, streamingV4Params, log, cb);
-    data.put(cipherBundle, dataStream, size, objectContext, log,
+    data.put(cipherBundle, dataStream, size, objectContext, backendInfo, log,
         (err, dataRetrievalInfo, hashedStream) => {
             if (err) {
                 log.error('error in datastore', {
@@ -100,3 +103,7 @@ export function dataStore(objectContext, cipherBundle, stream, size,
             return undefined;
         });
 }
+
+module.exports = {
+    dataStore,
+};
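Note the widened batchDelete signature in this diff: createAndStoreObject calls data.batchDelete(dataToDelete, requestMethod, newDataStoreName, log), so the MD5-mismatch cleanup above passes null for the two middle arguments it has no values for:

    // No originating request method or replacement data store name
    // applies when deleting data after a content-MD5 mismatch.
    data.batchDelete(dataRetrievalInfo, null, null, log);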
@ -0,0 +1,226 @@
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
const { parseString } = require('xml2js');
|
||||||
|
|
||||||
|
const escapeForXml = require('../../../utilities/escapeForXML');
|
||||||
|
|
||||||
|
const tagRegex = new RegExp(/[^a-zA-Z0-9 +-=._:/]/g);
|
||||||
|
|
||||||
|
const errorInvalidArgument = errors.InvalidArgument
|
||||||
|
.customizeDescription('The header \'x-amz-tagging\' shall be ' +
|
||||||
|
'encoded as UTF-8 then URLEncoded URL query parameters without ' +
|
||||||
|
'tag name duplicates.');
|
||||||
|
const errorBadRequestLimit10 = errors.BadRequest
|
||||||
|
.customizeDescription('Object tags cannot be greater than 10');
|
||||||
|
|
||||||
|
/*
|
||||||
|
Format of xml request:
|
||||||
|
|
||||||
|
<Tagging>
|
||||||
|
<TagSet>
|
||||||
|
<Tag>
|
||||||
|
<Key>Tag Name</Key>
|
||||||
|
<Value>Tag Value</Value>
|
||||||
|
</Tag>
|
||||||
|
</TagSet>
|
||||||
|
</Tagging>
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
const _validator = {
|
||||||
|
validateTagStructure: tag => tag
|
||||||
|
&& Object.keys(tag).length === 2
|
||||||
|
&& tag.Key && tag.Value
|
||||||
|
&& tag.Key.length === 1 && tag.Value.length === 1
|
||||||
|
&& tag.Key[0] !== undefined && tag.Value[0] !== undefined
|
||||||
|
&& typeof tag.Key[0] === 'string' && typeof tag.Value[0] === 'string',
|
||||||
|
|
||||||
|
validateXMLStructure: result =>
|
||||||
|
result && Object.keys(result).length === 1 &&
|
||||||
|
result.Tagging &&
|
||||||
|
result.Tagging.TagSet &&
|
||||||
|
result.Tagging.TagSet.length === 1 &&
|
||||||
|
(
|
||||||
|
result.Tagging.TagSet[0] === '' ||
|
||||||
|
result.Tagging.TagSet[0] &&
|
||||||
|
Object.keys(result.Tagging.TagSet[0]).length === 1 &&
|
||||||
|
result.Tagging.TagSet[0].Tag &&
|
||||||
|
Array.isArray(result.Tagging.TagSet[0].Tag)
|
||||||
|
),
|
||||||
|
|
||||||
|
validateKeyValue: (key, value) => {
|
||||||
|
if (key.length > 128 || key.match(tagRegex)) {
|
||||||
|
return errors.InvalidTag.customizeDescription('The TagKey you ' +
|
||||||
|
'have provided is invalid');
|
||||||
|
}
|
||||||
|
if (value.length > 256 || value.match(tagRegex)) {
|
||||||
|
return errors.InvalidTag.customizeDescription('The TagValue you ' +
|
||||||
|
'have provided is invalid');
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
},
|
||||||
|
};
|
||||||
|
/** _validateTags - Validate tags, returning an error if tags are invalid
 * @param {object[]} tags - tags parsed from xml to be validated
 * @param {string[]} tags[].Key - Name of the tag
 * @param {string[]} tags[].Value - Value of the tag
 * @return {(Error|object)} tagsResult - object of tags { key: value } on
 * success; error on failure
 */
function _validateTags(tags) {
    let result;
    const tagsResult = {};

    if (tags.length === 0) {
        return tagsResult;
    }
    // Maximum number of tags per resource: 10
    if (tags.length > 10) {
        return errorBadRequestLimit10;
    }
    for (let i = 0; i < tags.length; i++) {
        const tag = tags[i];

        if (!_validator.validateTagStructure(tag)) {
            return errors.MalformedXML;
        }
        const key = tag.Key[0];
        const value = tag.Value[0];

        if (!key) {
            return errors.InvalidTag.customizeDescription('The TagKey you ' +
                'have provided is invalid');
        }

        // Allowed characters are letters, whitespace, and numbers, plus
        // the following special characters: + - = . _ : /
        // Maximum key length: 128 Unicode characters
        // Maximum value length: 256 Unicode characters
        result = _validator.validateKeyValue(key, value);
        if (result instanceof Error) {
            return result;
        }

        tagsResult[key] = value;
    }
    // no repeating keys
    if (tags.length > Object.keys(tagsResult).length) {
        return errors.InvalidTag.customizeDescription('Cannot provide ' +
            'multiple Tags with the same key');
    }
    return tagsResult;
}

/** parseTagXml - Parse and validate xml body, returning callback with object
 * tags : { key: value }
 * @param {string} xml - xml body to parse and validate
 * @param {object} log - Werelogs logger
 * @param {function} cb - callback to server
 * @return {undefined} - calls callback with tags object on success, error
 * on failure
 */
function parseTagXml(xml, log, cb) {
    parseString(xml, (err, result) => {
        if (err) {
            log.trace('xml parsing failed', {
                error: err,
                method: 'parseTagXml',
            });
            log.debug('invalid xml', { xml });
            return cb(errors.MalformedXML);
        }
        if (!_validator.validateXMLStructure(result)) {
            log.debug('xml validation failed', {
                error: errors.MalformedXML,
                method: '_validator.validateXMLStructure',
                xml,
            });
            return cb(errors.MalformedXML);
        }
        // AWS does not return an error if there is no tag
        if (result.Tagging.TagSet[0] === '') {
            return cb(null, []);
        }
        const validationRes = _validateTags(result.Tagging.TagSet[0].Tag);
        if (validationRes instanceof Error) {
            log.debug('tag validation failed', {
                error: validationRes,
                method: '_validateTags',
                xml,
            });
            return cb(validationRes);
        }
        // if no error, validation returns the tags object
        return cb(null, validationRes);
    });
}

function convertToXml(objectTags) {
    const xml = [];
    xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
        '<Tagging> <TagSet>');
    if (objectTags && Object.keys(objectTags).length > 0) {
        Object.keys(objectTags).forEach(key => {
            xml.push(`<Tag><Key>${escapeForXml(key)}</Key>` +
                `<Value>${escapeForXml(objectTags[key])}</Value></Tag>`);
        });
    }
    xml.push('</TagSet> </Tagging>');
    return xml.join('');
}

/** parseTagFromQuery - Parse and validate x-amz-tagging header (URL query
 * parameter encoded), returning object tags : { key: value }
 * @param {string} tagQuery - tag(s) URL query parameter encoded
 * @return {(Error|object)} - tags object on success, error on failure
 */
function parseTagFromQuery(tagQuery) {
    const tagsResult = {};
    const pairs = tagQuery.split('&');
    let key;
    let value;
    let emptyTag = 0;
    if (pairs.length === 0) {
        return tagsResult;
    }
    for (let i = 0; i < pairs.length; i++) {
        const pair = pairs[i];
        if (!pair) {
            emptyTag++;
            continue;
        }
        const pairArray = pair.split('=');
        if (pairArray.length !== 2) {
            return errorInvalidArgument;
        }
        try {
            key = decodeURIComponent(pairArray[0]);
            value = decodeURIComponent(pairArray[1]);
        } catch (err) {
            return errorInvalidArgument;
        }
        if (!key) {
            return errorInvalidArgument;
        }
        const errorResult = _validator.validateKeyValue(key, value);
        if (errorResult instanceof Error) {
            return errorResult;
        }
        tagsResult[key] = value;
    }
    // return InvalidArgument error if using the same key multiple times
    if (pairs.length - emptyTag > Object.keys(tagsResult).length) {
        return errorInvalidArgument;
    }
    if (Object.keys(tagsResult).length > 10) {
        return errorBadRequestLimit10;
    }
    return tagsResult;
}

module.exports = {
    _validator,
    parseTagXml,
    convertToXml,
    parseTagFromQuery,
};
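A minimal usage sketch of the two synchronous helpers, assuming the module is loaded from a hypothetical './tagging' path:

// Hypothetical caller: the tag string comes from the x-amz-tagging header.
const { parseTagFromQuery, convertToXml } = require('./tagging');

const parsed = parseTagFromQuery('project=blue&stage=dev');
if (parsed instanceof Error) {
    // e.g. InvalidArgument for malformed pairs or duplicate keys
    throw parsed;
}
// parsed => { project: 'blue', stage: 'dev' }
const xml = convertToXml(parsed);
// xml => '<?xml ...?><Tagging> <TagSet><Tag><Key>project</Key>...</Tagging>'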
@@ -0,0 +1,321 @@
const { errors, versioning } = require('arsenal');
const async = require('async');

const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config');

const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata
// for null versions that are created before bucket versioning is configured
const nonVersionedObjId =
    versionIdUtils.getInfVid(config.replicationGroupId);

/** decodeVersionId - decode the version id from a query object
 * @param {object} [reqQuery] - request query object
 * @param {string} [reqQuery.versionId] - version ID sent in request query
 * @return {(Error|string|undefined)} - return Invalid Argument if decoding
 * fails due to improper format, otherwise undefined or the decoded version id
 */
function decodeVersionId(reqQuery) {
    if (!reqQuery || !reqQuery.versionId) {
        return undefined;
    }
    let versionId = reqQuery.versionId;
    if (versionId === 'null') {
        return versionId;
    }
    versionId = versionIdUtils.decode(versionId);
    if (versionId instanceof Error) {
        return errors.InvalidArgument
            .customizeDescription('Invalid version id specified');
    }
    return versionId;
}
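For orientation, a hedged sketch of how the two representations relate. The encode()/decode() calls are arsenal's, exactly as imported above; the raw id value itself is made up:

// Illustrative only: a raw internal metadata version id round-trips
// through encode()/decode(); clients only ever see the encoded form.
const raw = '98500086134471999999RG001  1';  // hypothetical internal id
const wire = versionIdUtils.encode(raw);     // opaque string for responses
const back = versionIdUtils.decode(wire);    // === raw, or an Error if garbled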
/** getVersionIdResHeader - return encoded version ID if appropriate
 * @param {object} [verCfg] - bucket versioning configuration
 * @param {object} objectMD - object metadata
 * @return {(string|undefined)} - undefined or encoded version ID
 * ('null' if the object is a null version)
 */
function getVersionIdResHeader(verCfg, objectMD) {
    if (verCfg) {
        if (objectMD.isNull || (objectMD && !objectMD.versionId)) {
            return 'null';
        }
        return versionIdUtils.encode(objectMD.versionId);
    }
    return undefined;
}

/**
 * Checks for versionId in request query and returns error if it is there
 * @param {object} query - request query
 * @return {(Error|undefined)} - customized InvalidArgument error or undefined
 */
function checkQueryVersionId(query) {
    if (query && query.versionId !== undefined) {
        const customMsg = 'This operation does not accept a version-id.';
        return errors.InvalidArgument.customizeDescription(customMsg);
    }
    return undefined;
}

function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
    metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
        if (err) {
            log.debug('error from metadata storing null version as new version',
                { error: err });
        }
        cb(err, options);
    });
}

/** get location of data for deletion
 * @param {string} bucketName - name of bucket
 * @param {string} objKey - name of object key
 * @param {object} options - metadata options for getting object MD
 * @param {string} options.versionId - version to get from metadata
 * @param {RequestLogger} log - logger instance
 * @param {function} cb - callback
 * @return {undefined} - and call callback with (err, dataToDelete)
 */
function _getDeleteLocations(bucketName, objKey, options, log, cb) {
    return metadata.getObjectMD(bucketName, objKey, options, log,
        (err, versionMD) => {
            if (err) {
                log.debug('err from metadata getting specified version', {
                    error: err,
                    method: '_getDeleteLocations',
                });
                return cb(err);
            }
            if (!versionMD.location) {
                return cb();
            }
            const dataToDelete = Array.isArray(versionMD.location) ?
                versionMD.location : [versionMD.location];
            return cb(null, dataToDelete);
        });
}

function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
    // before deleting null version md, retrieve location of data to delete
    return _getDeleteLocations(bucketName, objKey, options, log,
        (err, nullDataToDelete) => {
            if (err) {
                log.warn('could not find null version metadata', {
                    error: err,
                    method: '_deleteNullVersionMD',
                });
                return cb(err);
            }
            return metadata.deleteObjectMD(bucketName, objKey, options, log,
                err => {
                    if (err) {
                        log.warn('metadata error deleting null version',
                            { error: err, method: '_deleteNullVersionMD' });
                        return cb(err);
                    }
                    return cb(null, nullDataToDelete);
                });
        });
}

function processVersioningState(mst, vstat, cb) {
    const options = {};
    const storeOptions = {};
    const delOptions = {};
    // object does not exist or is not versioned (before versioning)
    if (mst.versionId === undefined || mst.isNull) {
        // versioning is suspended, overwrite existing master version
        if (vstat === 'Suspended') {
            options.versionId = '';
            options.isNull = true;
            options.dataToDelete = mst.objLocation;
            // if null version exists, clean it up prior to put
            if (mst.isNull) {
                delOptions.versionId = mst.versionId;
                return cb(null, options, null, delOptions);
            }
            return cb(null, options);
        }
        // versioning is enabled, create a new version
        options.versioning = true;
        if (mst.exists) {
            // store master version in a new key
            const versionId = mst.isNull ? mst.versionId : nonVersionedObjId;
            storeOptions.versionId = versionId;
            storeOptions.isNull = true;
            options.nullVersionId = versionId;
            return cb(null, options, storeOptions);
        }
        return cb(null, options);
    }
    // master is versioned and is not a null version
    const nullVersionId = mst.nullVersionId;
    if (vstat === 'Suspended') {
        // versioning is suspended, overwrite the existing master version
        options.versionId = '';
        options.isNull = true;
        if (nullVersionId === undefined) {
            return cb(null, options);
        }
        delOptions.versionId = nullVersionId;
        return cb(null, options, null, delOptions);
    }
    // versioning is enabled, put the new version
    options.versioning = true;
    options.nullVersionId = nullVersionId;
    return cb(null, options);
}
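To make the branching concrete, a hypothetical call; the state object mirrors getMasterState's output, defined below:

// Master exists as a null version and versioning is suspended: overwrite
// the master in place and schedule the old null version for cleanup.
processVersioningState(
    { exists: true, versionId: 'vid1', isNull: true, objLocation: ['loc1'] },
    'Suspended',
    (err, options, storeOptions, delOptions) => {
        // options      => { versionId: '', isNull: true, dataToDelete: ['loc1'] }
        // storeOptions => null (no master to re-key)
        // delOptions   => { versionId: 'vid1' }
    });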
function getMasterState(objMD) {
    if (!objMD) {
        return {};
    }
    const mst = {
        exists: true,
        versionId: objMD.versionId,
        isNull: objMD.isNull,
        nullVersionId: objMD.nullVersionId,
    };
    if (objMD.location) {
        mst.objLocation = Array.isArray(objMD.location) ?
            objMD.location : [objMD.location];
    }
    return mst;
}

/** versioningPreprocessing - return versioning information for S3 to handle
 * creation of new versions and manage deletion of old data and metadata
 * @param {string} bucketName - name of bucket
 * @param {object} bucketMD - bucket metadata
 * @param {string} objectKey - name of object
 * @param {object} objMD - obj metadata
 * @param {RequestLogger} log - logger instance
 * @param {function} callback - callback
 * @return {undefined} and call callback with params (err, options):
 * options.dataToDelete - (array/undefined) location of data to delete
 * options.versionId - specific versionId to overwrite in metadata
 * ('' overwrites the master version)
 * options.versioning - (true/undefined) metadata instruction to create new ver
 * options.isNull - (true/undefined) whether new version is null or not
 * options.nullVersionId - if storing a null version in version history, the
 * version id of the null version
 * options.deleteNullVersionData - whether to delete the data of the null ver
 */
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
    log, callback) {
    const options = {};
    const mst = getMasterState(objMD);
    const vCfg = bucketMD.getVersioningConfiguration();
    // bucket is not versioning configured
    if (!vCfg) {
        options.dataToDelete = mst.objLocation;
        return process.nextTick(callback, null, options);
    }
    // bucket is versioning configured
    return async.waterfall([
        function processState(next) {
            processVersioningState(mst, vCfg.Status,
                (err, options, storeOptions, delOptions) => {
                    process.nextTick(next, err, options, storeOptions,
                        delOptions);
                });
        },
        function storeVersion(options, storeOptions, delOptions, next) {
            if (!storeOptions) {
                return process.nextTick(next, null, options, delOptions);
            }
            const versionMD = Object.assign({}, objMD, storeOptions);
            const params = { versionId: storeOptions.versionId };
            return _storeNullVersionMD(bucketName, objectKey, versionMD,
                params, log, err => next(err, options, delOptions));
        },
        function deleteNullVersion(options, delOptions, next) {
            if (!delOptions) {
                return process.nextTick(next, null, options);
            }
            return _deleteNullVersionMD(bucketName, objectKey, delOptions, log,
                (err, nullDataToDelete) => {
                    if (err) {
                        log.warn('unexpected error deleting null version md', {
                            error: err,
                            method: 'versioningPreprocessing',
                        });
                        // it's possible there was a concurrent request to
                        // delete the null version, so proceed with putting a
                        // new version
                        if (err === errors.NoSuchKey) {
                            return next(null, options);
                        }
                        return next(errors.InternalError);
                    }
                    Object.assign(options, { dataToDelete: nullDataToDelete });
                    return next(null, options);
                });
        },
    ], (err, options) => callback(err, options));
}
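A hedged sketch of the intended call site (an object-put path; storeObject is a hypothetical stand-in for the rest of that flow):

versioningPreprocessing(bucketName, bucketMD, objectKey, objMD, log,
    (err, options) => {
        if (err) {
            return callback(err);
        }
        // options.versioning / options.versionId drive the metadata put;
        // options.dataToDelete lists locations to garbage-collect after.
        return storeObject(options, callback); // hypothetical
    });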
/** preprocessingVersioningDelete - return versioning information for S3 to
 * manage deletion of objects and versions, including creation of delete
 * markers
 * @param {string} bucketName - name of bucket
 * @param {object} bucketMD - bucket metadata
 * @param {object} objectMD - obj metadata
 * @param {string} [reqVersionId] - specific version ID sent as part of request
 * @param {RequestLogger} log - logger instance
 * @param {function} callback - callback
 * @return {undefined} and call callback with params (err, options):
 * options.deleteData - (true/undefined) whether to delete data (if undefined,
 * a delete marker is created instead)
 * options.versionId - specific versionId to delete
 */
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
    reqVersionId, log, callback) {
    const options = {};
    // bucket is not versioning enabled
    if (!bucketMD.getVersioningConfiguration()) {
        options.deleteData = true;
        return callback(null, options);
    }
    // bucket is versioning enabled
    if (reqVersionId && reqVersionId !== 'null') {
        // deleting a specific version
        options.deleteData = true;
        options.versionId = reqVersionId;
        return callback(null, options);
    }
    if (reqVersionId) {
        // deleting the 'null' version if it exists
        if (objectMD.versionId === undefined) {
            // object is not versioned, deleting it
            options.deleteData = true;
            return callback(null, options);
        }
        if (objectMD.isNull) {
            // master is the null version
            options.deleteData = true;
            options.versionId = objectMD.versionId;
            return callback(null, options);
        }
        if (objectMD.nullVersionId) {
            // null version exists, deleting it
            options.deleteData = true;
            options.versionId = objectMD.nullVersionId;
            return callback(null, options);
        }
        // null version does not exist: no deletion
        // TODO check AWS behaviour for no deletion (seems having no error)
        return callback(errors.NoSuchKey);
    }
    // not deleting any specific version, making a delete marker instead
    return callback(null, options);
}

module.exports = {
    decodeVersionId,
    getVersionIdResHeader,
    checkQueryVersionId,
    versioningPreprocessing,
    preprocessingVersioningDelete,
};
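A sketch of how a DELETE-object route might chain these helpers (the handler wiring and doDelete are illustrative, not from the diff):

// Hypothetical DELETE handler excerpt.
const reqVersionId = decodeVersionId(request.query);
if (reqVersionId instanceof Error) {
    return callback(reqVersionId);
}
return preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
    reqVersionId, log, (err, options) => {
        if (err) {
            return callback(err);
        }
        // options.deleteData set => delete data/metadata of options.versionId;
        // unset => write a delete marker instead.
        return doDelete(options, callback); // hypothetical
    });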
@@ -1,4 +1,3 @@
-
 /**
  * findRoutingRule - find applicable routing rule from bucket metadata
  * @param {RoutingRule []} routingRules - array of routingRule objects
@@ -8,7 +7,7 @@
  * keys/values from routingRule.getRedirect() plus
  * a key of prefixFromRule and a value of routingRule.condition.keyPrefixEquals
  */
-export function findRoutingRule(routingRules, key, errCode) {
+function findRoutingRule(routingRules, key, errCode) {
     if (!routingRules || routingRules.length === 0) {
         return undefined;
     }
@@ -70,7 +69,7 @@ export function findRoutingRule(routingRules, key, errCode) {
  * @return {object} redirectInfo - select key/values stored in
  * WebsiteConfiguration for a redirect -- protocol, replaceKeyWith and hostName
  */
-export function extractRedirectInfo(location) {
+function extractRedirectInfo(location) {
     const redirectInfo = { redirectLocationHeader: true };
     if (location.startsWith('/')) {
         // redirect to another object in bucket
@@ -89,3 +88,21 @@ export function extractRedirectInfo(location) {
     }
     return redirectInfo;
 }
+
+/**
+ * validateWebsiteHeader - validate the website redirect header
+ * @param {string} header - value of
+ * x-amz-website-redirect-location header on a put
+ * object (or similar request -- initiate mpu, object copy)
+ * @return {boolean} true if valid, false if not
+ */
+function validateWebsiteHeader(header) {
+    return (!header || header.startsWith('/') ||
+        header.startsWith('http://') || header.startsWith('https://'));
+}
+
+module.exports = {
+    findRoutingRule,
+    extractRedirectInfo,
+    validateWebsiteHeader,
+};
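A quick illustration of the header check above (values made up; comments show the returned boolean):

validateWebsiteHeader(undefined);              // true (header is optional)
validateWebsiteHeader('/other-key');           // true (redirect within bucket)
validateWebsiteHeader('https://example.com');  // true
validateWebsiteHeader('ftp://example.com');    // false (unsupported scheme)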
@@ -1,9 +1,9 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { deleteBucket } from './apiUtils/bucket/bucketDeletion';
-import services from '../services';
-import { pushMetric } from '../utapi/utilities';
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { deleteBucket } = require('./apiUtils/bucket/bucketDeletion');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
 
 /**
  * bucketDelete - DELETE bucket (currently supports only non-versioned buckets)
@@ -15,7 +15,7 @@ import { pushMetric } from '../utapi/utilities';
  * with the result and response headers
  * @return {undefined}
  */
-export default function bucketDelete(authInfo, request, log, cb) {
+function bucketDelete(authInfo, request, log, cb) {
     log.debug('processing request', { method: 'bucketDelete' });
 
     if (authInfo.isRequesterPublicUser()) {
@@ -28,20 +28,19 @@ export default function bucketDelete(authInfo, request, log, cb) {
         authInfo,
         bucketName,
         requestType: 'bucketDelete',
-        log,
     };
 
-    return services.metadataValidateAuthorization(metadataValParams,
+    return metadataValidateBucket(metadataValParams, log,
         (err, bucketMD) => {
             const corsHeaders = collectCorsHeaders(request.headers.origin,
                 request.method, bucketMD);
             if (err) {
                 log.debug('error processing request',
-                    { method: 'metadataValidateAuthorization', error: err });
+                    { method: 'metadataValidateBucket', error: err });
                 return cb(err, corsHeaders);
            }
            log.trace('passed checks',
-                { method: 'metadataValidateAuthorization' });
+                { method: 'metadataValidateBucket' });
            return deleteBucket(bucketMD, bucketName, authInfo.getCanonicalID(),
                log, err => {
                    if (err) {
@@ -55,3 +54,5 @@ export default function bucketDelete(authInfo, request, log, cb) {
                 });
         });
 }
+
+module.exports = bucketDelete;
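The same refactor repeats across the handlers below: the log object moves out of the validation params and into the new helper's argument list. A minimal sketch of the new call shape, as used in this diff:

const metadataValParams = { authInfo, bucketName, requestType: 'bucketDelete' };
metadataValidateBucket(metadataValParams, log, (err, bucketMD) => {
    // bucketMD is the validated bucket's metadata; err is set on
    // a missing bucket or failed authorization.
});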
@@ -1,10 +1,10 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import bucketShield from './apiUtils/bucket/bucketShield';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
-import metadata from '../metadata/wrapper';
-import { pushMetric } from '../utapi/utilities';
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const metadata = require('../metadata/wrapper');
+const { pushMetric } = require('../utapi/utilities');
 
 const requestType = 'bucketOwnerAction';
 
@@ -16,7 +16,7 @@ const requestType = 'bucketOwnerAction';
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function bucketDeleteCors(authInfo, request, log, callback) {
+function bucketDeleteCors(authInfo, request, log, callback) {
     const bucketName = request.bucketName;
     const canonicalID = authInfo.getCanonicalID();
 
@@ -66,3 +66,5 @@ export default function bucketDeleteCors(authInfo, request, log, callback) {
         });
     });
 }
+
+module.exports = bucketDeleteCors;
@@ -1,14 +1,14 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import bucketShield from './apiUtils/bucket/bucketShield';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
-import metadata from '../metadata/wrapper';
-import { pushMetric } from '../utapi/utilities';
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const metadata = require('../metadata/wrapper');
+const { pushMetric } = require('../utapi/utilities');
 
 const requestType = 'bucketOwnerAction';
 
-export default function bucketDeleteWebsite(authInfo, request, log, callback) {
+function bucketDeleteWebsite(authInfo, request, log, callback) {
     const bucketName = request.bucketName;
     const canonicalID = authInfo.getCanonicalID();
 
@@ -58,3 +58,5 @@ export default function bucketDeleteWebsite(authInfo, request, log, callback) {
         });
     });
 }
+
+module.exports = bucketDeleteWebsite;
@@ -1,13 +1,16 @@
-import querystring from 'querystring';
-import constants from '../../constants';
-
-import services from '../services';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import escapeForXML from '../utilities/escapeForXML';
-import { pushMetric } from '../utapi/utilities';
-import { errors } from 'arsenal';
-
-// Sample XML response:
+const querystring = require('querystring');
+const { errors, versioning } = require('arsenal');
+
+const constants = require('../../constants');
+const services = require('../services');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const escapeForXML = require('../utilities/escapeForXML');
+const { pushMetric } = require('../utapi/utilities');
+
+const versionIdUtils = versioning.VersionID;
+
+// Sample XML response for GET bucket objects:
 /* <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
     <Name>example-bucket</Name>
     <Prefix></Prefix>
@@ -31,80 +34,135 @@ import { errors } from 'arsenal';
     </CommonPrefixes>
 </ListBucketResult>*/
 
-/**
- * bucketGet - Return list of objects in bucket
- * @param {AuthInfo} authInfo - Instance of AuthInfo class with
- * requester's info
- * @param {object} request - http request object
- * @param {function} log - Werelogs request logger
- * @param {function} callback - callback to respond to http request
- * with either error code or xml response body
- * @return {undefined}
- */
-export default function bucketGet(authInfo, request, log, callback) {
-    log.debug('processing request', { method: 'bucketGet' });
-    const params = request.query;
-    const bucketName = request.bucketName;
-    const encoding = params['encoding-type'];
-    if (encoding !== undefined && encoding !== 'url') {
-        return callback(errors.InvalidArgument.customizeDescription('Invalid ' +
-            'Encoding Method specified in Request'));
-    }
-    const escapeXmlFn = encoding === 'url' ? querystring.escape : escapeForXML;
-    const requestMaxKeys = params['max-keys'] ?
-        Number.parseInt(params['max-keys'], 10) : 1000;
-    if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
-        return callback(errors.InvalidArgument);
-    }
-    // AWS only returns 1000 keys even if max keys are greater.
-    // Max keys stated in response xml can be greater than actual
-    // keys returned.
-    const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
-
-    const metadataValParams = {
-        authInfo,
-        bucketName,
-        requestType: 'bucketGet',
-        log,
-    };
-    const listParams = {
-        maxKeys: actualMaxKeys,
-        delimiter: params.delimiter,
-        marker: params.marker,
-        prefix: params.prefix,
-    };
-
-    services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
-        const corsHeaders = collectCorsHeaders(request.headers.origin,
-            request.method, bucket);
-        if (err) {
-            log.debug('error processing request', { error: err });
-            return callback(err, null, corsHeaders);
-        }
-        return services.getObjectListing(bucketName, listParams, log,
-            (err, list) => {
-                if (err) {
-                    log.debug('error processing request', { error: err });
-                    return callback(err, null, corsHeaders);
-                }
+/* eslint-disable max-len */
+// sample XML response for GET bucket object versions:
+// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETVersion.html#RESTBucketGET_Examples
+/*
+<?xml version="1.0" encoding="UTF-8"?>
+<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
+    <Name>bucket</Name>
+    <Prefix>my</Prefix>
+    <KeyMarker/>
+    <VersionIdMarker/>
+    <MaxKeys>5</MaxKeys>
+    <Delimiter>/</Delimiter>
+    <NextKeyMarker>my-second-image.jpg</NextKeyMarker>
+    <NextVersionIdMarker>03jpff543dhffds434rfdsFDN943fdsFkdmqnh892</NextVersionIdMarker>
+    <IsTruncated>true</IsTruncated>
+    <Version>
+        <Key>my-image.jpg</Key>
+        <VersionId>3/L4kqtJl40Nr8X8gdRQBpUMLUo</VersionId>
+        <IsLatest>true</IsLatest>
+        <LastModified>2009-10-12T17:50:30.000Z</LastModified>
+        <ETag>"fba9dede5f27731c9771645a39863328"</ETag>
+        <Size>434234</Size>
+        <StorageClass>STANDARD</StorageClass>
+        <Owner>
+            <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
+            <DisplayName>mtd@amazon.com</DisplayName>
+        </Owner>
+    </Version>
+    <DeleteMarker>
+        <Key>my-second-image.jpg</Key>
+        <VersionId>03jpff543dhffds434rfdsFDN943fdsFkdmqnh892</VersionId>
+        <IsLatest>true</IsLatest>
+        <LastModified>2009-11-12T17:50:30.000Z</LastModified>
+        <Owner>
+            <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
+            <DisplayName>mtd@amazon.com</DisplayName>
+        </Owner>
+    </DeleteMarker>
+    <CommonPrefixes>
+        <Prefix>photos/</Prefix>
+    </CommonPrefixes>
+</ListVersionsResult>
+*/
+/* eslint-enable max-len */
+
+function processVersions(bucketName, listParams, list) {
     const xml = [];
     xml.push(
         '<?xml version="1.0" encoding="UTF-8"?>',
-        '<ListBucketResult xmlns="http://s3.amazonaws.com/doc/' +
-            '2006-03-01/">',
-        `<Name>${bucketName}</Name>`
+        '<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
+        '<Name>', bucketName, '</Name>'
     );
     const isTruncated = list.IsTruncated ? 'true' : 'false';
     const xmlParams = [
         { tag: 'Prefix', value: listParams.prefix },
-        { tag: 'NextMarker', value: list.NextMarker },
-        { tag: 'Marker', value: listParams.marker },
-        { tag: 'MaxKeys', value: requestMaxKeys },
+        { tag: 'KeyMarker', value: listParams.keyMarker },
+        { tag: 'VersionIdMarker', value: listParams.versionIdMarker },
+        { tag: 'NextKeyMarker', value: list.NextKeyMarker },
+        { tag: 'NextVersionIdMarker', value: list.NextVersionIdMarker },
+        { tag: 'MaxKeys', value: listParams.maxKeys },
         { tag: 'Delimiter', value: listParams.delimiter },
-        { tag: 'EncodingType', value: encoding },
+        { tag: 'EncodingType', value: listParams.encoding },
         { tag: 'IsTruncated', value: isTruncated },
     ];
+
+    const escapeXmlFn = listParams.encoding === 'url' ?
+        querystring.escape : escapeForXML;
+    xmlParams.forEach(p => {
+        if (p.value) {
+            const val = p.tag !== 'NextVersionIdMarker' || p.value === 'null' ?
+                p.value : versionIdUtils.encode(p.value);
+            xml.push(`<${p.tag}>${escapeXmlFn(val)}</${p.tag}>`);
+        }
+    });
+    let lastKey = listParams.keyMarker ?
+        escapeXmlFn(listParams.keyMarker) : undefined;
+    list.Versions.forEach(item => {
+        const v = item.value;
+        const objectKey = escapeXmlFn(item.key);
+        const isLatest = lastKey !== objectKey;
+        lastKey = objectKey;
+        xml.push(
+            v.IsDeleteMarker ? '<DeleteMarker>' : '<Version>',
+            `<Key>${objectKey}</Key>`,
+            '<VersionId>',
+            (v.IsNull || v.VersionId === undefined) ?
+                'null' : versionIdUtils.encode(v.VersionId),
+            '</VersionId>',
+            `<IsLatest>${isLatest}</IsLatest>`,
+            `<LastModified>${v.LastModified}</LastModified>`,
+            `<ETag>"${v.ETag}"</ETag>`,
+            `<Size>${v.Size}</Size>`,
+            '<Owner>',
+            `<ID>${v.Owner.ID}</ID>`,
+            `<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
+            '</Owner>',
+            `<StorageClass>${v.StorageClass}</StorageClass>`,
+            v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>'
+        );
+    });
+    list.CommonPrefixes.forEach(item => {
+        const val = escapeXmlFn(item);
+        xml.push(`<CommonPrefixes><Prefix>${val}</Prefix></CommonPrefixes>`);
+    });
+    xml.push('</ListVersionsResult>');
+    return xml.join('');
+}
+
+function processMasterVersions(bucketName, listParams, list) {
+    const xml = [];
+    xml.push(
+        '<?xml version="1.0" encoding="UTF-8"?>',
+        '<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
+        '<Name>', bucketName, '</Name>'
+    );
+    const isTruncated = list.IsTruncated ? 'true' : 'false';
+    const xmlParams = [
+        { tag: 'Prefix', value: listParams.prefix || '' },
+        { tag: 'Marker', value: listParams.marker || '' },
+        { tag: 'NextMarker', value: list.NextMarker },
+        { tag: 'MaxKeys', value: listParams.maxKeys },
+        { tag: 'Delimiter', value: listParams.delimiter },
+        { tag: 'EncodingType', value: listParams.encoding },
+        { tag: 'IsTruncated', value: isTruncated },
+    ];
+
+    const escapeXmlFn = listParams.encoding === 'url' ?
+        querystring.escape : escapeForXML;
     xmlParams.forEach(p => {
         if (p.value) {
             xml.push(`<${p.tag}>${escapeXmlFn(p.value)}</${p.tag}>`);
@@ -117,9 +175,11 @@ export default function bucketGet(authInfo, request, log, callback) {
 
     list.Contents.forEach(item => {
         const v = item.value;
+        if (v.isDeleteMarker) {
+            return null;
+        }
         const objectKey = escapeXmlFn(item.key);
-        xml.push(
+        return xml.push(
             '<Contents>',
             `<Key>${objectKey}</Key>`,
             `<LastModified>${v.LastModified}</LastModified>`,
@@ -135,17 +195,87 @@ export default function bucketGet(authInfo, request, log, callback) {
     });
     list.CommonPrefixes.forEach(item => {
         const val = escapeXmlFn(item);
-        xml.push(
-            `<CommonPrefixes><Prefix>${val}</Prefix></CommonPrefixes>`
-        );
+        xml.push(`<CommonPrefixes><Prefix>${val}</Prefix></CommonPrefixes>`);
     });
     xml.push('</ListBucketResult>');
-    pushMetric('listBucket', log, {
-        authInfo,
-        bucket: bucketName,
-    });
-    return callback(null, xml.join(''), corsHeaders);
+    return xml.join('');
+}
+
+/**
+ * bucketGet - Return list of objects in bucket
+ * @param {AuthInfo} authInfo - Instance of AuthInfo class with
+ * requester's info
+ * @param {object} request - http request object
+ * @param {function} log - Werelogs request logger
+ * @param {function} callback - callback to respond to http request
+ * with either error code or xml response body
+ * @return {undefined}
+ */
+function bucketGet(authInfo, request, log, callback) {
+    log.debug('processing request', { method: 'bucketGet' });
+    const params = request.query;
+    const bucketName = request.bucketName;
+    const encoding = params['encoding-type'];
+    if (encoding !== undefined && encoding !== 'url') {
+        return callback(errors.InvalidArgument.customizeDescription('Invalid ' +
+            'Encoding Method specified in Request'));
+    }
+    const requestMaxKeys = params['max-keys'] ?
+        Number.parseInt(params['max-keys'], 10) : 1000;
+    if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
+        return callback(errors.InvalidArgument);
+    }
+    // AWS only returns 1000 keys even if max keys are greater.
+    // Max keys stated in response xml can be greater than actual
+    // keys returned.
+    const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
+
+    const metadataValParams = {
+        authInfo,
+        bucketName,
+        requestType: 'bucketGet',
+    };
+    const listParams = {
+        listingType: 'DelimiterMaster',
+        maxKeys: actualMaxKeys,
+        delimiter: params.delimiter,
+        marker: params.marker,
+        prefix: params.prefix,
+    };
+
+    metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+        const corsHeaders = collectCorsHeaders(request.headers.origin,
+            request.method, bucket);
+        if (err) {
+            log.debug('error processing request', { error: err });
+            return callback(err, null, corsHeaders);
+        }
+        if (params.versions !== undefined) {
+            listParams.listingType = 'DelimiterVersions';
+            delete listParams.marker;
+            listParams.keyMarker = params['key-marker'];
+            listParams.versionIdMarker = params['version-id-marker'] ?
+                versionIdUtils.decode(params['version-id-marker']) : undefined;
+        }
+        return services.getObjectListing(bucketName, listParams, log,
+            (err, list) => {
+                if (err) {
+                    log.debug('error processing request', { error: err });
+                    return callback(err, null, corsHeaders);
+                }
+                listParams.maxKeys = requestMaxKeys;
+                listParams.encoding = encoding;
+                let res = undefined;
+                if (listParams.listingType === 'DelimiterVersions') {
+                    res = processVersions(bucketName, listParams, list);
+                } else {
+                    res = processMasterVersions(bucketName, listParams, list);
+                }
+                pushMetric('listBucket', log, { authInfo, bucket: bucketName });
                return callback(null, res, corsHeaders);
            });
    });
    return undefined;
 }
+
+module.exports = bucketGet;
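To tie the new formatter to its input, a hypothetical listing result shaped like services.getObjectListing's output (field values are made up):

const sampleList = {
    IsTruncated: false,
    Versions: [{
        key: 'my-image.jpg',
        value: { IsNull: true, LastModified: '2009-10-12T17:50:30.000Z',
            ETag: 'fba9dede5f27731c9771645a39863328', Size: 434234,
            Owner: { ID: 'id', DisplayName: 'mtd@amazon.com' },
            StorageClass: 'STANDARD' },
    }],
    CommonPrefixes: [],
};
const body = processVersions('mybucket', { maxKeys: 1000 }, sampleList);
// body is a <ListVersionsResult> document whose single <Version> entry
// carries <VersionId>null</VersionId>, since the entry is a null version.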
@@ -1,9 +1,9 @@
-import aclUtils from '../utilities/aclUtils';
-import constants from '../../constants';
-import services from '../services';
-import vault from '../auth/vault';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { pushMetric } from '../utapi/utilities';
+const aclUtils = require('../utilities/aclUtils');
+const constants = require('../../constants');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const vault = require('../auth/vault');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { pushMetric } = require('../utapi/utilities');
 
 // Sample XML response:
 /*
@@ -36,7 +36,7 @@ import { pushMetric } from '../utapi/utilities';
  * with either error code or xml response body
  * @return {undefined}
  */
-export default function bucketGetACL(authInfo, request, log, callback) {
+function bucketGetACL(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'bucketGetACL' });
 
     const bucketName = request.bucketName;
@@ -45,7 +45,6 @@ export default function bucketGetACL(authInfo, request, log, callback) {
         authInfo,
         bucketName,
         requestType: 'bucketGetACL',
-        log,
     };
     const grantInfo = {
         grants: [],
@@ -60,7 +59,7 @@ export default function bucketGetACL(authInfo, request, log, callback) {
         constants.logId,
     ];
 
-    services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
+    metadataValidateBucket(metadataValParams, log, (err, bucket) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
         if (err) {
@@ -171,3 +170,5 @@ export default function bucketGetACL(authInfo, request, log, callback) {
         });
     });
 }
+
+module.exports = bucketGetACL;
@@ -1,11 +1,11 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import bucketShield from './apiUtils/bucket/bucketShield';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { convertToXml } from './apiUtils/bucket/bucketCors';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
-import metadata from '../metadata/wrapper';
-import { pushMetric } from '../utapi/utilities';
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { convertToXml } = require('./apiUtils/bucket/bucketCors');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const metadata = require('../metadata/wrapper');
+const { pushMetric } = require('../utapi/utilities');
 
 const requestType = 'bucketOwnerAction';
 
@@ -17,7 +17,7 @@ const requestType = 'bucketOwnerAction';
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function bucketGetCors(authInfo, request, log, callback) {
+function bucketGetCors(authInfo, request, log, callback) {
     const bucketName = request.bucketName;
     const canonicalID = authInfo.getCanonicalID();
 
@@ -58,3 +58,5 @@ export default function bucketGetCors(authInfo, request, log, callback) {
         return callback(null, xml, corsHeaders);
     });
 }
+
+module.exports = bucketGetCors;
@@ -0,0 +1,66 @@
const { errors } = require('arsenal');

const bucketShield = require('./apiUtils/bucket/bucketShield');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const escapeForXML = require('../utilities/escapeForXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');

const requestType = 'bucketOwnerAction';

/**
 * Bucket Get Location - Get bucket locationConstraint configuration
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs logger
 * @param {function} callback - callback to server
 * @return {undefined}
 */

function bucketGetLocation(authInfo, request, log, callback) {
    const bucketName = request.bucketName;
    const canonicalID = authInfo.getCanonicalID();

    return metadata.getBucket(bucketName, log, (err, bucket) => {
        if (err) {
            log.debug('metadata getbucket failed', { error: err });
            return callback(err);
        }
        if (bucketShield(bucket, requestType)) {
            return callback(errors.NoSuchBucket);
        }
        log.trace('found bucket in metadata');

        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);

        if (!isBucketAuthorized(bucket, requestType, canonicalID)) {
            log.debug('access denied for account on bucket', {
                requestType,
                method: 'bucketGetLocation',
            });
            return callback(errors.AccessDenied, null, corsHeaders);
        }

        let locationConstraint = bucket.getLocationConstraint();
        if (!locationConstraint || locationConstraint === 'us-east-1') {
            // AWS returns empty string if no region has been
            // provided or for us-east-1
            // Note: AWS JS SDK sends a request with locationConstraint
            // us-east-1 if no locationConstraint provided.
            locationConstraint = '';
        }
        const xml = `<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">` +
            `${escapeForXML(locationConstraint)}</LocationConstraint>`;
        pushMetric('getBucketLocation', log, {
            authInfo,
            bucket: bucketName,
        });

        return callback(null, xml, corsHeaders);
    });
}

module.exports = bucketGetLocation;
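For reference, the empty-string case above should yield the same body AWS returns for a us-east-1 bucket (sample response, not from the diff):

<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></LocationConstraint>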
@@ -1,5 +1,6 @@
-import services from '../services';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { pushMetric } = require('../utapi/utilities');
 
 // Sample XML response:
 /*
@@ -44,7 +45,7 @@ function convertToXml(versioningConfiguration) {
  * with either error code or xml response body
  * @return {undefined}
  */
-export default function bucketGetVersioning(authInfo, request, log, callback) {
+function bucketGetVersioning(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'bucketGetVersioning' });
 
     const bucketName = request.bucketName;
@@ -53,10 +54,9 @@ export default function bucketGetVersioning(authInfo, request, log, callback) {
         authInfo,
         bucketName,
         requestType: 'bucketOwnerAction',
-        log,
     };
 
-    services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
+    metadataValidateBucket(metadataValParams, log, (err, bucket) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
         if (err) {
@@ -66,11 +66,12 @@ export default function bucketGetVersioning(authInfo, request, log, callback) {
         }
         const versioningConfiguration = bucket.getVersioningConfiguration();
         const xml = convertToXml(versioningConfiguration);
-        // TODO push metric for bucketGetVersioning
-        // pushMetric('bucketGetVersioning', log, {
-        //     authInfo,
-        //     bucket: bucketName,
-        // });
+        pushMetric('getBucketVersioning', log, {
+            authInfo,
+            bucket: bucketName,
+        });
         return callback(null, xml, corsHeaders);
     });
 }
+
+module.exports = bucketGetVersioning;
@@ -1,11 +1,11 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import bucketShield from './apiUtils/bucket/bucketShield';
-import { convertToXml } from './apiUtils/bucket/bucketWebsite';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
-import metadata from '../metadata/wrapper';
-import { pushMetric } from '../utapi/utilities';
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const { convertToXml } = require('./apiUtils/bucket/bucketWebsite');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const metadata = require('../metadata/wrapper');
+const { pushMetric } = require('../utapi/utilities');
 
 const requestType = 'bucketOwnerAction';
 
@@ -17,7 +17,7 @@ const requestType = 'bucketOwnerAction';
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function bucketGetWebsite(authInfo, request, log, callback) {
+function bucketGetWebsite(authInfo, request, log, callback) {
     const bucketName = request.bucketName;
     const canonicalID = authInfo.getCanonicalID();
 
@@ -59,3 +59,5 @@ export default function bucketGetWebsite(authInfo, request, log, callback) {
         return callback(null, xml, corsHeaders);
     });
 }
+
+module.exports = bucketGetWebsite;
@@ -1,7 +1,7 @@
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import services from '../services';
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
 
-import { pushMetric } from '../utapi/utilities';
+const { pushMetric } = require('../utapi/utilities');
 
 /**
  * Determine if bucket exists and if user has permission to access it
@@ -12,16 +12,15 @@ import { pushMetric } from '../utapi/utilities';
  * with either error code or success
  * @return {undefined}
  */
-export default function bucketHead(authInfo, request, log, callback) {
+function bucketHead(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'bucketHead' });
     const bucketName = request.bucketName;
     const metadataValParams = {
         authInfo,
         bucketName,
         requestType: 'bucketHead',
-        log,
     };
-    services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
+    metadataValidateBucket(metadataValParams, log, (err, bucket) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
         if (err) {
@@ -34,3 +33,5 @@ export default function bucketHead(authInfo, request, log, callback) {
         return callback(null, corsHeaders);
     });
 }
+
+module.exports = bucketHead;
@@ -1,10 +1,54 @@
-import { errors } from 'arsenal';
+const { waterfall } = require('async');
+const { parseString } = require('xml2js');
+const { auth, errors } = require('arsenal');
 
-import { createBucket } from './apiUtils/bucket/bucketCreation';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import config from '../Config';
-import aclUtils from '../utilities/aclUtils';
-import { pushMetric } from '../utapi/utilities';
+const vault = require('../auth/vault');
+const { createBucket } = require('./apiUtils/bucket/bucketCreation');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { config } = require('../Config');
+const aclUtils = require('../utilities/aclUtils');
+const { pushMetric } = require('../utapi/utilities');
+
+const { locationConstraints, restEndpoints } = config;
+
+
+/**
+ * checkLocationConstraint - check that a location constraint is explicitly
+ * set on the bucket and the value of the location is listed in the
+ * locationConstraint config.
+ * Note: if data backend equals "multiple", you must set a location constraint
+ * @param {object} request - http request object
+ * @param {string} locationConstraint - the location constraint sent with
+ * the xml of the request
+ * @param {object} log - Werelogs logger
+ * @return {object} - { error } on failure; { error: null, locationConstraint }
+ * on success
+ */
+function checkLocationConstraint(request, locationConstraint, log) {
+    // AWS JS SDK sends a request with locationConstraint us-east-1 if
+    // no locationConstraint provided.
+    const { parsedHost } = request;
+    let locationConstraintChecked;
+    if (locationConstraint) {
+        locationConstraintChecked = locationConstraint;
+    } else if (parsedHost && restEndpoints[parsedHost]) {
+        locationConstraintChecked = restEndpoints[parsedHost];
+    } else {
+        log.trace('no location constraint provided on bucket put; ' +
+            'setting us-east-1');
+        locationConstraintChecked = 'us-east-1';
+    }
+
+    if (!locationConstraints[locationConstraintChecked]) {
+        const errMsg = 'value of the location you are attempting to set - ' +
+            `${locationConstraintChecked} - is not listed in the ` +
+            'locationConstraint config';
+        log.trace(`locationConstraint is invalid - ${errMsg}`,
+            { locationConstraint: locationConstraintChecked });
+        return { error: errors.InvalidLocationConstraint.
+            customizeDescription(errMsg) };
+    }
+    return { error: null, locationConstraint: locationConstraintChecked };
+}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Format of xml request:
|
Format of xml request:
|
||||||
|
@ -15,18 +59,46 @@ import { pushMetric } from '../utapi/utilities';
|
||||||
</CreateBucketConfiguration>
|
</CreateBucketConfiguration>
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
function _parseXML(request, log, cb) {
|
||||||
|
if (request.post) {
|
||||||
|
return parseString(request.post, (err, result) => {
|
||||||
|
if (err || !result.CreateBucketConfiguration
|
||||||
|
|| !result.CreateBucketConfiguration.LocationConstraint
|
||||||
|
|| !result.CreateBucketConfiguration.LocationConstraint[0]) {
|
||||||
|
log.debug('request xml is malformed');
|
||||||
|
return cb(errors.MalformedXML);
|
||||||
|
}
|
||||||
|
const locationConstraint = result.CreateBucketConfiguration
|
||||||
|
.LocationConstraint[0];
|
||||||
|
log.trace('location constraint',
|
||||||
|
{ locationConstraint });
|
||||||
|
const locationCheck = checkLocationConstraint(request,
|
||||||
|
locationConstraint, log);
|
||||||
|
if (locationCheck.error) {
|
||||||
|
return cb(locationCheck.error);
|
||||||
|
}
|
||||||
|
return cb(null, locationCheck.locationConstraint);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return process.nextTick(() => {
|
||||||
|
const locationCheck = checkLocationConstraint(request,
|
||||||
|
undefined, log);
|
||||||
|
if (locationCheck.error) {
|
||||||
|
return cb(locationCheck.error);
|
||||||
|
}
|
||||||
|
return cb(null, locationCheck.locationConstraint);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* PUT Service - Create bucket for the user
|
* PUT Service - Create bucket for the user
|
||||||
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
|
||||||
* @param {object} request - http request object
|
* @param {object} request - http request object
|
||||||
* @param {string | undefined} locationConstraint - locationConstraint for
|
|
||||||
* bucket (if any)
|
|
||||||
* @param {object} log - Werelogs logger
|
* @param {object} log - Werelogs logger
|
||||||
* @param {function} callback - callback to server
|
* @param {function} callback - callback to server
|
||||||
* @return {undefined}
|
* @return {undefined}
|
||||||
*/
|
*/
|
||||||
export default function bucketPut(authInfo, request, locationConstraint, log,
|
function bucketPut(authInfo, request, log, callback) {
|
||||||
callback) {
|
|
||||||
log.debug('processing request', { method: 'bucketPut' });
|
log.debug('processing request', { method: 'bucketPut' });
|
||||||
|
|
||||||
if (authInfo.isRequesterPublicUser()) {
|
if (authInfo.isRequesterPublicUser()) {
|
||||||
|
@ -37,21 +109,68 @@ export default function bucketPut(authInfo, request, locationConstraint, log,
|
||||||
log.trace('invalid acl header');
|
log.trace('invalid acl header');
|
||||||
return callback(errors.InvalidArgument);
|
return callback(errors.InvalidArgument);
|
||||||
}
|
}
|
||||||
const bucketName = request.bucketName;
|
const { bucketName } = request;
|
||||||
|
|
||||||
return createBucket(authInfo, bucketName, request.headers,
|
return waterfall([
|
||||||
locationConstraint, config.usEastBehavior, log,
|
next => _parseXML(request, log, next),
|
||||||
(err, previousBucket) => {
|
// Check policies in Vault for a user.
|
||||||
// if bucket already existed, gather any relevant cors headers
|
(locationConstraint, next) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
if (authInfo.isRequesterAnIAMUser()) {
|
||||||
request.method, previousBucket);
|
const authParams = auth.server.extractParams(request, log, 's3',
|
||||||
|
request.query);
|
||||||
|
const requestContextParams = {
|
||||||
|
constantParams: {
|
||||||
|
headers: request.headers,
|
||||||
|
query: request.query,
|
||||||
|
generalResource: bucketName,
|
||||||
|
specificResource: {
|
||||||
|
key: '',
|
||||||
|
},
|
||||||
|
requesterIp: request.socket.remoteAddress,
|
||||||
|
sslEnabled: request.connection.encrypted,
|
||||||
|
apiMethod: 'bucketPut',
|
||||||
|
awsService: 's3',
|
||||||
|
locationConstraint,
|
||||||
|
requesterInfo: authInfo,
|
||||||
|
signatureVersion: authParams.params.data.authType,
|
||||||
|
authType: authParams.params.data.signatureVersion,
|
||||||
|
signatureAge: authParams.params.data.signatureAge,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
return vault.checkPolicies(requestContextParams,
|
||||||
|
authInfo.getArn(), log, (err, authorizationResults) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return callback(err, corsHeaders);
|
return next(err);
|
||||||
|
}
|
||||||
|
if (authorizationResults[0].isAllowed !== true) {
|
||||||
|
log.trace('authorization check failed for user',
|
||||||
|
{ locationConstraint });
|
||||||
|
return next(errors.AccessDenied);
|
||||||
|
}
|
||||||
|
return next(null, locationConstraint);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return next(null, locationConstraint);
|
||||||
|
},
|
||||||
|
(locationConstraint, next) => createBucket(authInfo, bucketName,
|
||||||
|
request.headers, locationConstraint, log, (err, previousBucket) => {
|
||||||
|
// if bucket already existed, gather any relevant cors
|
||||||
|
// headers
|
||||||
|
const corsHeaders = collectCorsHeaders(
|
||||||
|
request.headers.origin, request.method, previousBucket);
|
||||||
|
if (err) {
|
||||||
|
return next(err, corsHeaders);
|
||||||
}
|
}
|
||||||
pushMetric('createBucket', log, {
|
pushMetric('createBucket', log, {
|
||||||
authInfo,
|
authInfo,
|
||||||
bucket: bucketName,
|
bucket: bucketName,
|
||||||
});
|
});
|
||||||
return callback(null, corsHeaders);
|
return next(null, corsHeaders);
|
||||||
});
|
}),
|
||||||
|
], callback);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
checkLocationConstraint,
|
||||||
|
bucketPut,
|
||||||
|
};
|
||||||
|
|
|
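
With checkLocationConstraint now exported alongside bucketPut, its resolution order (explicit constraint from the XML, then a restEndpoints lookup keyed by the parsed host, then the us-east-1 fallback) can be exercised in isolation. A minimal sketch, assuming the require path and using a stub Werelogs-style logger:

const { checkLocationConstraint } = require('./lib/api/bucketPut'); // assumed path

// Stub logger exposing only what the helper actually calls.
const log = { trace: () => {} };

// An explicit constraint wins, provided it is listed in locationConstraints:
let res = checkLocationConstraint({ parsedHost: 's3.example.com' },
    'us-east-1', log);
// -> { error: null, locationConstraint: 'us-east-1' } when configured,
// -> { error: InvalidLocationConstraint } otherwise

// No constraint and no matching rest endpoint: us-east-1 is assumed:
res = checkLocationConstraint({ parsedHost: 'unknown.host' }, undefined, log);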

bucketPutACL.js

@@ -1,14 +1,14 @@
-import { errors } from 'arsenal';
-import async from 'async';
-
-import acl from '../metadata/acl';
-import aclUtils from '../utilities/aclUtils';
-import { cleanUpBucket } from './apiUtils/bucket/bucketCreation';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import services from '../services';
-import vault from '../auth/vault';
-import { pushMetric } from '../utapi/utilities';
+const async = require('async');
+const { errors } = require('arsenal');
+
+const acl = require('../metadata/acl');
+const aclUtils = require('../utilities/aclUtils');
+const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const constants = require('../../constants');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const vault = require('../auth/vault');
+const { pushMetric } = require('../utapi/utilities');
 
 /*
    Format of xml request:
@@ -40,7 +40,7 @@ import { pushMetric } from '../utapi/utilities';
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function bucketPutACL(authInfo, request, log, callback) {
+function bucketPutACL(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'bucketPutACL' });
 
     const bucketName = request.bucketName;
@@ -72,7 +72,6 @@ export default function bucketPutACL(authInfo, request, log, callback) {
         authInfo,
         bucketName,
         requestType: 'bucketPutACL',
-        log,
     };
     const possibleGrants = ['FULL_CONTROL', 'WRITE',
         'WRITE_ACP', 'READ', 'READ_ACP'];
@@ -103,12 +102,12 @@ export default function bucketPutACL(authInfo, request, log, callback) {
 
     return async.waterfall([
         function waterfall1(next) {
-            services.metadataValidateAuthorization(metadataValParams,
+            metadataValidateBucket(metadataValParams, log,
                 (err, bucket) => {
                     if (err) {
                         log.trace('request authorization failed', {
                             error: err,
-                            method: 'services.metadataValidateAuthorization',
+                            method: 'metadataValidateBucket',
                         });
                         return next(err, bucket);
                     }
@@ -295,3 +294,5 @@ export default function bucketPutACL(authInfo, request, log, callback) {
         return callback(err, corsHeaders);
     });
 }
+
+module.exports = bucketPutACL;

bucketPutCors.js

@@ -1,14 +1,13 @@
-import crypto from 'crypto';
-
-import async from 'async';
-import { errors } from 'arsenal';
-
-import bucketShield from './apiUtils/bucket/bucketShield';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
-import metadata from '../metadata/wrapper';
-import { parseCorsXml } from './apiUtils/bucket/bucketCors';
-import { pushMetric } from '../utapi/utilities';
+const crypto = require('crypto');
+const async = require('async');
+const { errors } = require('arsenal');
+
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const metadata = require('../metadata/wrapper');
+const { parseCorsXml } = require('./apiUtils/bucket/bucketCors');
+const { pushMetric } = require('../utapi/utilities');
 
 const requestType = 'bucketOwnerAction';
 
@@ -20,7 +19,7 @@ const requestType = 'bucketOwnerAction';
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function bucketPutCors(authInfo, request, log, callback) {
+function bucketPutCors(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'bucketPutCors' });
     const bucketName = request.bucketName;
     const canonicalID = authInfo.getCanonicalID();
@@ -92,3 +91,5 @@ export default function bucketPutCors(authInfo, request, log, callback) {
         return callback(err, corsHeaders);
     });
 }
+
+module.exports = bucketPutCors;

bucketPutReplication.js (new file)

@@ -0,0 +1,71 @@
+const { waterfall } = require('async');
+const { errors } = require('arsenal');
+
+const metadata = require('../metadata/wrapper');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
+const getReplicationConfiguration =
+    require('./apiUtils/bucket/getReplicationConfiguration');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+
+// The error response when a bucket does not have versioning 'Enabled'.
+const versioningNotEnabledError = errors.InvalidRequest.customizeDescription(
+    'Versioning must be \'Enabled\' on the bucket to apply a replication ' +
+    'configuration');
+
+/**
+ * bucketPutReplication - Create or update bucket replication configuration
+ * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
+ * @param {object} request - http request object
+ * @param {object} log - Werelogs logger
+ * @param {function} callback - callback to server
+ * @return {undefined}
+ */
+function bucketPutReplication(authInfo, request, log, callback) {
+    log.debug('processing request', { method: 'bucketPutReplication' });
+    const { bucketName, post, headers, method } = request;
+    const metadataValParams = {
+        authInfo,
+        bucketName,
+        requestType: 'bucketOwnerAction',
+    };
+    return waterfall([
+        // Validate the request XML and return the replication configuration.
+        next => getReplicationConfiguration(post, log, next),
+        // Check bucket user privileges and ensure versioning is 'Enabled'.
+        (config, next) =>
+            // TODO: Validate that destination bucket exists and has versioning.
+            metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+                if (err) {
+                    return next(err);
+                }
+                // Replication requires that versioning is 'Enabled'.
+                if (!bucket.isVersioningEnabled(bucket)) {
+                    return next(versioningNotEnabledError);
+                }
+                return next(null, config, bucket);
+            }),
+        // Set the replication configuration and update the bucket metadata.
+        (config, bucket, next) => {
+            bucket.setReplicationConfiguration(config);
+            metadata.updateBucket(bucket.getName(), bucket, log, err =>
+                next(err, bucket));
+        },
+    ], (err, bucket) => {
+        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
+        if (err) {
+            log.trace('error processing request', {
+                error: err,
+                method: 'bucketPutReplication',
+            });
+            return callback(err, corsHeaders);
+        }
+        pushMetric('putBucketReplication', log, {
+            authInfo,
+            bucket: bucketName,
+        });
+        return callback(null, corsHeaders);
+    });
+}
+
+module.exports = bucketPutReplication;
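
For context on what getReplicationConfiguration validates: a PutBucketReplication request body in the S3 API looks roughly like the sample below. The sample is illustrative only; the exact fields this implementation accepts are defined in getReplicationConfiguration, which is not part of this diff. Whatever it returns, the waterfall above still rejects the request with versioningNotEnabledError unless the source bucket already has versioning 'Enabled'.

// Illustrative request body (not taken from this diff):
const sampleReplicationXml = `
<ReplicationConfiguration>
  <Role>arn:aws:iam::123456789012:role/replication-role</Role>
  <Rule>
    <Status>Enabled</Status>
    <Prefix></Prefix>
    <Destination>
      <Bucket>arn:aws:s3:::destination-bucket</Bucket>
    </Destination>
  </Rule>
</ReplicationConfiguration>`;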

bucketPutVersioning.js

@@ -1,9 +1,11 @@
-import { waterfall } from 'async';
-import { parseString } from 'xml2js';
-
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import metadata from '../metadata/wrapper';
-import services from '../services';
+const { waterfall } = require('async');
+const { parseString } = require('xml2js');
+const { errors } = require('arsenal');
+
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const metadata = require('../metadata/wrapper');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
 
 /**
  * Format of xml request:
@@ -17,6 +19,37 @@ import services from '../services';
    x-amz-mfa: [SerialNumber] [TokenCode]
 */
+
+function _parseXML(request, log, cb) {
+    if (request.post === '') {
+        log.debug('request xml is missing');
+        return cb(errors.MalformedXML);
+    }
+    return parseString(request.post, (err, result) => {
+        if (err) {
+            log.debug('request xml is malformed');
+            return cb(errors.MalformedXML);
+        }
+        const versioningConf = result.VersioningConfiguration;
+        const status = versioningConf.Status ?
+            versioningConf.Status[0] : undefined;
+        const mfaDelete = versioningConf.MfaDelete ?
+            versioningConf.MfaDelete[0] : undefined;
+        const validStatuses = ['Enabled', 'Suspended'];
+        const validMfaDeletes = [undefined, 'Enabled', 'Disabled'];
+        if (validStatuses.indexOf(status) < 0 ||
+            validMfaDeletes.indexOf(mfaDelete) < 0) {
+            log.debug('illegal versioning configuration');
+            return cb(errors.IllegalVersioningConfigurationException);
+        }
+        if (versioningConf && mfaDelete === 'Enabled') {
+            log.debug('mfa deletion is not implemented');
+            return cb(errors.NotImplemented
+                .customizeDescription('MFA Deletion is not supported yet.'));
+        }
+        return process.nextTick(() => cb(null));
+    });
+}
 
 /**
  * Bucket Put Versioning - Create or update bucket Versioning
  * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@@ -25,7 +58,7 @@ import services from '../services';
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function bucketPutVersioning(authInfo, request, log, callback) {
+function bucketPutVersioning(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'bucketPutVersioning' });
 
     const bucketName = request.bucketName;
@@ -33,11 +66,11 @@ export default function bucketPutVersioning(authInfo, request, log, callback) {
         authInfo,
         bucketName,
         requestType: 'bucketOwnerAction',
-        log,
     };
 
     return waterfall([
-        next => services.metadataValidateAuthorization(metadataValParams,
+        next => _parseXML(request, log, next),
+        next => metadataValidateBucket(metadataValParams, log,
             (err, bucket) => next(err, bucket)), // ignore extra null object,
         (bucket, next) => parseString(request.post, (err, result) => {
             // just for linting; there should not be any parsing error here
@@ -68,14 +101,14 @@ export default function bucketPutVersioning(authInfo, request, log, callback) {
         if (err) {
             log.trace('error processing request', { error: err,
                 method: 'bucketPutVersioning' });
+        } else {
+            pushMetric('putBucketVersioning', log, {
+                authInfo,
+                bucket: bucketName,
+            });
         }
-        // TODO push metrics for bucketPutVersioning
-        // else {
-        //     pushMetric('bucketPutVersioning', log, {
-        //         authInfo,
-        //         bucket: bucketName,
-        //     }
-        // }
         return callback(err, corsHeaders);
     });
 }
+
+module.exports = bucketPutVersioning;
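
The new _parseXML gives bucketPutVersioning a strict gate before any metadata work happens. Since the helper is private to the module, the sketch below just restates its decision table on sample request bodies rather than calling it:

// Outcomes of the _parseXML validation on sample bodies (sketch only):
const cases = [
    // Status must be 'Enabled' or 'Suspended'
    { post: '<VersioningConfiguration><Status>Enabled</Status>' +
        '</VersioningConfiguration>', outcome: 'ok' },
    // any other Status -> IllegalVersioningConfigurationException
    { post: '<VersioningConfiguration><Status>On</Status>' +
        '</VersioningConfiguration>',
        outcome: 'IllegalVersioningConfigurationException' },
    // MfaDelete parses, but 'Enabled' is rejected -> NotImplemented
    { post: '<VersioningConfiguration><Status>Enabled</Status>' +
        '<MfaDelete>Enabled</MfaDelete></VersioningConfiguration>',
        outcome: 'NotImplemented' },
    // an empty body never reaches the parser -> MalformedXML
    { post: '', outcome: 'MalformedXML' },
];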

bucketPutWebsite.js

@@ -1,12 +1,12 @@
-import { errors } from 'arsenal';
-import async from 'async';
-
-import bucketShield from './apiUtils/bucket/bucketShield';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
-import metadata from '../metadata/wrapper';
-import { parseWebsiteConfigXml } from './apiUtils/bucket/bucketWebsite';
-import { pushMetric } from '../utapi/utilities';
+const async = require('async');
+const { errors } = require('arsenal');
+
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const metadata = require('../metadata/wrapper');
+const { parseWebsiteConfigXml } = require('./apiUtils/bucket/bucketWebsite');
+const { pushMetric } = require('../utapi/utilities');
 
 const requestType = 'bucketOwnerAction';
 
@@ -18,7 +18,7 @@ const requestType = 'bucketOwnerAction';
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function bucketPutWebsite(authInfo, request, log, callback) {
+function bucketPutWebsite(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'bucketPutWebsite' });
     const bucketName = request.bucketName;
     const canonicalID = authInfo.getCanonicalID();
@@ -76,3 +76,5 @@ export default function bucketPutWebsite(authInfo, request, log, callback) {
         return callback(err, corsHeaders);
     });
 }
+
+module.exports = bucketPutWebsite;

completeMultipartUpload.js

@@ -1,17 +1,23 @@
-import { errors } from 'arsenal';
-import async from 'async';
-import crypto from 'crypto';
-import { parseString } from 'xml2js';
-import escapeForXML from '../utilities/escapeForXML';
-import { pushMetric } from '../utapi/utilities';
-
-import data from '../data/wrapper';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import metadata from '../metadata/wrapper';
-import services from '../services';
-
-import { logger } from '../utilities/logger';
+const crypto = require('crypto');
+const async = require('async');
+const { parseString } = require('xml2js');
+const { errors, versioning } = require('arsenal');
+
+const escapeForXML = require('../utilities/escapeForXML');
+const { pushMetric } = require('../utapi/utilities');
+
+const data = require('../data/wrapper');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const constants = require('../../constants');
+const { versioningPreprocessing, checkQueryVersionId }
+    = require('./apiUtils/object/versioning');
+const metadata = require('../metadata/wrapper');
+const services = require('../services');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+
+const logger = require('../utilities/logger');
+
+const versionIdUtils = versioning.VersionID;
 
 /*
    Format of xml request:
@@ -79,7 +85,6 @@ const _convertToXml = xmlParams => {
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default
 function completeMultipartUpload(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'completeMultipartUpload' });
     const bucketName = request.bucketName;
@@ -102,6 +107,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
         hostname,
     };
 
+    const queryContainsVersionId = checkQueryVersionId(request.query);
+    if (queryContainsVersionId instanceof Error) {
+        return callback(queryContainsVersionId);
+    }
+
     function parseXml(xmlToParse, next) {
         return parseString(xmlToParse, (err, result) => {
             if (err || !result || !result.CompleteMultipartUpload
@@ -113,8 +123,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
         });
     }
 
-    async.waterfall([
-        function waterfall1(next) {
+    return async.waterfall([
+        function validateDestBucket(next) {
             const metadataValParams = {
                 objectKey,
                 authInfo,
@@ -122,11 +132,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                 // Required permissions for this action
                 // at the destinationBucket level are same as objectPut
                 requestType: 'objectPut',
-                log,
             };
-            services.metadataValidateAuthorization(metadataValParams, next);
+            metadataValidateBucketAndObj(metadataValParams, log, next);
         },
-        function waterfall2(destBucket, objMD, next) {
+        function validateMultipart(destBucket, objMD, next) {
             services.metadataValidateMultipart(metadataValParams,
                 (err, mpuBucket) => {
                     if (err) {
@@ -135,7 +144,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                     return next(null, destBucket, objMD, mpuBucket);
                 });
         },
-        function waterfall3(destBucket, objMD, mpuBucket, next) {
+        function parsePartsList(destBucket, objMD, mpuBucket, next) {
            if (request.post) {
                return parseXml(request.post, (err, jsonList) => {
                    if (err) {
@@ -146,7 +155,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
            }
            return next(errors.MalformedXML, destBucket);
        },
-        function waterfall4(destBucket, objMD, mpuBucket, jsonList, next) {
+        function retrieveParts(destBucket, objMD, mpuBucket, jsonList, next) {
            services.getMPUparts(mpuBucket.getName(), uploadId, log,
                (err, result) => {
                    if (err) {
@@ -157,8 +166,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                        storedParts, jsonList);
                });
        },
-        function waterfall5(destBucket, objMD, mpuBucket, storedParts, jsonList,
-            next) {
+        function processParts(destBucket, objMD, mpuBucket, storedParts,
+            jsonList, next) {
            const storedPartsAsObjects = storedParts.map(item => ({
                // In order to delete the part listing in the shadow
                // bucket, need the full key
@@ -211,7 +220,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                    if (!location || typeof location !== 'object') {
                        return;
                    }
-                    extraPartLocations.push({ key: location.key });
+                    extraPartLocations.push(location);
                });
            });
        }
@@ -323,7 +332,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                `overview${splitter}${objectKey}${splitter}${uploadId}`;
 
            return metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey,
-                log, (err, storedMetadata) => {
+                {}, log, (err, storedMetadata) => {
                    if (err) {
                        return next(err, destBucket);
                    }
@@ -333,7 +342,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                        storedPartsAsObjects, extraPartLocations);
                });
        },
-        function waterfall6(destBucket, objMD, mpuBucket, storedMetadata,
+        function prepForStoring(destBucket, objMD, mpuBucket, storedMetadata,
            aggregateETag, calculatedSize, dataLocations, mpuOverviewKey,
            storedPartsAsObjects, extraPartLocations, next) {
            const metaHeaders = {};
@@ -363,59 +372,87 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                multipart: true,
                log,
            };
-            next(null, destBucket, dataLocations, metaStoreParams,
-                mpuBucket, mpuOverviewKey, aggregateETag,
-                storedPartsAsObjects, objMD, extraPartLocations);
-        },
-        function waterfall7(destinationBucket, dataLocations,
-            metaStoreParams, mpuBucket, mpuOverviewKey,
-            aggregateETag, storedPartsAsObjects, objMD,
-            extraPartLocations, next) {
            const serverSideEncryption =
-                destinationBucket.getServerSideEncryption();
+                destBucket.getServerSideEncryption();
            let pseudoCipherBundle = null;
            if (serverSideEncryption) {
                pseudoCipherBundle = {
-                    algorithm: destinationBucket.getSseAlgorithm(),
-                    masterKeyId: destinationBucket.getSseMasterKeyId(),
+                    algorithm: destBucket.getSseAlgorithm(),
+                    masterKeyId: destBucket.getSseMasterKeyId(),
                };
            }
-            services.metadataStoreObject(destinationBucket.getName(),
-                dataLocations, pseudoCipherBundle, metaStoreParams, err => {
+            return versioningPreprocessing(bucketName,
+                destBucket, objectKey, objMD, log, (err, options) => {
+                    if (err) {
+                        // TODO: check AWS error when user requested a specific
+                        // version before any versions have been put
+                        const logLvl = err === errors.BadRequest ?
+                            'debug' : 'error';
+                        log[logLvl]('error getting versioning info', {
+                            error: err,
+                            method: 'versioningPreprocessing',
+                        });
+                        return next(err, destBucket);
+                    }
+                    const dataToDelete = options.dataToDelete;
+                    metaStoreParams.versionId = options.versionId;
+                    metaStoreParams.versioning = options.versioning;
+                    metaStoreParams.isNull = options.isNull;
+                    metaStoreParams.nullVersionId = options.nullVersionId;
+                    return next(null, destBucket, dataLocations,
+                        metaStoreParams, mpuBucket, mpuOverviewKey,
+                        aggregateETag, storedPartsAsObjects, objMD,
+                        extraPartLocations, pseudoCipherBundle, dataToDelete);
+                });
+        },
+        function storeAsNewObj(destinationBucket, dataLocations,
+            metaStoreParams, mpuBucket, mpuOverviewKey, aggregateETag,
+            storedPartsAsObjects, objMD, extraPartLocations, pseudoCipherBundle,
+            dataToDelete, next) {
+            return services.metadataStoreObject(destinationBucket.getName(),
+                dataLocations, pseudoCipherBundle, metaStoreParams,
+                (err, res) => {
                    if (err) {
                        return next(err, destinationBucket);
                    }
-                    if (objMD && objMD.location) {
-                        const dataToDelete = Array.isArray(objMD.location) ?
-                            objMD.location : [objMD.location];
-                        data.batchDelete(dataToDelete, logger
-                            .newRequestLoggerFromSerializedUids(log
-                            .getSerializedUids()));
+                    const generatedVersionId = res ? res.versionId : undefined;
+                    // in cases where completing mpu overwrites a previous
+                    // null version when versioning is suspended or versioning
+                    // is not enabled, need to delete pre-existing data
+                    if (dataToDelete) {
+                        data.batchDelete(dataToDelete, request.method, null,
+                            logger.newRequestLoggerFromSerializedUids(log
+                            .getSerializedUids()));
                    }
-                    return next(null, mpuBucket, mpuOverviewKey,
-                        aggregateETag, storedPartsAsObjects,
-                        extraPartLocations, destinationBucket);
+                    return next(null, mpuBucket, mpuOverviewKey, aggregateETag,
+                        storedPartsAsObjects, extraPartLocations,
+                        destinationBucket, generatedVersionId);
                });
        },
-        function waterfall8(mpuBucket, mpuOverviewKey, aggregateETag,
-            storedPartsAsObjects, extraPartLocations, destinationBucket, next) {
+        function deletePartsMetadata(mpuBucket, mpuOverviewKey, aggregateETag,
+            storedPartsAsObjects, extraPartLocations, destinationBucket,
+            generatedVersionId, next) {
            const keysToDelete = storedPartsAsObjects.map(item => item.key);
            keysToDelete.push(mpuOverviewKey);
            services.batchDeleteObjectMetadata(mpuBucket.getName(),
                keysToDelete, log, err => next(err, destinationBucket,
-                    aggregateETag));
+                    aggregateETag, generatedVersionId));
            if (extraPartLocations.length > 0) {
-                data.batchDelete(extraPartLocations, logger
-                    .newRequestLoggerFromSerializedUids(log
+                data.batchDelete(extraPartLocations, request.method, null,
+                    logger.newRequestLoggerFromSerializedUids(log
                    .getSerializedUids()));
            }
        },
-    ], (err, destinationBucket, aggregateETag) => {
-        const corsHeaders =
+    ], (err, destinationBucket, aggregateETag, generatedVersionId) => {
+        const resHeaders =
            collectCorsHeaders(request.headers.origin, request.method,
                destinationBucket);
        if (err) {
-            return callback(err, null, corsHeaders);
+            return callback(err, null, resHeaders);
+        }
+        if (generatedVersionId) {
+            resHeaders['x-amz-version-id'] =
+                versionIdUtils.encode(generatedVersionId);
        }
        xmlParams.ETag = `"${aggregateETag}"`;
        const xml = _convertToXml(xmlParams);
@@ -423,6 +460,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
            authInfo,
            bucket: bucketName,
        });
-        return callback(null, xml, corsHeaders);
+        return callback(null, xml, resHeaders);
    });
 }
+
+module.exports = completeMultipartUpload;
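
The client-visible effect of the new versioning steps is small: when versioningPreprocessing allocates a version id, the completed object's id is returned in the x-amz-version-id response header, encoded with arsenal's version-id encoder (the same versioning.VersionID the diff aliases as versionIdUtils). A distilled sketch of that header handling:

const { versioning } = require('arsenal');
const versionIdUtils = versioning.VersionID;

function addVersionIdHeader(resHeaders, generatedVersionId) {
    if (generatedVersionId) {
        // encode() turns the internal version id into the opaque form
        // S3 clients see in x-amz-version-id
        resHeaders['x-amz-version-id'] =
            versionIdUtils.encode(generatedVersionId);
    }
    return resHeaders;
}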

corsPreflight.js

@@ -1,10 +1,10 @@
-import { errors } from 'arsenal';
-
-import metadata from '../metadata/wrapper';
-import bucketShield from './apiUtils/bucket/bucketShield';
-import { findCorsRule,
-    generateCorsResHeaders } from './apiUtils/object/corsResponse';
-// import { pushMetric } from '../utapi/utilities';
+const { errors } = require('arsenal');
+
+const metadata = require('../metadata/wrapper');
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const { findCorsRule, generateCorsResHeaders }
+    = require('./apiUtils/object/corsResponse');
+// const { pushMetric } = require('../utapi/utilities');
 
 const requestType = 'objectGet';
 
@@ -23,7 +23,7 @@ const customizedErrs = {
  * with either error code or 200 response
  * @return {undefined}
  */
-export default function corsPreflight(request, log, callback) {
+function corsPreflight(request, log, callback) {
     log.debug('processing request', { method: 'corsPreflight' });
 
     const bucketName = request.bucketName;
@@ -81,3 +81,5 @@ export default function corsPreflight(request, log, callback) {
         return callback(null, resHeaders);
     });
 }
+
+module.exports = corsPreflight;

initiateMultipartUpload.js

@@ -1,12 +1,19 @@
-import UUID from 'node-uuid';
-import escapeForXML from '../utilities/escapeForXML';
-import { pushMetric } from '../utapi/utilities';
-import { errors } from 'arsenal';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { cleanUpBucket } from './apiUtils/bucket/bucketCreation';
-import constants from '../../constants';
-import services from '../services';
-import utils from '../utils';
+const UUID = require('node-uuid');
+const { errors, s3validators } = require('arsenal');
+const getMetaHeaders = s3validators.userMetadata.getMetaHeaders;
+
+const escapeForXML = require('../utilities/escapeForXML');
+const { pushMetric } = require('../utapi/utilities');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
+const constants = require('../../constants');
+const services = require('../services');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const locationConstraintCheck
+    = require('./apiUtils/object/locationConstraintCheck');
+const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
+    .validateWebsiteHeader;
 
 /*
    Sample xml response:
@@ -52,7 +59,6 @@ const _convertToXml = xmlParams => {
  * @return {undefined} calls callback from router
  * with err and result as arguments
  */
-export default
 function initiateMultipartUpload(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'initiateMultipartUpload' });
     const bucketName = request.bucketName;
@@ -65,13 +71,20 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
     // multipart upload object with a key containing the splitter.
     const websiteRedirectHeader =
         request.headers['x-amz-website-redirect-location'];
-    if (!utils.validateWebsiteHeader(websiteRedirectHeader)) {
+    if (!validateWebsiteHeader(websiteRedirectHeader)) {
         const err = errors.InvalidRedirectLocation;
         log.debug('invalid x-amz-website-redirect-location' +
            `value ${websiteRedirectHeader}`, { error: err });
         return callback(err);
     }
-    const metaHeaders = utils.getMetaHeaders(request.headers);
+    const metaHeaders = getMetaHeaders(request.headers);
+    if (metaHeaders instanceof Error) {
+        log.debug('user metadata validation failed', {
+            error: metaHeaders,
+            method: 'createAndStoreObject',
+        });
+        return process.nextTick(() => callback(metaHeaders));
+    }
     // Generate uniqueID without dashes so that routing not messed up
     const uploadId = UUID.v4().replace(/-/g, '');
     // TODO: Add this as a utility function for all object put requests
@@ -91,7 +104,6 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
         bucketName,
         // Required permissions for this action are same as objectPut
         requestType: 'objectPut',
-        log,
     };
     const accountCanonicalID = authInfo.getCanonicalID();
     let initiatorID = accountCanonicalID;
@@ -100,6 +112,14 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
         initiatorID = authInfo.getArn();
         initiatorDisplayName = authInfo.getIAMdisplayName();
     }
+    const xmlParams = {
+        bucketName,
+        objectKey,
+        uploadId,
+    };
+    const xml = _convertToXml(xmlParams);
+
+    function _storetheMPObject(destinationBucket, corsHeaders) {
         const metadataStoreParams = {
             objectKey,
             uploadId,
@@ -111,7 +131,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
             ownerID: accountCanonicalID,
             ownerDisplayName: authInfo.getAccountDisplayName(),
             // If initiator is an IAM user, the initiatorID is the ARN.
-            // Otherwise, it is the same as the ownerID (the account canonicalID)
+            // Otherwise, it is the same as the ownerID
+            // (the account canonicalID)
             initiatorID,
             // If initiator is an IAM user, the initiatorDisplayName is the
             // IAM user's displayname.
@@ -119,14 +140,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
             initiatorDisplayName,
             splitter: constants.splitter,
         };
-        const xmlParams = {
-            bucketName,
-            objectKey,
-            uploadId,
-        };
-        const xml = _convertToXml(xmlParams);
-
-    function _storetheMPObject(destinationBucket, corsHeaders) {
         const serverSideEncryption =
             destinationBucket.getServerSideEncryption();
         let cipherBundle = null;
@@ -136,7 +150,17 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
                 masterKeyId: serverSideEncryption.masterKeyId,
             };
         }
-        services.getMPUBucket(destinationBucket, bucketName, log,
+        const backendInfoObj = locationConstraintCheck(request, null,
+            destinationBucket, log);
+        if (backendInfoObj.err) {
+            return process.nextTick(() => {
+                callback(backendInfoObj.err);
+            });
+        }
+        metadataStoreParams.controllingLocationConstraint =
+            backendInfoObj.controllingLC;
+
+        return services.getMPUBucket(destinationBucket, bucketName, log,
             (err, MPUbucket) => {
                 if (err) {
                     log.trace('error getting MPUbucket', {
@@ -168,14 +192,14 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
         });
     }
 
-    services.metadataValidateAuthorization(metadataValParams,
+    metadataValidateBucketAndObj(metadataValParams, log,
         (err, destinationBucket) => {
             const corsHeaders = collectCorsHeaders(request.headers.origin,
                 request.method, destinationBucket);
             if (err) {
                 log.debug('error processing request', {
                     error: err,
-                    method: 'services.metadataValidateAuthorization',
+                    method: 'metadataValidateBucketAndObj',
                 });
                 return callback(err, null, corsHeaders);
             }
@@ -187,11 +211,13 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
             }
             if (destinationBucket.hasTransientFlag() ||
                 destinationBucket.hasDeletedFlag()) {
-                log.trace('transient or deleted flag so cleaning up bucket');
+                log.trace('transient or deleted flag so cleaning ' +
+                    'up bucket');
                 return cleanUpBucket(destinationBucket,
                     accountCanonicalID, log, err => {
                         if (err) {
-                            log.debug('error cleaning up bucket with flag',
+                            log.debug('error cleaning up bucket ' +
+                                'with flag',
                                 { error: err,
                                     transientFlag:
                                         destinationBucket.hasTransientFlag(),
@@ -212,3 +238,5 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
         });
     return undefined;
 }
+
+module.exports = initiateMultipartUpload;
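
One pattern in this hunk deserves a note: getMetaHeaders signals invalid user metadata by returning an Error instance instead of throwing, and the caller defers the callback with process.nextTick so the API never calls back synchronously. A distilled, self-contained sketch; validateMeta is a hypothetical stand-in, not the real validator:

// Hypothetical stand-in for getMetaHeaders: returns an Error on failure.
function validateMeta(headers) {
    const meta = {};
    Object.keys(headers).forEach(key => {
        if (key.startsWith('x-amz-meta-')) {
            meta[key] = headers[key];
        }
    });
    return Object.keys(meta).length > 10 ?
        new Error('too many metadata headers') : meta;
}

function handler(headers, callback) {
    const metaHeaders = validateMeta(headers);
    if (metaHeaders instanceof Error) {
        // defer, so callers always observe an asynchronous callback
        return process.nextTick(() => callback(metaHeaders));
    }
    return callback(null, metaHeaders);
}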

listMultipartUploads.js

@@ -1,12 +1,14 @@
-import async from 'async';
-import escapeForXML from '../utilities/escapeForXML';
-import querystring from 'querystring';
-
-import constants from '../../constants';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import services from '../services';
-import { pushMetric } from '../utapi/utilities';
-import { errors } from 'arsenal';
+const querystring = require('querystring');
+const async = require('async');
+
+const escapeForXML = require('../utilities/escapeForXML');
+const constants = require('../../constants');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const services = require('../services');
+const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
+const { errors } = require('arsenal');
 
 // Sample XML response:
 /*
@@ -159,8 +161,7 @@ const _convertToXml = xmlParams => {
  * with either error code or xml response body
  * @return {undefined}
  */
-export default function listMultipartUploads(authInfo,
-    request, log, callback) {
+function listMultipartUploads(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'listMultipartUploads' });
     const query = request.query;
     const bucketName = request.bucketName;
@@ -178,14 +179,13 @@ export default function listMultipartUploads(authInfo,
         // the authorization to list multipart uploads is the same
         // as listing objects in a bucket.
         requestType: 'bucketGet',
-        log,
     };
 
     async.waterfall([
         function waterfall1(next) {
             // Check final destination bucket for authorization rather
             // than multipart upload bucket
-            services.metadataValidateAuthorization(metadataValParams,
+            metadataValidateBucket(metadataValParams, log,
                 (err, bucket) => next(err, bucket));
         },
         function getMPUBucket(bucket, next) {
@@ -213,7 +213,7 @@ export default function listMultipartUploads(authInfo,
                 maxKeys: maxUploads,
                 prefix: `overview${splitter}${prefix}`,
                 queryPrefixLength: prefix.length,
-                listingType: 'multipartuploads',
+                listingType: 'MPU',
                 splitter,
             };
             services.getMultipartUploadListing(mpuBucketName, listingParams,
@@ -242,3 +242,5 @@ export default function listMultipartUploads(authInfo,
         return callback(null, xml, corsHeaders);
     });
 }
+
+module.exports = listMultipartUploads;

listParts.js

@@ -1,12 +1,14 @@
-import async from 'async';
-import querystring from 'querystring';
-
-import constants from '../../constants';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import services from '../services';
-import escapeForXML from '../utilities/escapeForXML';
-import { pushMetric } from '../utapi/utilities';
-import { errors } from 'arsenal';
+const querystring = require('querystring');
+const async = require('async');
+
+const constants = require('../../constants');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const services = require('../services');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const escapeForXML = require('../utilities/escapeForXML');
+const { pushMetric } = require('../utapi/utilities');
+const { errors } = require('arsenal');
 
 /*
    Format of xml response:
@@ -67,7 +69,7 @@ function buildXML(xmlParams, xml, encodingFn) {
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default function listParts(authInfo, request, log, callback) {
+function listParts(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'listParts' });
 
     const bucketName = request.bucketName;
@@ -91,7 +93,6 @@ export default function listParts(authInfo, request, log, callback) {
         objectKey,
         uploadId,
         requestType: 'listParts',
-        log,
     };
     // For validating the request at the destinationBucket level
     // params are the same as validating at the MPU level
@@ -106,7 +107,7 @@ export default function listParts(authInfo, request, log, callback) {
 
     async.waterfall([
         function checkDestBucketVal(next) {
-            services.metadataValidateAuthorization(metadataValParams,
+            metadataValidateBucketAndObj(metadataValParams, log,
                 (err, destinationBucket) => {
                     if (err) {
                         return next(err, destinationBucket, null);
@@ -125,6 +126,7 @@ export default function listParts(authInfo, request, log, callback) {
                 });
         },
         function waterfall2(destBucket, next) {
+            metadataValMPUparams.log = log;
             services.metadataValidateMultipart(metadataValMPUparams,
                 (err, mpuBucket, mpuOverview) => {
                     if (err) {
@@ -233,3 +235,5 @@ export default function listParts(authInfo, request, log, callback) {
         });
     return undefined;
 }
+
+module.exports = listParts;
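
The lone added line `metadataValMPUparams.log = log;` exists because the two validation layers now disagree about where the logger lives: the new metadataValidateBucketAndObj takes it as a positional argument, while the untouched services.metadataValidateMultipart still reads it from its params object, so it has to be stitched back in. A sketch of the mixed convention (require paths are assumptions):

const services = require('./lib/services');                    // assumed path
const { metadataValidateBucketAndObj } =
    require('./lib/metadata/metadataUtils');                   // assumed path

function validateListParts(metadataValParams, metadataValMPUparams, log, done) {
    // new-style helper: logger is a separate argument
    return metadataValidateBucketAndObj(metadataValParams, log, err => {
        if (err) {
            return done(err);
        }
        // old-style helper: logger must ride inside the params object
        metadataValMPUparams.log = log;
        return services.metadataValidateMultipart(metadataValMPUparams, done);
    });
}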
@@ -1,17 +1,23 @@
-import crypto from 'crypto';
+const crypto = require('crypto');
 
-import async from 'async';
-import { auth, errors } from 'arsenal';
-import { parseString } from 'xml2js';
+const async = require('async');
+const { parseString } = require('xml2js');
+const { auth, errors, versioning } = require('arsenal');
 
-import escapeForXML from '../utilities/escapeForXML';
-import { pushMetric } from '../utapi/utilities';
-import bucketShield from './apiUtils/bucket/bucketShield';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import metadata from '../metadata/wrapper';
-import services from '../services';
-import vault from '../auth/vault';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
+const escapeForXML = require('../utilities/escapeForXML');
+const { pushMetric } = require('../utapi/utilities');
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const metadata = require('../metadata/wrapper');
+const services = require('../services');
+const vault = require('../auth/vault');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const { preprocessingVersioningDelete }
+    = require('./apiUtils/object/versioning');
+const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
+const { metadataGetObject } = require('../metadata/metadataUtils');
 
+const versionIdUtils = versioning.VersionID;
 
 /*
@@ -50,8 +56,12 @@ import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
  * @param {boolean} quietSetting - true if xml should just include error list
  * and false if should include deleted list and error list
  * @param {object []} errorResults - list of error result objects with each
- * object containing -- key: objectName, error: arsenal error
- * @param {string []} deleted - list of object keys deleted
+ * object containing -- entry: { key, versionId }, error: arsenal error
+ * @param {object []} deleted - list of object deleted, an object has the format
+ * object: { entry, isDeleteMarker, isDeletingDeleteMarker }
+ * object.entry : above
+ * object.newDeleteMarker: if deletion resulted in delete marker
+ * object.isDeletingDeleteMarker: if a delete marker was deleted
  * @return {string} xml string
  */
 function _formatXML(quietSetting, errorResults, deleted) {
@@ -59,9 +69,17 @@ function _formatXML(quietSetting, errorResults, deleted) {
     errorResults.forEach(errorObj => {
         errorXML.push(
             '<Error>',
-            '<Key>', escapeForXML(errorObj.key), '</Key>',
-            '<Code>', errorObj.error.message, '</Code>',
-            '<Message>', errorObj.error.description, '</Message>',
+            '<Key>', escapeForXML(errorObj.entry.key), '</Key>',
+            '<Code>', escapeForXML(errorObj.error.message), '</Code>');
+        if (errorObj.entry.versionId) {
+            const version = errorObj.entry.versionId === 'null' ?
+                'null' : escapeForXML(errorObj.entry.versionId);
+            errorXML.push('<VersionId>', version, '</VersionId>');
+        }
+        errorXML.push(
+            '<Message>',
+            escapeForXML(errorObj.error.description),
+            '</Message>',
             '</Error>'
         );
     });
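The error formatting now emits a `<VersionId>` element between `<Code>` and `<Message>` whenever the failed entry targeted a specific version. A standalone sketch with sample values; `escapeForXML` is elided here, but the real code escapes every user-supplied field:

const errorXML = [];
const errorObj = {
    entry: { key: 'photo.jpg', versionId: '39383334' }, // sample values
    error: { message: 'AccessDenied', description: 'Access Denied' },
};
errorXML.push('<Error>', '<Key>', errorObj.entry.key, '</Key>',
    '<Code>', errorObj.error.message, '</Code>');
if (errorObj.entry.versionId) {
    const version = errorObj.entry.versionId === 'null' ?
        'null' : errorObj.entry.versionId;
    errorXML.push('<VersionId>', version, '</VersionId>');
}
errorXML.push('<Message>', errorObj.error.description, '</Message>',
    '</Error>');
console.log(errorXML.join(''));
// -> <Error><Key>photo.jpg</Key><Code>AccessDenied</Code>
//    <VersionId>39383334</VersionId><Message>Access Denied</Message></Error>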
@@ -79,12 +97,34 @@ function _formatXML(quietSetting, errorResults, deleted) {
         return xml.join('');
     }
     const deletedXML = [];
-    deleted.forEach(objKey => {
+    deleted.forEach(version => {
+        const isDeleteMarker = version.isDeleteMarker;
+        const deleteMarkerVersionId = version.deleteMarkerVersionId;
+        // if deletion resulted in new delete marker or deleting a delete marker
         deletedXML.push(
             '<Deleted>',
-            '<Key>', escapeForXML(objKey), '</Key>',
-            '</Deleted>'
+            '<Key>',
+            escapeForXML(version.entry.key),
+            '</Key>'
         );
+        if (version.entry.versionId) {
+            deletedXML.push(
+                '<VersionId>',
+                escapeForXML(version.entry.versionId),
+                '</VersionId>'
+            );
+        }
+        if (isDeleteMarker) {
+            deletedXML.push(
+                '<DeleteMarker>',
+                isDeleteMarker,
+                '</DeleteMarker>',
+                '<DeleteMarkerVersionId>',
+                deleteMarkerVersionId,
+                '</DeleteMarkerVersionId>'
+            );
+        }
+        deletedXML.push('</Deleted>');
     });
     xml[2] = deletedXML.join('');
     return xml.join('');
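For reference, a runnable sketch of the `<Deleted>` entry this produces when a delete created a new delete marker (the version id is a sample value):

const deletedXML = [];
const version = {
    entry: { key: 'photo.jpg' },            // sample entry
    isDeleteMarker: true,
    deleteMarkerVersionId: '393833343536',  // sample encoded version id
};
deletedXML.push('<Deleted>', '<Key>', version.entry.key, '</Key>');
if (version.isDeleteMarker) {
    deletedXML.push('<DeleteMarker>', version.isDeleteMarker,
        '</DeleteMarker>', '<DeleteMarkerVersionId>',
        version.deleteMarkerVersionId, '</DeleteMarkerVersionId>');
}
deletedXML.push('</Deleted>');
console.log(deletedXML.join(''));
// -> <Deleted><Key>photo.jpg</Key><DeleteMarker>true</DeleteMarker>
//    <DeleteMarkerVersionId>393833343536</DeleteMarkerVersionId></Deleted>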
@@ -98,15 +138,34 @@ function _parseXml(xmlToParse, next) {
         const json = result.Delete;
         // not quiet is the default if nothing specified
         const quietSetting = json.Quiet && json.Quiet[0] === 'true';
-        // format of json is {"Object":[{"Key":["test1"]},{"Key":["test2"]}]}
-        const objects = json.Object.map(item => item.Key[0]);
+        // format of json is
+        // {"Object":[
+        //      {"Key":["test1"],"VersionId":["vid"]},
+        //      {"Key":["test2"]}
+        // ]}
+        const objects = [];
+        for (let i = 0; i < json.Object.length; i++) {
+            const item = json.Object[i];
+            if (!item.Key) {
+                return next(errors.MalformedXML);
+            }
+            const object = { key: item.Key[0] };
+            if (item.VersionId) {
+                object.versionId = item.VersionId[0];
+            }
+            objects.push(object);
+        }
         return next(null, quietSetting, objects);
     });
 }
 
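The map over `json.Object` becomes an explicit loop so a missing `<Key>` can fail fast with `MalformedXML`, and so an optional `<VersionId>` can ride along on each entry. A round-trip sketch of what the parser now produces, calling xml2js directly on a sample body (version id is made up):

const { parseString } = require('xml2js');

const body = '<Delete>' +
    '<Object><Key>a.txt</Key><VersionId>vid1</VersionId></Object>' +
    '<Object><Key>b.txt</Key></Object>' +
    '</Delete>';
parseString(body, (err, result) => {
    const objects = result.Delete.Object.map(item => {
        const object = { key: item.Key[0] };
        if (item.VersionId) {
            object.versionId = item.VersionId[0];
        }
        return object;
    });
    console.log(objects);
    // -> [ { key: 'a.txt', versionId: 'vid1' }, { key: 'b.txt' } ]
});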
 /**
  * gets object metadata and deletes object
+ * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
+ * @param {string} canonicalID - canonicalId of requester
+ * @param {object} request - http request
  * @param {string} bucketName - bucketName
+ * @param {BucketInfo} bucket - bucket
  * @param {boolean} quietSetting - true if xml should just include error list
  * and false if should include deleted list and error list
  * @param {object []} errorResults - list of error result objects with each
@@ -118,58 +177,114 @@ function _parseXml(xmlToParse, next) {
  * @callback called with (err, quietSetting, errorResults, numOfObjects,
  * successfullyDeleted, totalContentLengthDeleted)
  */
-export function getObjMetadataAndDelete(bucketName, quietSetting,
-    errorResults, inPlay, log, next) {
+function getObjMetadataAndDelete(authInfo, canonicalID, request,
+    bucketName, bucket, quietSetting, errorResults, inPlay, log, next) {
     const successfullyDeleted = [];
     let totalContentLengthDeleted = 0;
-    let numOfObjects = 0;
-    // for obj deletes, no need to check acl's at object level
-    // (authority is at the bucket level for obj deletes)
+    let numOfObjectsRemoved = 0;
+    const skipError = new Error('skip');
 
     // doing 5 requests at a time. note that the data wrapper
     // will do 5 parallel requests to data backend to delete parts
-    return async.forEachLimit(inPlay, 5, (key, moveOn) => {
-        metadata.getObjectMD(bucketName, key, log, (err, objMD) => {
+    return async.forEachLimit(inPlay, 5, (entry, moveOn) => {
+        async.waterfall([
+            callback => {
+                let decodedVersionId;
+                if (entry.versionId) {
+                    decodedVersionId = entry.versionId === 'null' ?
+                        'null' : versionIdUtils.decode(entry.versionId);
+                }
+                if (decodedVersionId instanceof Error) {
+                    return callback(errors.NoSuchVersion);
+                }
+                return callback(null, decodedVersionId);
+            },
+            // for obj deletes, no need to check acl's at object level
+            // (authority is at the bucket level for obj deletes)
+            (versionId, callback) => metadataGetObject(bucketName, entry.key,
+                versionId, log, (err, objMD) => {
                     // if general error from metadata return error
                     if (err && !err.NoSuchKey) {
-            log.error('error getting object MD', { error: err, key });
-            errorResults.push({
-                key,
-                error: err,
-            });
-            return moveOn();
+                        return callback(err);
                    }
-            // if particular key does not exist, AWS returns success
-            // for key so add to successfullyDeleted list and move on
                    if (err && err.NoSuchKey) {
-            successfullyDeleted.push(key);
+                        const verCfg = bucket.getVersioningConfiguration();
+                        // To adhere to AWS behavior, create a delete marker
+                        // if trying to delete an object that does not exist
+                        // when versioning has been configured
+                        if (verCfg && !entry.versionId) {
+                            log.debug('trying to delete specific version ' +
+                                ' that does not exist');
+                            return callback(null, objMD, versionId);
+                        }
+                        // otherwise if particular key does not exist, AWS
+                        // returns success for key so add to successfullyDeleted
+                        // list and move on
+                        successfullyDeleted.push({ entry });
+                        return callback(skipError);
+                    }
+                    return callback(null, objMD, versionId);
+                }),
+            (objMD, versionId, callback) =>
+                preprocessingVersioningDelete(bucketName, bucket, objMD,
+                    versionId, log, (err, options) => callback(err, options,
+                        objMD)),
+            (options, objMD, callback) => {
+                const deleteInfo = {};
+                if (options && options.deleteData) {
+                    deleteInfo.deleted = true;
+                    return services.deleteObject(bucketName, objMD,
+                        entry.key, options, log, err =>
+                            callback(err, objMD, deleteInfo));
+                }
+                deleteInfo.newDeleteMarker = true;
+                // This call will create a delete-marker
+                return createAndStoreObject(bucketName, bucket, entry.key,
+                    objMD, authInfo, canonicalID, null, request,
+                    deleteInfo.newDeleteMarker, null, log, (err, result) =>
+                        callback(err, objMD, deleteInfo, result.versionId));
+            },
+        ], (err, objMD, deleteInfo, versionId) => {
+            if (err === skipError) {
+                return moveOn();
+            } else if (err) {
+                log.error('error deleting object', { error: err, entry });
+                errorResults.push({ entry, error: err });
                 return moveOn();
             }
-            return services.deleteObject(bucketName, objMD, key, log,
-                err => {
-                    if (err) {
-                        log.error('error deleting object', { error: err, key });
-                        errorResults.push({
-                            key,
-                            error: err,
-                        });
-                        return moveOn();
-                    }
-                    if (objMD['content-length']) {
-                        totalContentLengthDeleted +=
-                            objMD['content-length'];
+            if (deleteInfo.deleted && objMD['content-length']) {
+                numOfObjectsRemoved++;
+                totalContentLengthDeleted += objMD['content-length'];
             }
-                    numOfObjects++;
-                    successfullyDeleted.push(key);
+            let isDeleteMarker;
+            let deleteMarkerVersionId;
+            // - If trying to delete an object that does not exist (if a new
+            // delete marker was created)
+            // - Or if an object exists but no version was specified
+            // return DeleteMarkerVersionId equals the versionID of the marker
+            // you just generated and DeleteMarker tag equals true
+            if (deleteInfo.newDeleteMarker) {
+                isDeleteMarker = true;
+                deleteMarkerVersionId = versionIdUtils.encode(versionId);
+                // In this case we are putting a new object (i.e., the delete
+                // marker), so we decrement the numOfObjectsRemoved value.
+                numOfObjectsRemoved--;
+            // If trying to delete a delete marker, DeleteMarkerVersionId equals
+            // deleteMarker's versionID and DeleteMarker equals true
+            } else if (objMD && objMD.isDeleteMarker) {
+                isDeleteMarker = true;
+                deleteMarkerVersionId = entry.versionId;
+            }
+            successfullyDeleted.push({ entry, isDeleteMarker,
+                deleteMarkerVersionId });
             return moveOn();
         });
-        });
     },
     // end of forEach func
     err => {
-        log.trace('finished deleting objects', { numOfObjects });
-        return next(err, quietSetting, errorResults, numOfObjects,
-            successfullyDeleted, totalContentLengthDeleted);
+        log.trace('finished deleting objects', { numOfObjectsRemoved });
+        return next(err, quietSetting, errorResults, numOfObjectsRemoved,
+            successfullyDeleted, totalContentLengthDeleted, bucket);
     });
 }
 
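The `skipError` sentinel introduced above is a small but important pattern: a dedicated Error instance lets the final waterfall handler distinguish "this entry was already handled, skip it" from real failures by identity comparison. A minimal runnable sketch of the same pattern:

const async = require('async');

const skipError = new Error('skip');
async.waterfall([
    callback => callback(skipError), // short-circuit, not a real failure
], err => {
    if (err === skipError) {
        console.log('entry skipped, moving on');
    } else if (err) {
        console.log('real error', err);
    }
});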
@@ -177,7 +292,7 @@ export function getObjMetadataAndDelete(bucketName, quietSetting,
  * multiObjectDelete - Delete multiple objects
  * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
  * @param {object} request - http.IncomingMessage as modified by
- * lib/utils.normalizeRequest and routes/routePOST.js
+ * lib/utils and routes/routePOST.js
  * @param {object} request.headers - request headers
  * @param {object} request.query - query from request
  * @param {string} request.post - concatenation of request body
@@ -188,7 +303,6 @@ export function getObjMetadataAndDelete(bucketName, quietSetting,
  * @param {function} callback - callback to server
  * @return {undefined}
  */
-export default
 function multiObjectDelete(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'multiObjectDelete' });
     if (!request.post) {
@@ -205,7 +319,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
 
     return async.waterfall([
         function parseXML(next) {
-            return _parseXml(request.post, (err, quietSetting, objects) => {
+            return _parseXml(request.post,
+                (err, quietSetting, objects) => {
                 if (err || objects.length < 1 || objects.length > 1000) {
                     return next(errors.MalformedXML);
                 }
@@ -213,11 +328,9 @@ function multiObjectDelete(authInfo, request, log, callback) {
             });
         },
         function checkPolicies(quietSetting, objects, next) {
-            // track the error results for any keys with
-            // an error response
-            const errorResults = [];
             // track keys that are still on track to be deleted
             const inPlay = [];
+            const errorResults = [];
             // if request from account, no need to check policies
             // all objects are inPlay so send array of object keys
             // as inPlay argument
@@ -246,16 +359,22 @@ function multiObjectDelete(authInfo, request, log, callback) {
                     signatureAge: authParams.params.data.signatureAge,
                 },
                 parameterize: {
-                    specificResource: objects,
+                    // eslint-disable-next-line
+                    specificResource: objects.map(entry => {
+                        return {
+                            key: entry.key,
+                            versionId: entry.versionId,
+                        };
+                    }),
                 },
             };
             return vault.checkPolicies(requestContextParams, authInfo.getArn(),
                 log, (err, authorizationResults) => {
                     // there were no policies so received a blanket AccessDenied
                     if (err && err.AccessDenied) {
-                        objects.forEach(key => {
+                        objects.forEach(entry => {
                             errorResults.push({
-                                key,
+                                entry,
                                 error: errors.AccessDenied });
                         });
                         // send empty array for inPlay
@@ -280,19 +399,23 @@ function multiObjectDelete(authInfo, request, log, callback) {
                     for (let i = 0; i < authorizationResults.length; i++) {
                         const result = authorizationResults[i];
                         // result is { isAllowed: true,
-                        // arn: arn:aws:s3:::bucket/object} unless not allowed
+                        // arn: arn:aws:s3:::bucket/object,
+                        // versionId: sampleversionId } unless not allowed
                         // in which case no isAllowed key will be present
                         const slashIndex = result.arn.indexOf('/');
                         if (slashIndex === -1) {
                             log.error('wrong arn format from vault');
                             return next(errors.InternalError);
                         }
-                        const key = result.arn.slice(slashIndex + 1);
+                        const entry = {
+                            key: result.arn.slice(slashIndex + 1),
+                            versionId: result.versionId,
+                        };
                         if (result.isAllowed) {
-                            inPlay.push(key);
+                            inPlay.push(entry);
                         } else {
                             errorResults.push({
-                                key,
+                                entry,
                                 error: errors.AccessDenied,
                             });
                         }
@@ -327,9 +450,9 @@ function multiObjectDelete(authInfo, request, log, callback) {
                 log.trace("access denied due to bucket acl's");
                 // if access denied at the bucket level, no access for
                 // any of the objects so all results will be error results
-                inPlay.forEach(key => {
+                inPlay.forEach(entry => {
                     errorResults.push({
-                        key,
+                        entry,
                         error: errors.AccessDenied,
                     });
                 });
@@ -344,16 +467,11 @@ function multiObjectDelete(authInfo, request, log, callback) {
         },
         function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
             bucket, next) {
-            return getObjMetadataAndDelete(bucketName, quietSetting,
-                errorResults, inPlay, log, (err, quietSetting, errorResults,
-                numOfObjects, successfullyDeleted,
-                totalContentLengthDeleted) => {
-                    next(err, quietSetting, errorResults,
-                        numOfObjects, successfullyDeleted,
-                        totalContentLengthDeleted, bucket);
-                });
+            return getObjMetadataAndDelete(authInfo, canonicalID, request,
+                bucketName, bucket, quietSetting, errorResults, inPlay,
+                log, next);
         },
-    ], (err, quietSetting, errorResults, numOfObjects,
+    ], (err, quietSetting, errorResults, numOfObjectsRemoved,
         successfullyDeleted, totalContentLengthDeleted, bucket) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
@@ -366,8 +484,13 @@ function multiObjectDelete(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
             byteLength: totalContentLengthDeleted,
-            numberOfObjects: numOfObjects,
+            numberOfObjects: numOfObjectsRemoved,
         });
         return callback(null, xml, corsHeaders);
     });
 }
 
+module.exports = {
+    getObjMetadataAndDelete,
+    multiObjectDelete,
+};
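Each vault authorization result is folded back into an `entry` object the rest of the pipeline understands, so the `{ key, versionId }` shape survives the round trip through policy checking. A small runnable sketch with a sample result (the arn and versionId are made up):

const result = {
    isAllowed: true,
    arn: 'arn:aws:s3:::mybucket/folder/key1', // sample arn from vault
    versionId: 'sampleversionId',
};
const slashIndex = result.arn.indexOf('/');
const entry = {
    key: result.arn.slice(slashIndex + 1),
    versionId: result.versionId,
};
console.log(entry); // -> { key: 'folder/key1', versionId: 'sampleversionId' }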
@@ -1,12 +1,13 @@
-import async from 'async';
-import { errors } from 'arsenal';
-import config from '../Config';
+const async = require('async');
+const { errors } = require('arsenal');
 
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import data from '../data/wrapper';
-import services from '../services';
-import { pushMetric } from '../utapi/utilities';
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const constants = require('../../constants');
+const data = require('../data/wrapper');
+const services = require('../services');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
+const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');
 
 /**
  * multipartDelete - DELETE an open multipart upload from a bucket
@@ -19,7 +20,6 @@ import { pushMetric } from '../utapi/utilities';
  * @return {undefined} calls callback from router
  * with err, result and responseMetaHeaders as arguments
  */
-export default
 function multipartDelete(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'multipartDelete' });
 
@@ -32,7 +32,6 @@ function multipartDelete(authInfo, request, log, callback) {
         objectKey,
         uploadId,
         requestType: 'deleteMPU',
-        log,
     };
     // For validating the request at the destinationBucket level
     // params are the same as validating at the MPU level
@@ -42,7 +41,7 @@ function multipartDelete(authInfo, request, log, callback) {
 
     async.waterfall([
         function checkDestBucketVal(next) {
-            services.metadataValidateAuthorization(metadataValParams,
+            metadataValidateBucketAndObj(metadataValParams, log,
                 (err, destinationBucket) => {
                     if (err) {
                         return next(err, destinationBucket);
@@ -61,6 +60,7 @@ function multipartDelete(authInfo, request, log, callback) {
                 });
         },
         function checkMPUval(destBucket, next) {
+            metadataValParams.log = log;
             services.metadataValidateMultipart(metadataValParams,
                 (err, mpuBucket, mpuOverviewArray) => {
                     if (err) {
@@ -136,8 +136,7 @@ function multipartDelete(authInfo, request, log, callback) {
             // if legacy behavior is enabled for 'us-east-1' and
             // request is from 'us-east-1', return 404 instead of
             // 204
-            if (config.usEastBehavior &&
-                locationConstraint === 'us-east-1') {
+            if (isLegacyAWSBehavior(locationConstraint)) {
                 return callback(err, corsHeaders);
             }
             // otherwise ignore error and return 204 status code
@@ -146,3 +145,5 @@ function multipartDelete(authInfo, request, log, callback) {
         return callback(err, corsHeaders);
     });
 }
 
+module.exports = multipartDelete;
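The per-location check replaces the single global `config.usEastBehavior` flag, so legacy 404-on-missing-MPU behavior can be decided per location constraint. The sketch below is a hypothetical shape for such a helper, for illustration only; the real `lib/utilities/legacyAWSBehavior.js` may read the location's configuration differently:

// Hypothetical stand-in for isLegacyAWSBehavior (assumption: legacy
// behavior is keyed off a per-location list; the real helper may consult
// the location constraint's config entry instead).
function isLegacyAWSBehavior(locationConstraint) {
    const legacyLocations = ['us-east-1']; // assumption for this sketch
    return legacyLocations.includes(locationConstraint);
}

console.log(isLegacyAWSBehavior('us-east-1')); // true  -> return 404
console.log(isLegacyAWSBehavior('us-west-2')); // false -> return 204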
@@ -1,15 +1,25 @@
-import async from 'async';
-import { errors } from 'arsenal';
+const async = require('async');
 
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import data from '../data/wrapper';
-import kms from '../kms/wrapper';
-import { logger } from '../utilities/logger';
-import services from '../services';
-import utils from '../utils';
-import validateHeaders from '../utilities/validateHeaders';
-import { pushMetric } from '../utapi/utilities';
-import removeAWSChunked from './apiUtils/object/removeAWSChunked';
+const { errors, versioning, s3validators } = require('arsenal');
+const getMetaHeaders = s3validators.userMetadata.getMetaHeaders;
+
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const locationConstraintCheck
+    = require('./apiUtils/object/locationConstraintCheck');
+const { checkQueryVersionId, versioningPreprocessing }
+    = require('./apiUtils/object/versioning');
+const data = require('../data/wrapper');
+const kms = require('../kms/wrapper');
+const logger = require('../utilities/logger');
+const services = require('../services');
+const validateHeaders = require('../utilities/validateHeaders');
+const { pushMetric } = require('../utapi/utilities');
+const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
+    .validateWebsiteHeader;
+
+const versionIdUtils = versioning.VersionID;
 
 /**
  * Preps metadata to be saved (based on copy or replace request header)
@@ -30,6 +40,13 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
     if (whichMetadata !== 'COPY' && whichMetadata !== 'REPLACE') {
         return { error: errors.InvalidArgument };
     }
+    let whichTagging = headers['x-amz-tagging-directive'];
+    // Default is COPY
+    whichTagging = whichTagging === undefined ? 'COPY' : whichTagging;
+    if (whichTagging !== 'COPY' && whichTagging !== 'REPLACE') {
+        return { error: errors.InvalidArgument
+            .customizeDescription('Unknown tagging directive') };
+    }
     const overrideMetadata = {};
     if (headers['x-amz-server-side-encryption']) {
         overrideMetadata['x-amz-server-side-encryption'] =
@@ -56,8 +73,28 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
     // If COPY, pull all x-amz-meta keys/values from source object
     // Otherwise, pull all x-amz-meta keys/values from request headers
     const userMetadata = whichMetadata === 'COPY' ?
-        utils.getMetaHeaders(sourceObjMD) :
-        utils.getMetaHeaders(headers);
+        getMetaHeaders(sourceObjMD) :
+        getMetaHeaders(headers);
+    if (userMetadata instanceof Error) {
+        log.debug('user metadata validation failed', {
+            error: userMetadata,
+            method: 'objectCopy',
+        });
+        return { error: userMetadata };
+    }
+
+    // If tagging directive is REPLACE but you don't specify any
+    // tags in the request, the destination object will
+    // not have any tags.
+    // If tagging directive is COPY but the source object does not have tags,
+    // the destination object will not have any tags.
+    let tagging;
+    let taggingCopy;
+    if (whichTagging === 'COPY') {
+        taggingCopy = sourceObjMD.tags || {};
+    } else {
+        tagging = headers['x-amz-tagging'] || '';
+    }
 
     // If COPY, pull the necessary headers from source object
     // Otherwise, pull them from request headers
@@ -80,6 +117,8 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
         expires: headersToStoreSource.expires,
         overrideMetadata,
         lastModifiedDate: new Date().toJSON(),
+        tagging,
+        taggingCopy,
     };
 
     // In case whichMetadata === 'REPLACE' but contentType is undefined in copy
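A condensed, runnable restatement of the tagging-directive rules added above (the headers object is a sample; arsenal's error object is replaced with a plain string for the sketch):

function resolveTagging(headers, sourceObjMD) {
    let whichTagging = headers['x-amz-tagging-directive'];
    whichTagging = whichTagging === undefined ? 'COPY' : whichTagging;
    if (whichTagging !== 'COPY' && whichTagging !== 'REPLACE') {
        return { error: 'InvalidArgument: Unknown tagging directive' };
    }
    if (whichTagging === 'COPY') {
        // copy the source object's tags; no source tags means no tags
        return { taggingCopy: sourceObjMD.tags || {} };
    }
    // REPLACE: use the request's tag string; empty means no tags
    return { tagging: headers['x-amz-tagging'] || '' };
}

console.log(resolveTagging({}, { tags: { env: 'prod' } }));
// -> { taggingCopy: { env: 'prod' } }
console.log(resolveTagging({ 'x-amz-tagging-directive': 'REPLACE',
    'x-amz-tagging': 'env=dev' }, {}));
// -> { tagging: 'env=dev' }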
@@ -99,13 +138,13 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
  * includes normalized headers
  * @param {string} sourceBucket - name of source bucket for object copy
  * @param {string} sourceObject - name of source object for object copy
+ * @param {string} sourceVersionId - versionId of source object for copy
  * @param {object} log - the log request
  * @param {function} callback - final callback to call with the result
  * @return {undefined}
  */
-export default
 function objectCopy(authInfo, request, sourceBucket,
-    sourceObject, log, callback) {
+    sourceObject, sourceVersionId, log, callback) {
     log.debug('processing request', { method: 'objectCopy' });
     const destBucketName = request.bucketName;
     const destObjectKey = request.objectKey;
@@ -115,34 +154,38 @@ function objectCopy(authInfo, request, sourceBucket,
         authInfo,
         bucketName: sourceBucket,
         objectKey: sourceObject,
+        versionId: sourceVersionId,
         requestType: 'objectGet',
-        log,
     };
     const valPutParams = {
         authInfo,
         bucketName: destBucketName,
         objectKey: destObjectKey,
         requestType: 'objectPut',
-        log,
     };
     const dataStoreContext = {
         bucketName: destBucketName,
         owner: authInfo.getCanonicalID(),
         namespace: request.namespace,
+        objectKey: destObjectKey,
     };
     const websiteRedirectHeader =
         request.headers['x-amz-website-redirect-location'];
 
-    if (!utils.validateWebsiteHeader(websiteRedirectHeader)) {
+    if (!validateWebsiteHeader(websiteRedirectHeader)) {
         const err = errors.InvalidRedirectLocation;
         log.debug('invalid x-amz-website-redirect-location' +
             `value ${websiteRedirectHeader}`, { error: err });
         return callback(err);
     }
+    const queryContainsVersionId = checkQueryVersionId(request.query);
+    if (queryContainsVersionId instanceof Error) {
+        return callback(queryContainsVersionId);
+    }
+
     return async.waterfall([
         function checkDestAuth(next) {
-            return services.metadataValidateAuthorization(valPutParams,
+            return metadataValidateBucketAndObj(valPutParams, log,
                 (err, destBucketMD, destObjMD) => {
                     if (err) {
                         log.debug('error validating put part of request',
@@ -160,15 +203,32 @@ function objectCopy(authInfo, request, sourceBucket,
                 });
         },
         function checkSourceAuthorization(destBucketMD, destObjMD, next) {
-            return services.metadataValidateAuthorization(valGetParams,
+            return metadataValidateBucketAndObj(valGetParams, log,
                 (err, sourceBucketMD, sourceObjMD) => {
                     if (err) {
                         log.debug('error validating get part of request',
                             { error: err });
-                        return next(err, destBucketMD);
+                        return next(err, null, destBucketMD);
                     }
                     if (!sourceObjMD) {
+                        const err = sourceVersionId ? errors.NoSuchVersion :
+                            errors.NoSuchKey;
                         log.debug('no source object', { sourceObject });
+                        return next(err, null, destBucketMD);
+                    }
+                    if (sourceObjMD.isDeleteMarker) {
+                        log.debug('delete marker on source object',
+                            { sourceObject });
+                        if (sourceVersionId) {
+                            const err = errors.InvalidRequest
+                                .customizeDescription('The source of a copy ' +
+                                'request may not specifically refer to a delete' +
+                                'marker by version id.');
+                            return next(err, destBucketMD);
+                        }
+                        // if user specifies a key in a versioned source bucket
+                        // without specifying a version, and the object has
+                        // a delete marker, return NoSuchKey
                         return next(errors.NoSuchKey, destBucketMD);
                     }
                     const headerValResult =
@@ -202,8 +262,8 @@ function objectCopy(authInfo, request, sourceBucket,
                         sourceObjMD['x-amz-server-side-encryption'];
                     }
                 }
-                return next(null, storeMetadataParams,
-                    dataLocator, destBucketMD, destObjMD);
+                return next(null, storeMetadataParams, dataLocator,
+                    destBucketMD, destObjMD);
             });
         },
         function goGetData(storeMetadataParams, dataLocator, destBucketMD,
@@ -217,6 +277,14 @@ function objectCopy(authInfo, request, sourceBucket,
                 return next(null, storeMetadataParams, dataLocator, destObjMD,
                     serverSideEncryption, destBucketMD);
             }
+
+            const backendInfoObj = locationConstraintCheck(request,
+                storeMetadataParams.metaHeaders, destBucketMD, log);
+            if (backendInfoObj.err) {
+                return next(backendInfoObj.err);
+            }
+            const backendInfo = backendInfoObj.backendInfo;
+
             // dataLocator is an array. need to get and put all parts
             // For now, copy 1 part at a time. Could increase the second
             // argument here to increase the number of parts
@@ -237,7 +305,8 @@ function objectCopy(authInfo, request, sourceBucket,
                         return cb(errors.InternalError);
                     }
                     return data.put(cipherBundle, stream,
-                        part.size, dataStoreContext, log,
+                        part.size, dataStoreContext,
+                        backendInfo, log,
                         (error, partRetrievalInfo) => {
                             if (error) {
                                 return cb(error);
@@ -259,14 +328,17 @@ function objectCopy(authInfo, request, sourceBucket,
                 }
                 // Copied object is not encrypted so just put it
                 // without a cipherBundle
 
                 return data.put(null, stream, part.size,
-                    dataStoreContext, log, (error, partRetrievalInfo) => {
+                    dataStoreContext, backendInfo,
+                    log, (error, partRetrievalInfo) => {
                         if (error) {
                             return cb(error);
                         }
                         const partResult = {
                             key: partRetrievalInfo.key,
-                            dataStoreName: partRetrievalInfo.dataStoreName,
+                            dataStoreName: partRetrievalInfo.
+                                dataStoreName,
                             start: part.start,
                             size: part.size,
                         };
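The source-validation branch above encodes two AWS rules about delete markers as copy sources. A small runnable sketch of the decision (error names as in arsenal, returned as strings here):

function copySourceError(sourceObjMD, sourceVersionId) {
    if (!sourceObjMD) {
        return sourceVersionId ? 'NoSuchVersion' : 'NoSuchKey';
    }
    if (sourceObjMD.isDeleteMarker) {
        // explicitly addressing a delete marker by version id is invalid;
        // addressing a key whose latest version is a delete marker
        // behaves like a missing key
        return sourceVersionId ? 'InvalidRequest' : 'NoSuchKey';
    }
    return null; // source is usable
}

console.log(copySourceError(null, 'vid'));                    // NoSuchVersion
console.log(copySourceError({ isDeleteMarker: true }, null)); // NoSuchKey
console.log(copySourceError({ isDeleteMarker: true }, 'vid')); // InvalidRequest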
@@ -283,37 +355,58 @@ function objectCopy(authInfo, request, sourceBucket,
                     destObjMD, serverSideEncryption, destBucketMD);
             });
         },
-        function storeNewMetadata(storeMetadataParams, destDataGetInfoArr,
+        function getVersioningInfo(storeMetadataParams, destDataGetInfoArr,
             destObjMD, serverSideEncryption, destBucketMD, next) {
+            return versioningPreprocessing(destBucketName,
+                destBucketMD, destObjectKey, destObjMD, log,
+                (err, options) => {
+                    if (err) {
+                        log.debug('error processing versioning info',
+                            { error: err });
+                        return next(err, null, destBucketMD);
+                    }
+                    // eslint-disable-next-line
+                    storeMetadataParams.versionId = options.versionId;
+                    // eslint-disable-next-line
+                    storeMetadataParams.versioning = options.versioning;
+                    // eslint-disable-next-line
+                    storeMetadataParams.isNull = options.isNull;
+                    // eslint-disable-next-line
+                    storeMetadataParams.nullVersionId = options.nullVersionId;
+                    const dataToDelete = options.dataToDelete;
+                    return next(null, storeMetadataParams, destDataGetInfoArr,
+                        destObjMD, serverSideEncryption, destBucketMD,
+                        dataToDelete);
+                });
+        },
+        function storeNewMetadata(storeMetadataParams, destDataGetInfoArr,
+            destObjMD, serverSideEncryption, destBucketMD, dataToDelete, next) {
             return services.metadataStoreObject(destBucketName,
-                destDataGetInfoArr,
-                serverSideEncryption, storeMetadataParams, err => {
+                destDataGetInfoArr, serverSideEncryption,
+                storeMetadataParams, (err, result) => {
                     if (err) {
                         log.debug('error storing new metadata', { error: err });
-                        return next(err, destBucketMD);
+                        return next(err, null, destBucketMD);
                     }
                     // Clean up any potential orphans in data if object
                     // put is an overwrite of already existing
-                    // object with same name
-                    // so long as the source is not the same as the destination
-                    let dataToDelete;
-                    if (destObjMD && destObjMD.location &&
-                        !sourceIsDestination) {
-                        dataToDelete = Array.isArray(destObjMD.location) ?
-                            destObjMD.location : [destObjMD.location];
-                        data.batchDelete(dataToDelete,
+                    // object with same name, so long as the source is not
+                    // the same as the destination
+                    if (!sourceIsDestination && dataToDelete) {
+                        data.batchDelete(dataToDelete, request.method, null,
                             logger.newRequestLoggerFromSerializedUids(
                                 log.getSerializedUids()));
                     }
                     const sourceObjSize = storeMetadataParams.size;
-                    const destObjPrevSize = destObjMD ?
+                    const destObjPrevSize = (destObjMD &&
+                        destObjMD['content-length'] !== undefined) ?
                         destObjMD['content-length'] : null;
-                    return next(null, destBucketMD, storeMetadataParams,
+                    return next(null, result, destBucketMD, storeMetadataParams,
                         serverSideEncryption, sourceObjSize, destObjPrevSize);
                 });
         },
-    ], (err, destBucketMD, storeMetadataParams, serverSideEncryption,
-        sourceObjSize, destObjPrevSize) => {
+    ], (err, storingNewMdResult, destBucketMD, storeMetadataParams,
+        serverSideEncryption, sourceObjSize, destObjPrevSize) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, destBucketMD);
 
@@ -338,15 +431,24 @@ function objectCopy(authInfo, request, sourceBucket,
                 serverSideEncryption.masterKeyId;
             }
         }
+        if (sourceVersionId) {
+            additionalHeaders['x-amz-copy-source-version-id'] =
+                versionIdUtils.encode(sourceVersionId);
+        }
+        const isVersioned = storingNewMdResult && storingNewMdResult.versionId;
+        if (isVersioned) {
+            additionalHeaders['x-amz-version-id'] =
+                versionIdUtils.encode(storingNewMdResult.versionId);
+        }
        pushMetric('copyObject', log, {
            authInfo,
            bucket: destBucketName,
            newByteLength: sourceObjSize,
-            oldByteLength: destObjPrevSize,
+            oldByteLength: isVersioned ? null : destObjPrevSize,
        });
-        // TODO: Add version headers for response
-        // (if source or destination is version).
        // Add expiration header if lifecycle enabled
        return callback(null, xml, additionalHeaders);
    });
 }
 
+module.exports = objectCopy;
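This resolves the old TODO: the response now carries version headers directly. A sketch of the header logic using arsenal's VersionID codec, as the diff does; it is runnable inside the repo where arsenal is a dependency, and the raw version id below is a sample value:

const { versioning } = require('arsenal');
const versionIdUtils = versioning.VersionID;

const additionalHeaders = {};
const sourceVersionId = null;                       // sample: unversioned source
const storingNewMdResult = { versionId: '98765432101234567890' }; // sample
if (sourceVersionId) {
    additionalHeaders['x-amz-copy-source-version-id'] =
        versionIdUtils.encode(sourceVersionId);
}
if (storingNewMdResult && storingNewMdResult.versionId) {
    additionalHeaders['x-amz-version-id'] =
        versionIdUtils.encode(storingNewMdResult.versionId);
}
console.log(additionalHeaders); // -> { 'x-amz-version-id': '<encoded id>' }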
@@ -1,13 +1,18 @@
-import { errors } from 'arsenal';
+const async = require('async');
+const { errors, versioning } = require('arsenal');
 
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import services from '../services';
-import { pushMetric } from '../utapi/utilities';
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const services = require('../services');
+const { pushMetric } = require('../utapi/utilities');
+const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
+const { decodeVersionId, preprocessingVersioningDelete }
+    = require('./apiUtils/object/versioning');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+
+const versionIdUtils = versioning.VersionID;
 
 /**
  * objectDelete - DELETE an object from a bucket
- * (currently supports only non-versioned buckets)
  * @param {AuthInfo} authInfo - requester's infos
  * @param {object} request - request object given by router,
  * includes normalized headers
@@ -15,52 +20,134 @@ import { pushMetric } from '../utapi/utilities';
  * @param {function} cb - final cb to call with the result and response headers
  * @return {undefined}
  */
-export default function objectDelete(authInfo, request, log, cb) {
+function objectDelete(authInfo, request, log, cb) {
     log.debug('processing request', { method: 'objectDelete' });
     if (authInfo.isRequesterPublicUser()) {
-        log.warn('operation not available for public user');
+        log.debug('operation not available for public user');
         return cb(errors.AccessDenied);
     }
     const bucketName = request.bucketName;
     const objectKey = request.objectKey;
 
+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return cb(decodedVidResult);
+    }
+    const reqVersionId = decodedVidResult;
+
     const valParams = {
         authInfo,
         bucketName,
         objectKey,
+        versionId: reqVersionId,
         requestType: 'objectDelete',
-        log,
     };
-    return services.metadataValidateAuthorization(valParams,
-        (err, bucket, objMD) => {
-            const corsHeaders = collectCorsHeaders(request.headers.origin,
-                request.method, bucket);
+
+    const canonicalID = authInfo.getCanonicalID();
+    return async.waterfall([
+        function validateBucketAndObj(next) {
+            return metadataValidateBucketAndObj(valParams, log,
+                (err, bucketMD, objMD) => {
                     if (err) {
-                log.debug('error processing request', {
-                    error: err,
-                    method: 'metadataValidateAuthorization',
-                });
-                return cb(err, corsHeaders);
+                        return next(err, bucketMD);
                     }
+                    const versioningCfg = bucketMD.getVersioningConfiguration();
                    if (!objMD) {
-                return cb(errors.NoSuchKey, corsHeaders);
+                        if (!versioningCfg) {
+                            return next(errors.NoSuchKey, bucketMD);
                        }
-            if (objMD['content-length']) {
+                        // AWS does not return an error when trying to delete a
+                        // specific version that does not exist. We skip to the end
+                        // of the waterfall here.
+                        if (reqVersionId) {
+                            log.debug('trying to delete specific version ' +
+                                ' that does not exist');
+                            return next(errors.NoSuchVersion, bucketMD);
+                        }
+                        // To adhere to AWS behavior, create a delete marker even
+                        // if trying to delete an object that does not exist when
+                        // versioning has been configured
+                        return next(null, bucketMD, objMD);
+                    }
+                    if (objMD['content-length'] !== undefined) {
                        log.end().addDefaultFields({
-                    contentLength: objMD['content-length'],
+                            bytesDeleted: objMD['content-length'],
                        });
                    }
-            return services.deleteObject(bucketName, objMD, objectKey, log,
-                err => {
+                    return next(null, bucketMD, objMD);
+                });
+        },
+        function getVersioningInfo(bucketMD, objectMD, next) {
+            return preprocessingVersioningDelete(bucketName,
+                bucketMD, objectMD, reqVersionId, log,
+                (err, options) => {
                    if (err) {
-                        return cb(err, corsHeaders);
+                        log.error('err processing versioning info',
+                            { error: err });
+                        return next(err, bucketMD);
                    }
-                    pushMetric('deleteObject', log, {
-                        authInfo,
-                        bucket: bucketName,
-                        byteLength: objMD['content-length'],
-                        numberOfObjects: 1,
-                    });
-                    return cb(null, corsHeaders);
+                    return next(null, bucketMD, objectMD, options);
                });
+        },
+        function deleteOperation(bucketMD, objectMD, delOptions, next) {
+            const deleteInfo = {
+                removeDeleteMarker: false,
+                newDeleteMarker: false,
+            };
+            if (delOptions && delOptions.deleteData) {
+                if (objectMD.isDeleteMarker) {
+                    // record that we deleted a delete marker to set
+                    // response headers accordingly
+                    deleteInfo.removeDeleteMarker = true;
+                }
+                return services.deleteObject(bucketName, objectMD, objectKey,
+                    delOptions, log, (err, delResult) => next(err, bucketMD,
+                        objectMD, delResult, deleteInfo));
+            }
+            // putting a new delete marker
+            deleteInfo.newDeleteMarker = true;
+            return createAndStoreObject(bucketName, bucketMD,
+                objectKey, objectMD, authInfo, canonicalID, null, request,
+                deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) =>
+                next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo));
+        },
+    ], (err, bucketMD, objectMD, result, deleteInfo) => {
+        const resHeaders = collectCorsHeaders(request.headers.origin,
+            request.method, bucketMD);
+        // if deleting a specific version or delete marker, return version id
+        // in the response headers, even in case of NoSuchVersion
+        if (reqVersionId) {
+            resHeaders['x-amz-version-id'] = reqVersionId === 'null' ?
+                reqVersionId : versionIdUtils.encode(reqVersionId);
+            if (deleteInfo && deleteInfo.removeDeleteMarker) {
+                resHeaders['x-amz-delete-marker'] = true;
+            }
+        }
+        if (err) {
+            log.debug('error processing request', { error: err,
+                method: 'objectDelete' });
+            return cb(err, resHeaders);
+        }
+        if (deleteInfo.newDeleteMarker) {
+            // if we created a new delete marker, return true for
+            // x-amz-delete-marker and the version ID of the new delete marker
+            if (result.versionId) {
+                resHeaders['x-amz-delete-marker'] = true;
+                resHeaders['x-amz-version-id'] = result.versionId === 'null' ?
+                    result.versionId : versionIdUtils.encode(result.versionId);
+            }
+            pushMetric('putDeleteMarkerObject', log, { authInfo,
+                bucket: bucketName });
+        } else {
+            pushMetric('deleteObject', log, { authInfo, bucket: bucketName,
+                byteLength: objectMD['content-length'], numberOfObjects: 1 });
+        }
+        return cb(err, resHeaders);
     });
 }
 
+module.exports = objectDelete;
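The rewritten handler implements a small decision matrix for versioned deletes. A hedged, runnable restatement for the versioning-enabled case (versioning-suspended buckets add further nuance not covered by this sketch):

function deleteOutcome(versioningEnabled, objMDExists, reqVersionId) {
    if (!versioningEnabled) {
        return objMDExists ? 'delete data' : 'NoSuchKey';
    }
    if (!objMDExists && reqVersionId) {
        return 'NoSuchVersion';
    }
    if (reqVersionId) {
        return 'delete that version (maybe a delete marker)';
    }
    // no version specified on a versioned bucket: never delete data,
    // always write a new delete marker, even for a missing key
    return 'create delete marker';
}

console.log(deleteOutcome(false, false, null)); // NoSuchKey
console.log(deleteOutcome(true, false, null));  // create delete marker
console.log(deleteOutcome(true, true, 'vid'));  // delete that version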
@@ -0,0 +1,94 @@
+const async = require('async');
+const { errors } = require('arsenal');
+
+const { decodeVersionId, getVersionIdResHeader }
+    = require('./apiUtils/object/versioning');
+
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const metadata = require('../metadata/wrapper');
+
+/**
+ * Object Delete Tagging - Delete tag set from an object
+ * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
+ * @param {object} request - http request object
+ * @param {object} log - Werelogs logger
+ * @param {function} callback - callback to server
+ * @return {undefined}
+ */
+function objectDeleteTagging(authInfo, request, log, callback) {
+    log.debug('processing request', { method: 'objectDeleteTagging' });
+
+    const bucketName = request.bucketName;
+    const objectKey = request.objectKey;
+
+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return process.nextTick(() => callback(decodedVidResult));
+    }
+    const reqVersionId = decodedVidResult;
+
+    const metadataValParams = {
+        authInfo,
+        bucketName,
+        objectKey,
+        requestType: 'bucketOwnerAction',
+        versionId: reqVersionId,
+    };
+
+    return async.waterfall([
+        next => metadataValidateBucketAndObj(metadataValParams, log,
+            (err, bucket, objectMD) => {
+                if (err) {
+                    log.trace('request authorization failed',
+                        { method: 'objectDeleteTagging', error: err });
+                    return next(err);
+                }
+                if (!objectMD) {
+                    const err = reqVersionId ? errors.NoSuchVersion :
+                        errors.NoSuchKey;
+                    log.trace('error no object metadata found',
+                        { method: 'objectDeleteTagging', error: err });
+                    return next(err, bucket);
+                }
+                if (objectMD.isDeleteMarker) {
+                    log.trace('version is a delete marker',
+                        { method: 'objectDeleteTagging' });
+                    return next(errors.MethodNotAllowed, bucket);
+                }
+                return next(null, bucket, objectMD);
+            }),
+        (bucket, objectMD, next) => {
+            // eslint-disable-next-line no-param-reassign
+            objectMD.tags = {};
+            const params = objectMD.versionId ? { versionId:
+                objectMD.versionId } : {};
+            metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
+                log, err =>
+                next(err, bucket, objectMD));
+        },
+    ], (err, bucket, objectMD) => {
+        const additionalResHeaders = collectCorsHeaders(request.headers.origin,
+            request.method, bucket);
+        if (err) {
+            log.trace('error processing request', { error: err,
+                method: 'objectDeleteTagging' });
+        } else {
+            pushMetric('deleteObjectTagging', log, {
+                authInfo,
+                bucket: bucketName,
+            });
+            const verCfg = bucket.getVersioningConfiguration();
+            additionalResHeaders['x-amz-version-id'] =
+                getVersionIdResHeader(verCfg, objectMD);
+        }
+        return callback(err, additionalResHeaders);
+    });
+}
+
+module.exports = objectDeleteTagging;
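A pattern worth noting in the new tagging routes is the early versionId guard: decoding happens before any metadata round-trip, and the error is delivered via `process.nextTick` so the callback stays asynchronous even on the failure path. A self-contained sketch, with a stand-in decoder in place of the real `decodeVersionId` from apiUtils:

```js
// Stand-in decoder for the example; the real one lives in
// ./apiUtils/object/versioning and decodes encoded version ids.
function decodeVersionId(query) {
    if (!query || query.versionId === undefined) {
        return undefined; // no version requested
    }
    if (query.versionId === 'bad') {
        return new Error('Invalid version id specified');
    }
    return query.versionId;
}

function handler(query, callback) {
    const decoded = decodeVersionId(query);
    if (decoded instanceof Error) {
        // keep the callback asynchronous so callers never observe a
        // synchronous invocation on the early-error path
        return process.nextTick(() => callback(decoded));
    }
    return callback(null, decoded);
}

handler({ versionId: 'bad' }, err => console.log(err.message));
// => 'Invalid version id specified'
```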
@@ -1,91 +1,113 @@
-import { errors } from 'arsenal';
-
-import { parseRange } from './apiUtils/object/parseRange';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import collectResponseHeaders from '../utilities/collectResponseHeaders';
-import services from '../services';
-import validateHeaders from '../utilities/validateHeaders';
-import { pushMetric } from '../utapi/utilities';
+const { errors } = require('arsenal');
+
+const { parseRange } = require('arsenal/lib/network/http/utils');
+
+const { decodeVersionId } = require('./apiUtils/object/versioning');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const collectResponseHeaders = require('../utilities/collectResponseHeaders');
+const validateHeaders = require('../utilities/validateHeaders');
+const { pushMetric } = require('../utapi/utilities');
+const { getVersionIdResHeader } = require('./apiUtils/object/versioning');
+const setPartRanges = require('./apiUtils/object/setPartRanges');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');

 /**
  * GET Object - Get an object
  * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
  * @param {object} request - normalized request object
+ * @param {boolean} returnTagCount - returns the x-amz-tagging-count header
  * @param {object} log - Werelogs instance
  * @param {function} callback - callback to function in route
  * @return {undefined}
  */
-export default
-function objectGet(authInfo, request, log, callback) {
+function objectGet(authInfo, request, returnTagCount, log, callback) {
     log.debug('processing request', { method: 'objectGet' });
     const bucketName = request.bucketName;
     const objectKey = request.objectKey;
+
+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return callback(decodedVidResult);
+    }
+    const versionId = decodedVidResult;
+
     const mdValParams = {
         authInfo,
         bucketName,
         objectKey,
+        versionId,
         requestType: 'objectGet',
-        log,
     };
-    services.metadataValidateAuthorization(mdValParams, (err, bucket,
-    objMD) => {
+    return metadataValidateBucketAndObj(mdValParams, log,
+        (err, bucket, objMD) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
         if (err) {
-            log.debug('error processing request', { error: err });
+            log.debug('error processing request', {
+                error: err,
+                method: 'metadataValidateBucketAndObj',
+            });
             return callback(err, null, corsHeaders);
         }
         if (!objMD) {
-            return callback(errors.NoSuchKey, null, corsHeaders);
+            const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
+            return callback(err, null, corsHeaders);
         }
+        const verCfg = bucket.getVersioningConfiguration();
+        if (objMD.isDeleteMarker) {
+            const responseMetaHeaders = Object.assign({},
+                { 'x-amz-delete-marker': true }, corsHeaders);
+            if (!versionId) {
+                return callback(errors.NoSuchKey, null, responseMetaHeaders);
+            }
+            // return MethodNotAllowed if requesting a specific
+            // version that has a delete marker
+            responseMetaHeaders['x-amz-version-id'] =
+                getVersionIdResHeader(verCfg, objMD);
+            return callback(errors.MethodNotAllowed, null,
+                responseMetaHeaders);
+        }
         const headerValResult = validateHeaders(objMD, request.headers);
         if (headerValResult.error) {
             return callback(headerValResult.error, null, corsHeaders);
         }
-        const responseMetaHeaders = collectResponseHeaders(objMD, corsHeaders);
-        // 0 bytes file
-        if (objMD.location === null) {
-            if (request.headers.range) {
-                return callback(errors.InvalidRange, null, corsHeaders);
-            }
-            pushMetric('getObject', log, {
-                authInfo,
-                bucket: bucketName,
-                newByteLength: 0,
-            });
-            return callback(null, null, responseMetaHeaders);
-        }
-        let range;
-        let maxContentLength;
-        if (request.headers.range) {
-            maxContentLength =
-                parseInt(responseMetaHeaders['Content-Length'], 10);
-            responseMetaHeaders['Accept-Ranges'] = 'bytes';
-            const parseRangeRes = parseRange(request.headers.range,
-                maxContentLength);
-            range = parseRangeRes.range;
-            const error = parseRangeRes.error;
-            if (error) {
-                return callback(error, null, corsHeaders);
-            }
-            if (range) {
-                // End of range should be included so + 1
-                responseMetaHeaders['Content-Length'] =
-                    Math.min(maxContentLength - range[0],
-                    range[1] - range[0] + 1);
-                responseMetaHeaders['Content-Range'] = `bytes ${range[0]}-`
-                    + `${Math.min(maxContentLength - 1, range[1])}` +
-                    `/${maxContentLength}`;
-            }
-        }
-        // To provide for backwards compatibility before md-model-version 2,
-        // need to handle cases where objMD.location is just a string
-        const dataLocator = Array.isArray(objMD.location) ?
-            objMD.location : [{ key: objMD.location }];
-        // If have a data model before version 2, cannot support get range
-        // for objects with multiple parts
-        if (range && dataLocator.length > 1 &&
-            dataLocator[0].start === undefined) {
-            return callback(errors.NotImplemented, null, corsHeaders);
-        }
+        const responseMetaHeaders = collectResponseHeaders(objMD,
+            corsHeaders, verCfg, returnTagCount);
+
+        const objLength = (objMD.location === null ?
+            0 : parseInt(objMD['content-length'], 10));
+        let byteRange;
+        if (request.headers.range) {
+            const { range, error } = parseRange(request.headers.range,
+                objLength);
+            if (error) {
+                return callback(error, null, corsHeaders);
+            }
+            responseMetaHeaders['Accept-Ranges'] = 'bytes';
+            if (range) {
+                byteRange = range;
+                // End of range should be included so + 1
+                responseMetaHeaders['Content-Length'] =
+                    range[1] - range[0] + 1;
+                responseMetaHeaders['Content-Range'] =
+                    `bytes ${range[0]}-${range[1]}/${objLength}`;
+            }
+        }
+        let dataLocator = null;
+        if (objMD.location !== null) {
+            // To provide for backwards compatibility before
+            // md-model-version 2, need to handle cases where
+            // objMD.location is just a string
+            dataLocator = Array.isArray(objMD.location) ?
+                objMD.location : [{ key: objMD.location }];
+            // If have a data model before version 2, cannot support
+            // get range for objects with multiple parts
+            if (byteRange && dataLocator.length > 1 &&
+                dataLocator[0].start === undefined) {
+                return callback(errors.NotImplemented, null, corsHeaders);
+            }
@@ -97,11 +119,15 @@ function objectGet(authInfo, request, log, callback) {
                 objMD['x-amz-server-side-encryption'];
             }
         }
+            dataLocator = setPartRanges(dataLocator, byteRange);
+        }
         pushMetric('getObject', log, {
             authInfo,
             bucket: bucketName,
             newByteLength: responseMetaHeaders['Content-Length'],
         });
-        return callback(null, dataLocator, responseMetaHeaders, range);
+        return callback(null, dataLocator, responseMetaHeaders, byteRange);
     });
 }
+
+module.exports = objectGet;
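The new range logic above computes response headers from an inclusive `[start, end]` pair. A worked sketch of just that arithmetic, assuming `parseRange` has already validated and clamped the range against the object length (which is what the Arsenal helper is expected to do before this code runs):

```js
// For an inclusive byte range [start, end] within an object of objLength
// bytes, the body size is end - start + 1 (both endpoints included).
function rangeHeaders(range, objLength) {
    const [start, end] = range;
    return {
        'Content-Length': end - start + 1,
        'Content-Range': `bytes ${start}-${end}/${objLength}`,
    };
}

// First 100 bytes of a 1000-byte object:
console.log(rangeHeaders([0, 99], 1000));
// => { 'Content-Length': 100, 'Content-Range': 'bytes 0-99/1000' }
```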
@@ -1,11 +1,14 @@
-import { errors } from 'arsenal';
-
-import aclUtils from '../utilities/aclUtils';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import { pushMetric } from '../utapi/utilities';
-import services from '../services';
-import vault from '../auth/vault';
+const async = require('async');
+const { errors } = require('arsenal');
+
+const aclUtils = require('../utilities/aclUtils');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const constants = require('../../constants');
+const { pushMetric } = require('../utapi/utilities');
+const { decodeVersionId, getVersionIdResHeader }
+    = require('./apiUtils/object/versioning');
+const vault = require('../auth/vault');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');

 // Sample XML response:
 /*
@@ -28,7 +31,6 @@ import vault from '../auth/vault';
 </AccessControlPolicy>
 */
-

 /**
  * objectGetACL - Return ACL for object
  * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@@ -37,16 +39,27 @@ import vault from '../auth/vault';
  * @param {function} callback - callback to respond to http request
  * @return {undefined}
  */
-export default function objectGetACL(authInfo, request, log, callback) {
+function objectGetACL(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'objectGetACL' });
     const bucketName = request.bucketName;
     const objectKey = request.objectKey;
+
+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return callback(decodedVidResult);
+    }
+    const versionId = decodedVidResult;
+
     const metadataValParams = {
         authInfo,
         bucketName,
         objectKey,
+        versionId,
         requestType: 'objectGetACL',
-        log,
     };
     const grantInfo = {
         grants: [],
@@ -60,20 +73,38 @@ export default function objectGetACL(authInfo, request, log, callback) {
         constants.logId,
     ];

-    services.metadataValidateAuthorization(metadataValParams,
-        (err, bucket, objectMD) => {
-            const corsHeaders = collectCorsHeaders(request.headers.origin,
-                request.method, bucket);
-            if (err) {
-                log.trace('request authorization failed',
-                    { method: 'objectGetACL', error: err });
-                return callback(err, null, corsHeaders);
-            }
-            if (!objectMD) {
-                log.trace('error processing request',
-                    { method: 'objectGetACL', error: err });
-                return callback(errors.NoSuchKey, null, corsHeaders);
-            }
+    return async.waterfall([
+        function validateBucketAndObj(next) {
+            return metadataValidateBucketAndObj(metadataValParams, log,
+                (err, bucket, objectMD) => {
+                    if (err) {
+                        log.trace('request authorization failed',
+                            { method: 'objectGetACL', error: err });
+                        return next(err);
+                    }
+                    if (!objectMD) {
+                        const err = versionId ? errors.NoSuchVersion :
+                            errors.NoSuchKey;
+                        log.trace('error processing request',
+                            { method: 'objectGetACL', error: err });
+                        return next(err, bucket);
+                    }
+                    if (objectMD.isDeleteMarker) {
+                        if (versionId) {
+                            log.trace('requested version is delete marker',
+                                { method: 'objectGetACL' });
+                            return next(errors.MethodNotAllowed);
+                        }
+                        log.trace('most recent version is delete marker',
+                            { method: 'objectGetACL' });
+                        return next(errors.NoSuchKey);
+                    }
+                    return next(null, bucket, objectMD);
+                });
+        },
+        function gatherACLs(bucket, objectMD, next) {
+            const verCfg = bucket.getVersioningConfiguration();
+            const resVersionId = getVersionIdResHeader(verCfg, objectMD);
             const objectACL = objectMD.acl;
             const allSpecificGrants = [].concat(
                 objectACL.FULL_CONTROL,
@@ -105,11 +136,7 @@ export default function objectGetACL(authInfo, request, log, callback) {
             }
             grantInfo.grants = grantInfo.grants.concat(cannedGrants);
             const xml = aclUtils.convertToXml(grantInfo);
-            pushMetric('getObjectAcl', log, {
-                authInfo,
-                bucket: bucketName,
-            });
-            return callback(null, xml, corsHeaders);
+            return next(null, bucket, xml, resVersionId);
         }
         /**
          * Build array of all canonicalIDs used in ACLs so duplicates
@@ -139,11 +166,7 @@ export default function objectGetACL(authInfo, request, log, callback) {
             */
             grantInfo.grants = grantInfo.grants.concat(uriGrantInfo);
             const xml = aclUtils.convertToXml(grantInfo);
-            pushMetric('getObjectAcl', log, {
-                authInfo,
-                bucket: bucketName,
-            });
-            return callback(null, xml, corsHeaders);
+            return next(null, bucket, xml, resVersionId);
         }
         /**
          * If acl's set by account canonicalID,
@@ -154,7 +177,7 @@ export default function objectGetACL(authInfo, request, log, callback) {
             if (err) {
                 log.trace('error processing request',
                     { method: 'objectGetACL', error: err });
-                return callback(err, null, corsHeaders);
+                return next(err, bucket);
             }
             const individualGrants = canonicalIDs.map(canonicalID => {
             /**
@@ -180,11 +203,22 @@ export default function objectGetACL(authInfo, request, log, callback) {
                 .concat(individualGrants).concat(uriGrantInfo);
             // parse info about accounts and owner info to convert to xml
             const xml = aclUtils.convertToXml(grantInfo);
+            return next(null, bucket, xml, resVersionId);
+            });
+        },
+    ], (err, bucket, xml, resVersionId) => {
+        const resHeaders = collectCorsHeaders(request.headers.origin,
+            request.method, bucket);
+        if (err) {
+            return callback(err, null, resHeaders);
+        }
         pushMetric('getObjectAcl', log, {
             authInfo,
             bucket: bucketName,
         });
-        return callback(null, xml, corsHeaders);
-    });
+        resHeaders['x-amz-version-id'] = resVersionId;
+        return callback(null, xml, resHeaders);
+    });
 }
+
+module.exports = objectGetACL;
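The refactor above threads `bucket` through every `next(err, bucket)` call so the final waterfall handler can still build CORS headers for error responses. A minimal runnable sketch of that shape; it needs only the `async` package, and the names are illustrative:

```js
const async = require('async');

function fakeValidate(next) {
    // pretend validation fails, but we still report which bucket it was
    next(new Error('AccessDenied'), { name: 'my-bucket' });
}

async.waterfall([
    next => fakeValidate(next),
    (bucket, next) => next(null, bucket, '<xml/>'),
], (err, bucket, xml) => {
    // async.waterfall forwards the extra arguments alongside the error,
    // so `bucket` is defined on both the success and failure paths
    console.log(err ? `error for ${bucket.name}: ${err.message}`
        : `ok: ${xml}`);
});
// => 'error for my-bucket: AccessDenied'
```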
@@ -0,0 +1,95 @@
+const async = require('async');
+const { errors } = require('arsenal');
+
+const { decodeVersionId, getVersionIdResHeader }
+    = require('./apiUtils/object/versioning');
+
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { convertToXml } = require('./apiUtils/object/tagging');
+
+/**
+ * Object Get Tagging - Return tag for object
+ * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
+ * @param {object} request - http request object
+ * @param {object} log - Werelogs logger
+ * @param {function} callback - callback to server
+ * @return {undefined}
+ */
+function objectGetTagging(authInfo, request, log, callback) {
+    log.debug('processing request', { method: 'objectGetTagging' });
+
+    const bucketName = request.bucketName;
+    const objectKey = request.objectKey;
+
+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return process.nextTick(() => callback(decodedVidResult));
+    }
+    const reqVersionId = decodedVidResult;
+
+    const metadataValParams = {
+        authInfo,
+        bucketName,
+        objectKey,
+        requestType: 'bucketOwnerAction',
+        versionId: reqVersionId,
+    };
+
+    return async.waterfall([
+        next => metadataValidateBucketAndObj(metadataValParams, log,
+            (err, bucket, objectMD) => {
+                if (err) {
+                    log.trace('request authorization failed',
+                        { method: 'objectGetTagging', error: err });
+                    return next(err);
+                }
+                if (!objectMD) {
+                    const err = reqVersionId ? errors.NoSuchVersion :
+                        errors.NoSuchKey;
+                    log.trace('error no object metadata found',
+                        { method: 'objectGetTagging', error: err });
+                    return next(err, bucket);
+                }
+                if (objectMD.isDeleteMarker) {
+                    if (reqVersionId) {
+                        log.trace('requested version is delete marker',
+                            { method: 'objectGetTagging' });
+                        return next(errors.MethodNotAllowed);
+                    }
+                    log.trace('most recent version is delete marker',
+                        { method: 'objectGetTagging' });
+                    return next(errors.NoSuchKey);
+                }
+                return next(null, bucket, objectMD);
+            }),
+        (bucket, objectMD, next) => {
+            const tags = objectMD.tags;
+            const xml = convertToXml(tags);
+            next(null, bucket, xml, objectMD);
+        },
+    ], (err, bucket, xml, objectMD) => {
+        const additionalResHeaders = collectCorsHeaders(request.headers.origin,
+            request.method, bucket);
+        if (err) {
+            log.trace('error processing request', { error: err,
+                method: 'objectGetTagging' });
+        } else {
+            pushMetric('getObjectTagging', log, {
+                authInfo,
+                bucket: bucketName,
+            });
+            const verCfg = bucket.getVersioningConfiguration();
+            additionalResHeaders['x-amz-version-id'] =
+                getVersionIdResHeader(verCfg, objectMD);
+        }
+        return callback(err, xml, additionalResHeaders);
+    });
+}
+
+module.exports = objectGetTagging;
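objectGetTagging and objectGetACL above share the same error choices around missing objects and delete markers. A pure-function sketch of that decision table; the error names match the Arsenal error set used in the diffs, while the function itself is illustrative:

```js
const NO_SUCH_KEY = 'NoSuchKey';
const NO_SUCH_VERSION = 'NoSuchVersion';
const METHOD_NOT_ALLOWED = 'MethodNotAllowed';

function errorFor(objectMD, reqVersionId) {
    if (!objectMD) {
        // nothing found: 404, flavored by whether a version was named
        return reqVersionId ? NO_SUCH_VERSION : NO_SUCH_KEY;
    }
    if (objectMD.isDeleteMarker) {
        // naming a delete-marker version explicitly is a 405; hitting one
        // implicitly (as the latest version) behaves like a missing key
        return reqVersionId ? METHOD_NOT_ALLOWED : NO_SUCH_KEY;
    }
    return null; // no error
}

console.log(errorFor(null, 'v1'));                      // NoSuchVersion
console.log(errorFor({ isDeleteMarker: true }, null));  // NoSuchKey
```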
@@ -1,10 +1,13 @@
-import { errors } from 'arsenal';
-
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import collectResponseHeaders from '../utilities/collectResponseHeaders';
-import services from '../services';
-import validateHeaders from '../utilities/validateHeaders';
-import { pushMetric } from '../utapi/utilities';
+const { errors } = require('arsenal');
+
+const { decodeVersionId } = require('./apiUtils/object/versioning');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const collectResponseHeaders = require('../utilities/collectResponseHeaders');
+const validateHeaders = require('../utilities/validateHeaders');
+const { pushMetric } = require('../utapi/utilities');
+const { getVersionIdResHeader } = require('./apiUtils/object/versioning');
+
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');

 /**
  * HEAD Object - Same as Get Object but only respond with headers
@@ -16,42 +19,66 @@ import { pushMetric } from '../utapi/utilities';
  * @return {undefined}
  *
  */
-export default function objectHead(authInfo, request, log, callback) {
+function objectHead(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'objectHead' });
     const bucketName = request.bucketName;
     const objectKey = request.objectKey;
-    const metadataValParams = {
+
+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return callback(decodedVidResult);
+    }
+    const versionId = decodedVidResult;
+
+    const mdValParams = {
         authInfo,
         bucketName,
         objectKey,
+        versionId,
         requestType: 'objectHead',
-        log,
     };
-    return services.metadataValidateAuthorization(metadataValParams,
+    return metadataValidateBucketAndObj(mdValParams, log,
         (err, bucket, objMD) => {
             const corsHeaders = collectCorsHeaders(request.headers.origin,
                 request.method, bucket);
             if (err) {
-                log.debug('error processing request', {
+                log.debug('error validating request', {
                     error: err,
-                    method: 'metadataValidateAuthorization',
+                    method: 'objectHead',
                 });
                 return callback(err, corsHeaders);
             }
             if (!objMD) {
-                return callback(errors.NoSuchKey, corsHeaders);
+                const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
+                return callback(err, corsHeaders);
+            }
+            const verCfg = bucket.getVersioningConfiguration();
+            if (objMD.isDeleteMarker) {
+                const responseHeaders = Object.assign({},
+                    { 'x-amz-delete-marker': true }, corsHeaders);
+                if (!versionId) {
+                    return callback(errors.NoSuchKey, responseHeaders);
+                }
+                // return MethodNotAllowed if requesting a specific
+                // version that has a delete marker
+                responseHeaders['x-amz-version-id'] =
+                    getVersionIdResHeader(verCfg, objMD);
+                return callback(errors.MethodNotAllowed, responseHeaders);
             }
             const headerValResult = validateHeaders(objMD, request.headers);
             if (headerValResult.error) {
                 return callback(headerValResult.error, corsHeaders);
             }
-            const responseMetaHeaders = collectResponseHeaders(objMD,
-                corsHeaders);
-            pushMetric('headObject', log, {
-                authInfo,
-                bucket: bucketName,
-            });
-            return callback(null, responseMetaHeaders);
+            const responseHeaders =
+                collectResponseHeaders(objMD, corsHeaders, verCfg);
+            pushMetric('headObject', log, { authInfo, bucket: bucketName });
+            return callback(null, responseHeaders);
         });
 }
+
+module.exports = objectHead;
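One small idiom in the new objectHead (and objectGet) code: the delete-marker response headers are built with `Object.assign` into a fresh object, so the shared CORS header object is never mutated. Runnable on its own, plain standard JavaScript:

```js
const corsHeaders = { 'Access-Control-Allow-Origin': '*' };
// merge the marker flag into a copy; the original stays untouched
const responseHeaders = Object.assign({},
    { 'x-amz-delete-marker': true }, corsHeaders);

console.log(responseHeaders);
// => { 'x-amz-delete-marker': true, 'Access-Control-Allow-Origin': '*' }
console.log(corsHeaders);
// => { 'Access-Control-Allow-Origin': '*' }
```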
@@ -1,151 +1,16 @@
-import { errors } from 'arsenal';
-
-import data from '../data/wrapper';
-import services from '../services';
-import aclUtils from '../utilities/aclUtils';
-import utils from '../utils';
-import { cleanUpBucket } from './apiUtils/bucket/bucketCreation';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import { dataStore } from './apiUtils/object/storeObject';
-import constants from '../../constants';
-import { logger } from '../utilities/logger';
-import { pushMetric } from '../utapi/utilities';
-import kms from '../kms/wrapper';
-import removeAWSChunked from './apiUtils/object/removeAWSChunked';
-
-function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
-    metadataStoreParams, dataToDelete, deleteLog, callback) {
-    services.metadataStoreObject(bucketName, dataGetInfo,
-        cipherBundle, metadataStoreParams, (err, contentMD5) => {
-            if (err) {
-                return callback(err);
-            }
-            if (dataToDelete) {
-                data.batchDelete(dataToDelete, deleteLog);
-            }
-            return callback(null, contentMD5);
-        });
-}
-
-function _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
-    cipherBundle, request, streamingV4Params, log, callback) {
-    const size = request.parsedContentLength;
-
-    const websiteRedirectHeader =
-        request.headers['x-amz-website-redirect-location'];
-    if (!utils.validateWebsiteHeader(websiteRedirectHeader)) {
-        const err = errors.InvalidRedirectLocation;
-        log.debug('invalid x-amz-website-redirect-location' +
-            `value ${websiteRedirectHeader}`, { error: err });
-        return callback(err);
-    }
-
-    const metaHeaders = utils.getMetaHeaders(request.headers);
-    log.trace('meta headers', { metaHeaders, method: 'objectPut' });
-    const objectKeyContext = {
-        bucketName,
-        owner: canonicalID,
-        namespace: request.namespace,
-    };
-    // If the request was made with a pre-signed url, the x-amz-acl 'header'
-    // might be in the query string rather than the actual headers so include
-    // it here
-    const headers = request.headers;
-    if (request.query && request.query['x-amz-acl']) {
-        headers['x-amz-acl'] = request.query['x-amz-acl'];
-    }
-    const metadataStoreParams = {
-        objectKey,
-        authInfo,
-        metaHeaders,
-        size,
-        contentType: request.headers['content-type'],
-        cacheControl: request.headers['cache-control'],
-        contentDisposition: request.headers['content-disposition'],
-        contentEncoding:
-            removeAWSChunked(request.headers['content-encoding']),
-        expires: request.headers.expires,
-        headers,
-        log,
-    };
-    let dataToDelete;
-    if (objMD && objMD.location) {
-        dataToDelete = Array.isArray(objMD.location) ?
-            objMD.location : [objMD.location];
-    }
-
-    // null - new object
-    // 0 or > 0 - existing object with content-length 0 or greater than 0
-    const prevContentLen = objMD && objMD['content-length'] !== undefined ?
-        objMD['content-length'] : null;
-    if (size !== 0) {
-        log.trace('storing object in data', {
-            method: 'services.metadataValidateAuthorization',
-        });
-        return dataStore(objectKeyContext, cipherBundle, request, size,
-            streamingV4Params, log, (err, dataGetInfo, calculatedHash) => {
-                if (err) {
-                    log.trace('error from data', {
-                        error: err,
-                        method: 'dataStore',
-                    });
-                    return callback(err);
-                }
-                // So that data retrieval information for MPU's and
-                // regular puts are stored in the same data structure,
-                // place the retrieval info here into a single element array
-                const dataGetInfoArr = [{
-                    key: dataGetInfo.key,
-                    size,
-                    start: 0,
-                    dataStoreName: dataGetInfo.dataStoreName,
-                }];
-                if (cipherBundle) {
-                    dataGetInfoArr[0].cryptoScheme = cipherBundle.cryptoScheme;
-                    dataGetInfoArr[0].cipheredDataKey =
-                        cipherBundle.cipheredDataKey;
-                }
-                metadataStoreParams.contentMD5 = calculatedHash;
-                return _storeInMDandDeleteData(
-                    bucketName, dataGetInfoArr, cipherBundle,
-                    metadataStoreParams, dataToDelete,
-                    logger.newRequestLoggerFromSerializedUids(
-                        log.getSerializedUids()), (err, contentMD5) => {
-                        if (err) {
-                            return callback(err);
-                        }
-                        pushMetric('putObject', log, {
-                            authInfo,
-                            bucket: bucketName,
-                            newByteLength: size,
-                            oldByteLength: prevContentLen,
-                        });
-                        return callback(null, contentMD5);
-                    });
-            });
-    }
-    log.trace('content-length is 0 so only storing metadata', {
-        method: 'services.metadataValidateAuthorization',
-    });
-    metadataStoreParams.contentMD5 = constants.emptyFileMd5;
-    const dataGetInfo = null;
-    return _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
-        metadataStoreParams, dataToDelete,
-        logger.newRequestLoggerFromSerializedUids(log
-            .getSerializedUids()), (err, contentMD5) => {
-            if (err) {
-                return callback(err);
-            }
-            pushMetric('putObject', log, {
-                authInfo,
-                bucket: bucketName,
-                newByteLength: size,
-                oldByteLength: prevContentLen,
-            });
-            return callback(null, contentMD5);
-        });
-}
+const async = require('async');
+const { errors, versioning } = require('arsenal');
+
+const aclUtils = require('../utilities/aclUtils');
+const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
+const { checkQueryVersionId } = require('./apiUtils/object/versioning');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
+const kms = require('../kms/wrapper');
+
+const versionIdUtils = versioning.VersionID;

 /**
  * PUT Object in the requested bucket. Steps include:
@@ -166,104 +31,95 @@ function _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
  * @param {Function} callback - final callback to call with the result
  * @return {undefined}
  */
-export default
 function objectPut(authInfo, request, streamingV4Params, log, callback) {
     log.debug('processing request', { method: 'objectPut' });
     if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
         log.trace('invalid acl header');
         return callback(errors.InvalidArgument);
     }
+    const queryContainsVersionId = checkQueryVersionId(request.query);
+    if (queryContainsVersionId instanceof Error) {
+        return callback(queryContainsVersionId);
+    }
     const bucketName = request.bucketName;
     const objectKey = request.objectKey;
-    const valParams = {
-        authInfo,
-        bucketName,
-        objectKey,
-        requestType: 'objectPut',
-        log,
-    };
+    const requestType = 'objectPut';
+    const valParams = { authInfo, bucketName, objectKey, requestType };
     const canonicalID = authInfo.getCanonicalID();
     log.trace('owner canonicalID to send to data', { canonicalID });
-    return services.metadataValidateAuthorization(valParams, (err, bucket,
-    objMD) => {
-        const corsHeaders = collectCorsHeaders(request.headers.origin,
-            request.method, bucket);
-        if (err) {
-            log.trace('error processing request', {
-                error: err,
-                method: 'services.metadataValidateAuthorization',
-            });
-            return callback(err, null, corsHeaders);
-        }
-        if (bucket.hasDeletedFlag() &&
-            canonicalID !== bucket.getOwner()) {
-            log.trace('deleted flag on bucket and request ' +
-                'from non-owner account');
-            return callback(errors.NoSuchBucket);
-        }
-        const serverSideEncryption = bucket.getServerSideEncryption();
-        if (bucket.hasTransientFlag() ||
-            bucket.hasDeletedFlag()) {
-            log.trace('transient or deleted flag so cleaning up bucket');
-            return cleanUpBucket(bucket,
-                canonicalID, log, err => {
-                    if (err) {
-                        log.debug('error cleaning up bucket with flag',
-                            { error: err,
-                            transientFlag:
-                                bucket.hasTransientFlag(),
-                            deletedFlag:
-                                bucket.hasDeletedFlag(),
-                        });
-                        // To avoid confusing user with error
-                        // from cleaning up
-                        // bucket return InternalError
-                        return callback(errors.InternalError, null,
-                            corsHeaders);
-                    }
-                    if (serverSideEncryption) {
-                        return kms.createCipherBundle(
-                            serverSideEncryption,
-                            log, (err, cipherBundle) => {
-                                if (err) {
-                                    return callback(errors.InternalError,
-                                        null, corsHeaders);
-                                }
-                                return _storeIt(bucketName, objectKey,
-                                    objMD, authInfo, canonicalID,
-                                    cipherBundle, request,
-                                    streamingV4Params, log,
-                                    (err, contentMD5) =>
-                                        callback(err, contentMD5,
-                                            corsHeaders));
-                            });
-                    }
-                    return _storeIt(bucketName, objectKey, objMD,
-                        authInfo, canonicalID, null, request,
-                        streamingV4Params, log,
-                        (err, contentMD5) =>
-                            callback(err, contentMD5, corsHeaders));
-                });
-        }
-        if (serverSideEncryption) {
-            return kms.createCipherBundle(
-                serverSideEncryption,
-                log, (err, cipherBundle) => {
-                    if (err) {
-                        return callback(errors.InternalError, null,
-                            corsHeaders);
-                    }
-                    return _storeIt(bucketName, objectKey, objMD,
-                        authInfo, canonicalID, cipherBundle,
-                        request, streamingV4Params, log,
-                        (err, contentMD5) =>
-                            callback(err, contentMD5, corsHeaders));
-                });
-        }
-        return _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
-            null, request, streamingV4Params, log,
-            (err, contentMD5) =>
-                callback(err, contentMD5, corsHeaders));
-    });
+    return metadataValidateBucketAndObj(valParams, log,
+        (err, bucket, objMD) => {
+            const responseHeaders = collectCorsHeaders(request.headers.origin,
+                request.method, bucket);
+            if (err) {
+                log.trace('error processing request', {
+                    error: err,
+                    method: 'metadataValidateBucketAndObj',
+                });
+                return callback(err, responseHeaders);
+            }
+            if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) {
+                log.trace('deleted flag on bucket and request ' +
+                    'from non-owner account');
+                return callback(errors.NoSuchBucket);
+            }
+            return async.waterfall([
+                function handleTransientOrDeleteBuckets(next) {
+                    if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) {
+                        return cleanUpBucket(bucket, canonicalID, log, next);
+                    }
+                    return next();
+                },
+                function createCipherBundle(next) {
+                    const serverSideEncryption =
+                        bucket.getServerSideEncryption();
+                    if (serverSideEncryption) {
+                        return kms.createCipherBundle(
+                            serverSideEncryption, log, next);
+                    }
+                    return next(null, null);
+                },
+                function objectCreateAndStore(cipherBundle, next) {
+                    return createAndStoreObject(bucketName,
+                        bucket, objectKey, objMD, authInfo, canonicalID,
+                        cipherBundle, request, false, streamingV4Params,
+                        log, next);
+                },
+            ], (err, storingResult) => {
+                if (err) {
+                    return callback(err, responseHeaders);
+                }
+                const newByteLength = request.parsedContentLength;
+
+                // Utapi expects null or a number for oldByteLength:
+                // * null - new object
+                // * 0 or > 0 - existing object with content-length 0 or > 0
+                // objMD here is the master version that we would
+                // have overwritten if there was an existing version or object
+                //
+                // TODO: Handle utapi metrics for null version overwrites.
+                const oldByteLength = objMD && objMD['content-length']
+                    !== undefined ? objMD['content-length'] : null;
+                if (storingResult) {
+                    // ETag's hex should always be enclosed in quotes
+                    responseHeaders.ETag = `"${storingResult.contentMD5}"`;
+                }
+                const vcfg = bucket.getVersioningConfiguration();
+                const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
+                if (isVersionedObj) {
+                    if (storingResult && storingResult.versionId) {
+                        responseHeaders['x-amz-version-id'] =
+                            versionIdUtils.encode(storingResult.versionId);
+                    }
+                }
+                pushMetric('putObject', log, {
+                    authInfo,
+                    bucket: bucketName,
+                    newByteLength,
+                    oldByteLength: isVersionedObj ? null : oldByteLength,
+                });
+                return callback(null, responseHeaders);
+            });
+        });
 }
+
+module.exports = objectPut;
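The rewritten objectPut documents a utapi convention: `oldByteLength` is `null` for a brand-new key and a number (possibly 0) when an existing object is overwritten, and the ETag hex is always wrapped in quotes. A runnable sketch of that bookkeeping, with illustrative names:

```js
// Sketch only; not the project's pushMetric API.
function putMetrics(objMD, storingResult, newByteLength) {
    // null means "no previous object"; 0 means "previous object was empty"
    const oldByteLength = objMD && objMD['content-length'] !== undefined ?
        objMD['content-length'] : null;
    return {
        newByteLength,
        oldByteLength,
        // ETag's hex should always be enclosed in quotes
        etag: storingResult ? `"${storingResult.contentMD5}"` : undefined,
    };
}

console.log(putMetrics(undefined, { contentMD5: 'd41d8cd9...' }, 10));
// => { newByteLength: 10, oldByteLength: null, etag: '"d41d8cd9..."' }
console.log(putMetrics({ 'content-length': 0 }, null, 10).oldByteLength);
// => 0 (overwrite of an existing empty object, distinct from null)
```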
@@ -1,13 +1,15 @@
-import { errors } from 'arsenal';
-import async from 'async';
-
-import acl from '../metadata/acl';
-import aclUtils from '../utilities/aclUtils';
-import { pushMetric } from '../utapi/utilities';
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import services from '../services';
-import vault from '../auth/vault';
+const async = require('async');
+const { errors } = require('arsenal');
+
+const acl = require('../metadata/acl');
+const aclUtils = require('../utilities/aclUtils');
+const { pushMetric } = require('../utapi/utilities');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const constants = require('../../constants');
+const vault = require('../auth/vault');
+const { decodeVersionId, getVersionIdResHeader }
+    = require('./apiUtils/object/versioning');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');

 /*
    Format of xml request:
@@ -39,7 +41,7 @@ import vault from '../auth/vault';
  * @param {function} cb - cb to server
  * @return {undefined}
  */
-export default function objectPutACL(authInfo, request, log, cb) {
+function objectPutACL(authInfo, request, log, cb) {
     log.debug('processing request', { method: 'objectPutACL' });
     const bucketName = request.bucketName;
     const objectKey = request.objectKey;
@@ -61,13 +63,24 @@ export default function objectPutACL(authInfo, request, log, cb) {
         constants.logId,
     ];

+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return cb(decodedVidResult);
+    }
+    const reqVersionId = decodedVidResult;
+
     const metadataValParams = {
         authInfo,
         bucketName,
         objectKey,
         requestType: 'objectPutACL',
-        log,
+        versionId: reqVersionId,
     };
+
     const possibleGrants = ['FULL_CONTROL', 'WRITE_ACP', 'READ', 'READ_ACP'];
     const addACLParams = {
         Canned: '',
@@ -88,11 +101,27 @@ export default function objectPutACL(authInfo, request, log, cb) {
         request.headers['x-amz-grant-full-control'], 'FULL_CONTROL');

     return async.waterfall([
-        next => services.metadataValidateAuthorization(metadataValParams, next),
-        (bucket, objectMD, next) => {
-            if (!objectMD) {
-                return next(errors.NoSuchKey, bucket);
-            }
+        function validateBucketAndObj(next) {
+            return metadataValidateBucketAndObj(metadataValParams, log,
+                (err, bucket, objectMD) => {
+                    if (err) {
+                        return next(err);
+                    }
+                    if (!objectMD) {
+                        const err = reqVersionId ? errors.NoSuchVersion :
+                            errors.NoSuchKey;
+                        return next(err, bucket);
+                    }
+                    if (objectMD.isDeleteMarker) {
+                        log.trace('delete marker detected',
+                            { method: 'objectPutACL' });
+                        return next(errors.MethodNotAllowed, bucket);
+                    }
+                    return next(null, bucket, objectMD);
+                });
+        },
+        function parseAclFromXml(bucket, objectMD, next) {
+            metadataValParams.versionId = objectMD.versionId;
             // If not setting acl through headers, parse body
             let jsonGrants;
             let aclOwnerID;
@@ -117,7 +146,7 @@ export default function objectPutACL(authInfo, request, log, cb) {
             log.debug('using acls from request headers');
             return next(null, bucket, objectMD, jsonGrants, aclOwnerID);
         },
-        (bucket, objectMD, jsonGrants, aclOwnerID, next) => {
+        function processAcls(bucket, objectMD, jsonGrants, aclOwnerID, next) {
             if (newCannedACL) {
                 log.debug('canned acl', { cannedAcl: newCannedACL });
                 addACLParams.Canned = newCannedACL;
@@ -239,25 +268,35 @@ export default function objectPutACL(authInfo, request, log, cb) {
             aclUtils.sortHeaderGrants(allUsers, addACLParams);
             return next(null, bucket, objectMD, revisedAddACLParams);
         },
-        function waterfall4(bucket, objectMD, ACLParams, next) {
+        function addAclsToObjMD(bucket, objectMD, ACLParams, next) {
             // Add acl's to object metadata
-            acl.addObjectACL(bucket, objectKey, objectMD, ACLParams, log, next);
+            const params = metadataValParams.versionId ?
+                { versionId: metadataValParams.versionId } : {};
+            acl.addObjectACL(bucket, objectKey, objectMD,
+                ACLParams, params, log, err => next(err, bucket, objectMD));
         },
-    ], (err, bucket) => {
-        const corsHeaders = collectCorsHeaders(request.headers.origin,
+    ], (err, bucket, objectMD) => {
+        const resHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
         if (err) {
             log.trace('error processing request', {
                 error: err,
                 method: 'objectPutACL',
             });
-            return cb(err, corsHeaders);
+            return cb(err, resHeaders);
         }
+
+        const verCfg = bucket.getVersioningConfiguration();
+        resHeaders['x-amz-version-id'] =
+            getVersionIdResHeader(verCfg, objectMD);
+
         log.trace('processed request successfully in object put acl api');
         pushMetric('putObjectAcl', log, {
             authInfo,
             bucket: bucketName,
         });
-        return cb(null, corsHeaders);
+        return cb(null, resHeaders);
     });
 }
+
+module.exports = objectPutACL;
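addAclsToObjMD above shows the recurring versioning pattern for metadata writes: pass `{ versionId }` to target an exact version, or an empty options object to update the master key. Reduced to a one-line sketch:

```js
// Illustrative helper; the real call sites inline this ternary.
function metadataPutOptions(versionId) {
    return versionId ? { versionId } : {};
}

console.log(metadataPutOptions('98765'));   // => { versionId: '98765' }
console.log(metadataPutOptions(undefined)); // => {}
```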
@@ -1,16 +1,20 @@
-import async from 'async';
-import { errors } from 'arsenal';
-
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import data from '../data/wrapper';
-import kms from '../kms/wrapper';
-import metadata from '../metadata/wrapper';
-import RelayMD5Sum from '../utilities/RelayMD5Sum';
-import { logger } from '../utilities/logger';
-import services from '../services';
-import setUpCopyLocator from './apiUtils/object/setUpCopyLocator';
-import validateHeaders from '../utilities/validateHeaders';
+const async = require('async');
+const { errors, versioning } = require('arsenal');
+
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { BackendInfo } = require('./apiUtils/object/BackendInfo');
+const constants = require('../../constants');
+const data = require('../data/wrapper');
+const kms = require('../kms/wrapper');
+const metadata = require('../metadata/wrapper');
+const RelayMD5Sum = require('../utilities/RelayMD5Sum');
+const logger = require('../utilities/logger');
+const services = require('../services');
+const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
+const validateHeaders = require('../utilities/validateHeaders');
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+
+const versionIdUtils = versioning.VersionID;

 /**
@@ -21,22 +25,23 @@ import validateHeaders from '../utilities/validateHeaders';
  * includes normalized headers
  * @param {string} sourceBucket - name of source bucket for object copy
  * @param {string} sourceObject - name of source object for object copy
+ * @param {string} reqVersionId - versionId of the source object for copy
  * @param {object} log - the request logger
  * @param {function} callback - final callback to call with the result
  * @return {undefined}
  */
-export default
 function objectPutCopyPart(authInfo, request, sourceBucket,
-    sourceObject, log, callback) {
+    sourceObject, reqVersionId, log, callback) {
     log.debug('processing request', { method: 'objectPutCopyPart' });
     const destBucketName = request.bucketName;
     const destObjectKey = request.objectKey;
+    const mpuBucketName = `${constants.mpuBucketPrefix}${destBucketName}`;
     const valGetParams = {
         authInfo,
         bucketName: sourceBucket,
         objectKey: sourceObject,
+        versionId: reqVersionId,
         requestType: 'objectGet',
-        log,
     };

     const partNumber = Number.parseInt(request.query.partNumber, 10);
@@ -57,7 +62,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
         bucketName: destBucketName,
         objectKey: destObjectKey,
         requestType: 'objectPut',
-        log,
     };

     // For validating the request at the MPU, the params are the same
@@ -74,11 +78,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
         bucketName: destBucketName,
         owner: authInfo.getCanonicalID(),
         namespace: request.namespace,
+        objectKey: destObjectKey,
+        partNumber: paddedPartNumber,
+        uploadId,
     };

     return async.waterfall([
         function checkDestAuth(next) {
-            return services.metadataValidateAuthorization(valPutParams,
+            return metadataValidateBucketAndObj(valPutParams, log,
                 (err, destBucketMD) => {
                     if (err) {
                         log.debug('error validating authorization for ' +
@@ -97,7 +104,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                 });
         },
         function checkSourceAuthorization(destBucketMD, next) {
-            return services.metadataValidateAuthorization(valGetParams,
+            return metadataValidateBucketAndObj(valGetParams, log,
                 (err, sourceBucketMD, sourceObjMD) => {
                     if (err) {
                         log.debug('error validating get part of request',
@@ -106,6 +113,23 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                     }
                     if (!sourceObjMD) {
                         log.debug('no source object', { sourceObject });
+                        const err = reqVersionId ? errors.NoSuchVersion :
+                            errors.NoSuchKey;
+                        return next(err, destBucketMD);
+                    }
+                    if (sourceObjMD.isDeleteMarker) {
+                        log.debug('delete marker on source object',
+                            { sourceObject });
+                        if (reqVersionId) {
+                            const err = errors.InvalidRequest
+                                .customizeDescription('The source of a copy ' +
+                                'request may not specifically refer to a delete' +
+                                'marker by version id.');
+                            return next(err, destBucketMD);
+                        }
+                        // if user specifies a key in a versioned source bucket
+                        // without specifying a version, and the object has a
+                        // delete marker, return NoSuchKey
                         return next(errors.NoSuchKey, destBucketMD);
                     }
                     const headerValResult =
@@ -118,33 +142,92 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                     if (copyLocator.error) {
                         return next(copyLocator.error, destBucketMD);
                     }
+                    let sourceVerId = undefined;
+                    // If specific version requested, include copy source
+                    // version id in response. Include in request by default
+                    // if versioning is enabled or suspended.
+                    if (sourceBucketMD.getVersioningConfiguration() ||
+                        reqVersionId) {
+                        if (sourceObjMD.isNull || !sourceObjMD.versionId) {
+                            sourceVerId = 'null';
+                        } else {
+                            sourceVerId =
+                                versionIdUtils.encode(sourceObjMD.versionId);
+                        }
+                    }
                     return next(null, copyLocator.dataLocator, destBucketMD,
-                        copyLocator.copyObjectSize);
+                        copyLocator.copyObjectSize, sourceVerId);
                 });
         },
-        function checkMPUBucketAuth(dataLocator, destBucketMD,
-            copyObjectSize, next) {
-            return services.metadataValidateMultipart(valMPUParams,
-                (err, mpuBucket) => {
-                    if (err) {
-                        log.trace('error authorizing based on mpu bucket',
-                            { error: err });
-                        return next(err, destBucketMD);
-                    }
-                    return next(null, dataLocator,
-                        destBucketMD, mpuBucket, copyObjectSize);
-                });
-        },
+        // get MPU shadow bucket to get splitter based on MD version
+        function getMpuShadowBucket(dataLocator, destBucketMD,
+            copyObjectSize, sourceVerId, next) {
+            return metadata.getBucket(mpuBucketName, log,
+                (err, mpuBucket) => {
+                    if (err && err.NoSuchBucket) {
+                        return next(errors.NoSuchUpload);
+                    }
+                    if (err) {
+                        log.error('error getting the shadow mpu bucket', {
+                            error: err,
+                            method: 'objectPutCopyPart::metadata.getBucket',
+                        });
+                        return next(err);
+                    }
+                    let splitter = constants.splitter;
+                    if (mpuBucket.getMdBucketModelVersion() < 2) {
+                        splitter = constants.oldSplitter;
+                    }
+                    return next(null, dataLocator, destBucketMD,
+                        copyObjectSize, sourceVerId, splitter);
+                });
+        },
+        // Get MPU overview object to check authorization to put a part
+        // and to get any object location constraint info
+        function getMpuOverviewObject(dataLocator, destBucketMD,
+            copyObjectSize, sourceVerId, splitter, next) {
+            const mpuOverviewKey =
+                `overview${splitter}${destObjectKey}${splitter}${uploadId}`;
+            return metadata.getObjectMD(mpuBucketName, mpuOverviewKey,
+                null, log, (err, res) => {
+                    if (err) {
+                        if (err.NoSuchKey) {
+                            return next(errors.NoSuchUpload);
+                        }
+                        log.error('error getting overview object from ' +
+                            'mpu bucket', {
+                            error: err,
+                            method: 'objectPutCopyPart::' +
+                                'metadata.getObjectMD',
+                        });
+                        return next(err);
+                    }
+                    const initiatorID = res.initiator.ID;
+                    const requesterID = authInfo.isRequesterAnIAMUser() ?
+                        authInfo.getArn() : authInfo.getCanonicalID();
+                    if (initiatorID !== requesterID) {
+                        return next(errors.AccessDenied);
+                    }
+                    const objectLocationConstraint =
+                        res.controllingLocationConstraint;
+                    return next(null, dataLocator, destBucketMD,
+                        objectLocationConstraint, copyObjectSize,
+                        sourceVerId);
+                });
+        },
         function goGetData(dataLocator, destBucketMD,
-            mpuBucket, copyObjectSize, next) {
+            objectLocationConstraint, copyObjectSize, sourceVerId, next) {
             const serverSideEncryption = destBucketMD.getServerSideEncryption();

             // skip if 0 byte object
             if (dataLocator.length === 0) {
-                return next(null, [], constants.emptyFileMd5,
-                    copyObjectSize, mpuBucket,
-                    serverSideEncryption, destBucketMD);
+                return process.nextTick(() => {
+                    next(null, destBucketMD, [], constants.emptyFileMd5,
+                        copyObjectSize, sourceVerId, serverSideEncryption);
+                });
             }
+            const backendInfo = new BackendInfo(objectLocationConstraint);
+
             // totalHash will be sent through the RelayMD5Sum transform streams
             // to collect the md5 from multiple streams
             let totalHash;
@@ -177,7 +260,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
return cb(errors.InternalError);
|
return cb(errors.InternalError);
|
||||||
}
|
}
|
||||||
return data.put(cipherBundle, hashedStream,
|
return data.put(cipherBundle, hashedStream,
|
||||||
numberPartSize, dataStoreContext, log,
|
numberPartSize, dataStoreContext,
|
||||||
|
backendInfo, log,
|
||||||
(error, partRetrievalInfo) => {
|
(error, partRetrievalInfo) => {
|
||||||
if (error) {
|
if (error) {
|
||||||
log.debug('error putting ' +
|
log.debug('error putting ' +
|
||||||
|
@ -209,7 +293,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
// Copied object is not encrypted so just put it
|
// Copied object is not encrypted so just put it
|
||||||
// without a cipherBundle
|
// without a cipherBundle
|
||||||
return data.put(null, hashedStream, numberPartSize,
|
return data.put(null, hashedStream, numberPartSize,
|
||||||
dataStoreContext, log, (error, partRetrievalInfo) => {
|
dataStoreContext, backendInfo,
|
||||||
|
log, (error, partRetrievalInfo) => {
|
||||||
if (error) {
|
if (error) {
|
||||||
log.debug('error putting object part',
|
log.debug('error putting object part',
|
||||||
{ error });
|
{ error });
|
||||||
|
@ -232,23 +317,21 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
}
|
}
|
||||||
// Digest the final combination of all of the part streams
|
// Digest the final combination of all of the part streams
|
||||||
totalHash = totalHash.digest('hex');
|
totalHash = totalHash.digest('hex');
|
||||||
return next(null, locations, totalHash,
|
return next(null, destBucketMD, locations, totalHash,
|
||||||
copyObjectSize, mpuBucket,
|
copyObjectSize, sourceVerId, serverSideEncryption);
|
||||||
serverSideEncryption, destBucketMD);
|
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function getExistingPartInfo(locations, totalHash,
|
function getExistingPartInfo(destBucketMD, locations, totalHash,
|
||||||
copyObjectSize, mpuBucket, serverSideEncryption, destBucketMD,
|
copyObjectSize, sourceVerId, serverSideEncryption, next) {
|
||||||
next) {
|
|
||||||
const partKey =
|
const partKey =
|
||||||
`${uploadId}${constants.splitter}${paddedPartNumber}`;
|
`${uploadId}${constants.splitter}${paddedPartNumber}`;
|
||||||
metadata.getObjectMD(mpuBucket.getName(), partKey, log,
|
metadata.getObjectMD(mpuBucketName, partKey, {}, log,
|
||||||
(err, result) => {
|
(err, result) => {
|
||||||
// If there is nothing being overwritten just move on
|
// If there is nothing being overwritten just move on
|
||||||
if (err && !err.NoSuchKey) {
|
if (err && !err.NoSuchKey) {
|
||||||
log.debug('error getting current part (if any)',
|
log.debug('error getting current part (if any)',
|
||||||
{ error: err });
|
{ error: err });
|
||||||
return next(err, destBucketMD);
|
return next(err);
|
||||||
}
|
}
|
||||||
let oldLocations;
|
let oldLocations;
|
||||||
if (result) {
|
if (result) {
|
||||||
|
@ -259,14 +342,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
oldLocations = Array.isArray(oldLocations) ?
|
oldLocations = Array.isArray(oldLocations) ?
|
||||||
oldLocations : [oldLocations];
|
oldLocations : [oldLocations];
|
||||||
}
|
}
|
||||||
return next(null, locations, totalHash,
|
return next(null, destBucketMD, locations, totalHash,
|
||||||
copyObjectSize, mpuBucket, serverSideEncryption,
|
copyObjectSize, sourceVerId, serverSideEncryption,
|
||||||
oldLocations, destBucketMD);
|
oldLocations);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function storeNewPartMetadata(locations, totalHash,
|
function storeNewPartMetadata(destBucketMD, locations, totalHash,
|
||||||
copyObjectSize, mpuBucket, serverSideEncryption,
|
copyObjectSize, sourceVerId, serverSideEncryption, oldLocations,
|
||||||
oldLocations, destBucketMD, next) {
|
next) {
|
||||||
const lastModified = new Date().toJSON();
|
const lastModified = new Date().toJSON();
|
||||||
const metaStoreParams = {
|
const metaStoreParams = {
|
||||||
partNumber: paddedPartNumber,
|
partNumber: paddedPartNumber,
|
||||||
|
@ -276,25 +359,26 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
splitter: constants.splitter,
|
splitter: constants.splitter,
|
||||||
lastModified,
|
lastModified,
|
||||||
};
|
};
|
||||||
return services.metadataStorePart(mpuBucket.getName(),
|
return services.metadataStorePart(mpuBucketName,
|
||||||
locations, metaStoreParams, log, err => {
|
locations, metaStoreParams, log, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error storing new metadata',
|
log.debug('error storing new metadata',
|
||||||
{ error: err, method: 'storeNewPartMetadata' });
|
{ error: err, method: 'storeNewPartMetadata' });
|
||||||
return next(err, destBucketMD);
|
return next(err);
|
||||||
}
|
}
|
||||||
// Clean up the old data now that new metadata (with new
|
// Clean up the old data now that new metadata (with new
|
||||||
// data locations) has been stored
|
// data locations) has been stored
|
||||||
if (oldLocations) {
|
if (oldLocations) {
|
||||||
data.batchDelete(oldLocations,
|
data.batchDelete(oldLocations, request.method, null,
|
||||||
logger.newRequestLoggerFromSerializedUids(
|
logger.newRequestLoggerFromSerializedUids(
|
||||||
log.getSerializedUids()));
|
log.getSerializedUids()));
|
||||||
}
|
}
|
||||||
return next(null, destBucketMD, totalHash, lastModified,
|
return next(null, destBucketMD, totalHash, lastModified,
|
||||||
serverSideEncryption);
|
sourceVerId, serverSideEncryption);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
], (err, destBucketMD, totalHash, lastModified, serverSideEncryption) => {
|
], (err, destBucketMD, totalHash, lastModified, sourceVerId,
|
||||||
|
serverSideEncryption) => {
|
||||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||||
request.method, destBucketMD);
|
request.method, destBucketMD);
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -310,8 +394,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
'<ETag>"', totalHash, '"</ETag>',
|
'<ETag>"', totalHash, '"</ETag>',
|
||||||
'</CopyPartResult>',
|
'</CopyPartResult>',
|
||||||
].join('');
|
].join('');
|
||||||
// TODO: Add version headers for response
|
|
||||||
// (if source is a version).
|
|
||||||
const additionalHeaders = corsHeaders || {};
|
const additionalHeaders = corsHeaders || {};
|
||||||
if (serverSideEncryption) {
|
if (serverSideEncryption) {
|
||||||
additionalHeaders['x-amz-server-side-encryption'] =
|
additionalHeaders['x-amz-server-side-encryption'] =
|
||||||
|
@ -322,6 +405,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
serverSideEncryption.masterKeyId;
|
serverSideEncryption.masterKeyId;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
additionalHeaders['x-amz-copy-source-version-id'] = sourceVerId;
|
||||||
// TODO push metric for objectPutCopyPart
|
// TODO push metric for objectPutCopyPart
|
||||||
// pushMetric('putObjectCopyPart', log, {
|
// pushMetric('putObjectCopyPart', log, {
|
||||||
// bucket: destBucketName,
|
// bucket: destBucketName,
|
||||||
|
@ -329,3 +413,5 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
return callback(null, xml, additionalHeaders);
|
return callback(null, xml, additionalHeaders);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
module.exports = objectPutCopyPart;
|
||||||
|
|
|
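The sourceVerId logic added above follows S3 semantics for the x-amz-copy-source-version-id header: it is only computed when the source bucket has a versioning configuration or a version was requested explicitly, and pre-versioning ("null") objects are reported as the literal string 'null'. A standalone sketch of that decision, with a stub encode standing in for arsenal's versionIdUtils codec (an assumption for illustration; the real encoding is opaque to callers):

// Stub for arsenal's versionIdUtils.encode (assumed, illustration only).
const encode = versionId => Buffer.from(versionId).toString('hex');

function getSourceVersionId(versioningCfg, reqVersionId, sourceObjMD) {
    // No versioning on the source bucket and no explicit version requested:
    // the response carries no x-amz-copy-source-version-id at all.
    if (!versioningCfg && !reqVersionId) {
        return undefined;
    }
    // Null versions (objects written before versioning was enabled)
    // are reported as the literal string 'null'.
    if (sourceObjMD.isNull || !sourceObjMD.versionId) {
        return 'null';
    }
    return encode(sourceObjMD.versionId);
}

// A pre-versioning object in a bucket that has since enabled versioning:
console.log(getSourceVersionId({ Status: 'Enabled' }, undefined,
    { isNull: true })); // => 'null'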
@@ -1,16 +1,17 @@
-import assert from 'assert';
-import async from 'async';
-import { errors } from 'arsenal';
+const assert = require('assert');
+const async = require('async');
+const { errors } = require('arsenal');
 
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import data from '../data/wrapper';
-import { dataStore } from './apiUtils/object/storeObject';
-import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
-import kms from '../kms/wrapper';
-import metadata from '../metadata/wrapper';
-import { pushMetric } from '../utapi/utilities';
-import { logger } from '../utilities/logger';
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { BackendInfo } = require('./apiUtils/object/BackendInfo');
+const constants = require('../../constants');
+const data = require('../data/wrapper');
+const { dataStore } = require('./apiUtils/object/storeObject');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
+const kms = require('../kms/wrapper');
+const metadata = require('../metadata/wrapper');
+const { pushMetric } = require('../utapi/utilities');
+const logger = require('../utilities/logger');
 
 
 // We pad the partNumbers so that the parts will be sorted in numerical order.
@@ -44,7 +45,7 @@ function _getPartKey(uploadId, splitter, paddedPartNumber) {
  * @param {function} cb - final callback to call with the result
  * @return {undefined}
  */
-export default function objectPutPart(authInfo, request, streamingV4Params, log,
+function objectPutPart(authInfo, request, streamingV4Params, log,
     cb) {
     log.debug('processing request', { method: 'objectPutPart' });
     const size = request.parsedContentLength;
@@ -83,33 +84,35 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
 
     return async.waterfall([
         // Get the destination bucket.
-        next => metadata.getBucket(bucketName, log, (err, bucket) => {
+        next => metadata.getBucket(bucketName, log,
+            (err, destinationBucket) => {
             if (err && err.NoSuchBucket) {
-                return next(errors.NoSuchBucket);
+                return next(errors.NoSuchBucket, destinationBucket);
             }
             if (err) {
                 log.error('error getting the destination bucket', {
                     error: err,
                     method: 'objectPutPart::metadata.getBucket',
                 });
-                return next(err, bucket);
+                return next(err, destinationBucket);
             }
-            return next(null, bucket);
+            return next(null, destinationBucket);
         }),
         // Check the bucket authorization.
-        (bucket, next) => {
+        (destinationBucket, next) => {
             // For validating the request at the destinationBucket level the
             // `requestType` is the general 'objectPut'.
             const requestType = 'objectPut';
-            if (!isBucketAuthorized(bucket, requestType, canonicalID)) {
+            if (!isBucketAuthorized(destinationBucket, requestType,
+                canonicalID)) {
                 log.debug('access denied for user on bucket', { requestType });
-                return next(errors.AccessDenied, bucket);
+                return next(errors.AccessDenied, destinationBucket);
             }
-            return next(null, bucket);
+            return next(null, destinationBucket);
         },
         // Get bucket server-side encryption, if it exists.
-        (bucket, next) => {
-            const encryption = bucket.getServerSideEncryption();
+        (destinationBucket, next) => {
+            const encryption = destinationBucket.getServerSideEncryption();
             // If bucket has server-side encryption, pass the `res` value
             if (encryption) {
                 return kms.createCipherBundle(encryption, log, (err, res) => {
@@ -118,61 +121,68 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
                         'the destination bucket', {
                             error: err,
                         });
-                        return next(err, bucket);
+                        return next(err, destinationBucket);
                     }
-                    return next(null, res, bucket);
+                    return next(null, destinationBucket, res);
                 });
             }
             // The bucket does not have server-side encryption, so pass `null`
-            return next(null, null, bucket);
+            return next(null, destinationBucket, null);
         },
         // Get the MPU shadow bucket.
-        (cipherBundle, bucket, next) => metadata.getBucket(mpuBucketName, log,
+        (destinationBucket, cipherBundle, next) =>
+            metadata.getBucket(mpuBucketName, log,
             (err, mpuBucket) => {
                 if (err && err.NoSuchBucket) {
-                    return next(errors.NoSuchUpload, bucket);
+                    return next(errors.NoSuchUpload, destinationBucket);
                 }
                 if (err) {
                     log.error('error getting the shadow mpu bucket', {
                         error: err,
                         method: 'objectPutPart::metadata.getBucket',
                     });
-                    return next(err, bucket);
+                    return next(err, destinationBucket);
                 }
                 let splitter = constants.splitter;
                 // BACKWARD: Remove to remove the old splitter
                 if (mpuBucket.getMdBucketModelVersion() < 2) {
                     splitter = constants.oldSplitter;
                 }
-                return next(null, cipherBundle, splitter, bucket);
+                return next(null, destinationBucket, cipherBundle, splitter);
             }),
         // Check authorization of the MPU shadow bucket.
-        (cipherBundle, splitter, bucket, next) => {
+        (destinationBucket, cipherBundle, splitter, next) => {
             const mpuOverviewKey = _getOverviewKey(splitter, objectKey,
                 uploadId);
-            return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, log,
+            return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log,
                 (err, res) => {
                     if (err) {
                         log.error('error getting the object from mpu bucket', {
                             error: err,
                             method: 'objectPutPart::metadata.getObjectMD',
                         });
-                        return next(err, bucket);
+                        return next(err, destinationBucket);
                     }
                     const initiatorID = res.initiator.ID;
                     const requesterID = authInfo.isRequesterAnIAMUser() ?
                         authInfo.getArn() : authInfo.getCanonicalID();
                     if (initiatorID !== requesterID) {
-                        return next(errors.AccessDenied, bucket);
+                        return next(errors.AccessDenied, destinationBucket);
                     }
-                    return next(null, cipherBundle, splitter, bucket);
+                    const objectLocationConstraint =
+                        res.controllingLocationConstraint;
+                    return next(null, destinationBucket,
+                        objectLocationConstraint,
+                        cipherBundle, splitter);
                 });
         },
         // Get any pre-existing part.
-        (cipherBundle, splitter, bucket, next) => {
+        (destinationBucket, objectLocationConstraint, cipherBundle,
+            splitter, next) => {
             const paddedPartNumber = _getPaddedPartNumber(partNumber);
             const partKey = _getPartKey(uploadId, splitter, paddedPartNumber);
-            return metadata.getObjectMD(mpuBucketName, partKey, log,
+            return metadata.getObjectMD(mpuBucketName, partKey, {}, log,
                 (err, res) => {
                     // If there is no object with the same key, continue.
                     if (err && !err.NoSuchKey) {
@@ -180,7 +190,7 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
                             error: err,
                             method: 'objectPutPart::metadata.getObjectMD',
                         });
-                        return next(err, bucket);
+                        return next(err, destinationBucket);
                     }
                     let prevObjectSize = null;
                     let oldLocations = null;
@@ -194,29 +204,38 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
                         oldLocations = Array.isArray(res.partLocations) ?
                             res.partLocations : [res.partLocations];
                     }
-                    return next(null, cipherBundle, partKey, prevObjectSize,
-                        oldLocations, bucket);
+                    return next(null, destinationBucket,
+                        objectLocationConstraint, cipherBundle,
+                        partKey, prevObjectSize, oldLocations);
                 });
         },
         // Store in data backend.
-        (cipherBundle, partKey, prevObjectSize, oldLocations, bucket, next) => {
+        (destinationBucket, objectLocationConstraint, cipherBundle,
+            partKey, prevObjectSize, oldLocations, next) => {
             const objectKeyContext = {
                 bucketName,
                 owner: canonicalID,
                 namespace: request.namespace,
+                objectKey,
+                partNumber: _getPaddedPartNumber(partNumber),
+                uploadId,
             };
+            const backendInfo = new BackendInfo(objectLocationConstraint);
+
             return dataStore(objectKeyContext, cipherBundle, request, size,
-                streamingV4Params, log, (err, dataGetInfo, hexDigest) => {
+                streamingV4Params, backendInfo, log,
+                (err, dataGetInfo, hexDigest) => {
                     if (err) {
-                        return next(err, bucket);
+                        return next(err, destinationBucket);
                     }
-                    return next(null, dataGetInfo, hexDigest, cipherBundle,
-                        partKey, prevObjectSize, oldLocations, bucket);
+                    return next(null, destinationBucket, dataGetInfo, hexDigest,
+                        cipherBundle, partKey, prevObjectSize, oldLocations,
+                        objectLocationConstraint);
                 });
         },
         // Store data locations in metadata and delete any overwritten data.
-        (dataGetInfo, hexDigest, cipherBundle, partKey, prevObjectSize,
-            oldLocations, bucket, next) => {
+        (destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
            prevObjectSize, oldLocations, objectLocationConstraint, next) => {
             // Use an array to be consistent with objectPutCopyPart where there
             // could be multiple locations.
             const partLocations = [dataGetInfo];
@@ -238,29 +257,31 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
                 'content-md5': hexDigest,
                 'content-length': size,
             };
-            return metadata.putObjectMD(mpuBucketName, partKey, omVal, log,
+            return metadata.putObjectMD(mpuBucketName, partKey, omVal, {}, log,
                 err => {
                     if (err) {
                         log.error('error putting object in mpu bucket', {
                             error: err,
                             method: 'objectPutPart::metadata.putObjectMD',
                         });
-                        return next(err, bucket);
+                        return next(err, destinationBucket);
                     }
                     // Clean up any old data now that new metadata (with new
                     // data locations) has been stored.
                     if (oldLocations) {
                         log.trace('Overwriting MPU part, deleting data');
-                        data.batchDelete(oldLocations, logger
-                            .newRequestLoggerFromSerializedUids(log
+                        data.batchDelete(oldLocations, request.method,
+                            objectLocationConstraint,
+                            logger.newRequestLoggerFromSerializedUids(log
                             .getSerializedUids()));
                     }
-                    return next(null, bucket, hexDigest, prevObjectSize);
+                    return next(null, destinationBucket,
+                        hexDigest, prevObjectSize);
                 });
         },
-    ], (err, bucket, hexDigest, prevObjectSize) => {
+    ], (err, destinationBucket, hexDigest, prevObjectSize) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
-            request.method, bucket);
+            request.method, destinationBucket);
         if (err) {
             log.error('error in object put part (upload part)', {
                 error: err,
@@ -277,3 +298,5 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
         return cb(null, hexDigest, corsHeaders);
     });
 }
+
+module.exports = objectPutPart;
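A convention worth noting across both MPU handlers above: every waterfall step now passes destinationBucket as its first result, and also hands it back alongside errors, so the completion handler can compute CORS headers whether or not a step failed. A reduced sketch of just that threading (step bodies are hypothetical; only the pattern is the point):

const async = require('async');

function handler(request, callback) {
    return async.waterfall([
        // Stand-in for metadata.getBucket: produce the destination bucket.
        next => next(null, { name: 'dest-bucket' }),
        (destinationBucket, next) => {
            if (!request.authorized) {
                // Hand the bucket back with the error; async.waterfall
                // forwards extra arguments to the final callback.
                return next(new Error('AccessDenied'), destinationBucket);
            }
            return next(null, destinationBucket);
        },
    ], (err, destinationBucket) => {
        // CORS headers can be collected even on the error path.
        const corsHeaders = destinationBucket ?
            { 'access-control-allow-origin': '*' } : {};
        return callback(err, corsHeaders);
    });
}

handler({ authorized: false },
    (err, headers) => console.log(err.message, headers));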
@@ -0,0 +1,100 @@
+const async = require('async');
+const { errors } = require('arsenal');
+
+const { decodeVersionId, getVersionIdResHeader } =
+    require('./apiUtils/object/versioning');
+
+const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { pushMetric } = require('../utapi/utilities');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const metadata = require('../metadata/wrapper');
+const { parseTagXml } = require('./apiUtils/object/tagging');
+
+/**
+ * Object Put Tagging - Adds tag(s) to object
+ * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
+ * @param {object} request - http request object
+ * @param {object} log - Werelogs logger
+ * @param {function} callback - callback to server
+ * @return {undefined}
+ */
+function objectPutTagging(authInfo, request, log, callback) {
+    log.debug('processing request', { method: 'objectPutTagging' });
+
+    const bucketName = request.bucketName;
+    const objectKey = request.objectKey;
+
+    const decodedVidResult = decodeVersionId(request.query);
+    if (decodedVidResult instanceof Error) {
+        log.trace('invalid versionId query', {
+            versionId: request.query.versionId,
+            error: decodedVidResult,
+        });
+        return process.nextTick(() => callback(decodedVidResult));
+    }
+    const reqVersionId = decodedVidResult;
+
+    const metadataValParams = {
+        authInfo,
+        bucketName,
+        objectKey,
+        requestType: 'bucketOwnerAction',
+        versionId: reqVersionId,
+    };
+
+    return async.waterfall([
+        next => metadataValidateBucketAndObj(metadataValParams, log,
+            (err, bucket, objectMD) => {
+                if (err) {
+                    log.trace('request authorization failed',
+                        { method: 'objectPutTagging', error: err });
+                    return next(err);
+                }
+                if (!objectMD) {
+                    const err = reqVersionId ? errors.NoSuchVersion :
+                        errors.NoSuchKey;
+                    log.trace('error no object metadata found',
+                        { method: 'objectPutTagging', error: err });
+                    return next(err, bucket);
+                }
+                if (objectMD.isDeleteMarker) {
+                    log.trace('version is a delete marker',
+                        { method: 'objectPutTagging' });
+                    return next(errors.MethodNotAllowed, bucket);
+                }
+                return next(null, bucket, objectMD);
+            }),
+        (bucket, objectMD, next) => {
+            log.trace('parsing tag(s)');
+            parseTagXml(request.post, log, (err, tags) =>
+                next(err, bucket, tags, objectMD));
+        },
+        (bucket, tags, objectMD, next) => {
+            // eslint-disable-next-line no-param-reassign
+            objectMD.tags = tags;
+            const params = objectMD.versionId ? { versionId:
+                objectMD.versionId } : {};
+            metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
+                log, err =>
+                next(err, bucket, objectMD));
+        },
+    ], (err, bucket, objectMD) => {
+        const additionalResHeaders = collectCorsHeaders(request.headers.origin,
+            request.method, bucket);
+        if (err) {
+            log.trace('error processing request', { error: err,
+                method: 'objectPutTagging' });
+        } else {
+            pushMetric('putObjectTagging', log, {
+                authInfo,
+                bucket: bucketName,
+            });
+            const verCfg = bucket.getVersioningConfiguration();
+            additionalResHeaders['x-amz-version-id'] =
+                getVersionIdResHeader(verCfg, objectMD);
+        }
+        return callback(err, additionalResHeaders);
+    });
+}
+
+module.exports = objectPutTagging;
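The new objectPutTagging handler is the server side of the standard S3 PutObjectTagging operation, so it can be exercised with the stock AWS SDK for JavaScript pointed at this server. Endpoint, credentials, and bucket/key names below are placeholders, not values from this change:

const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000', // placeholder local endpoint
    s3ForcePathStyle: true,
    accessKeyId: 'accessKey1',         // placeholder credentials
    secretAccessKey: 'verySecretKey1',
});

s3.putObjectTagging({
    Bucket: 'my-bucket',
    Key: 'my-key',
    // VersionId is optional and maps to the ?versionId query string
    // that decodeVersionId parses in the handler above.
    Tagging: { TagSet: [{ Key: 'project', Value: 'marketing' }] },
}, (err, data) => {
    if (err) {
        return console.error('putObjectTagging failed', err);
    }
    return console.log('tags stored', data);
});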
@@ -1,7 +1,7 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import constants from '../../constants';
-import services from '../services';
+const constants = require('../../constants');
+const services = require('../services');
 
 /*
  * Format of xml response:
@@ -50,11 +50,11 @@ function generateXml(xml, owner, userBuckets, splitter) {
  * @param {function} callback - callback
  * @return {undefined}
  */
-export default function serviceGet(authInfo, request, log, callback) {
+function serviceGet(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'serviceGet' });
 
     if (authInfo.isRequesterPublicUser()) {
-        log.warn('operation not available for public user');
+        log.debug('operation not available for public user');
         return callback(errors.AccessDenied);
     }
     const xml = [];
@@ -83,3 +83,5 @@ export default function serviceGet(authInfo, request, log, callback) {
             splitter));
     });
 }
+
+module.exports = serviceGet;
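The serviceGet hunk, like most of the files that follow, is the same mechanical migration from transpiled ES module syntax to plain CommonJS. The general shape of the translation, shown on a self-contained toy module (the md5 helper is illustrative, not from this change):

// Before (ES module syntax; needed Babel before Node could run it):
//   import { createHash } from 'crypto';
//   export default function md5hex(buf) { ... }

// After (plain CommonJS, runnable by Node directly):
const { createHash } = require('crypto');

function md5hex(buf) {
    // The body is untouched; only the module plumbing changes.
    return createHash('md5').update(buf).digest('hex');
}

module.exports = md5hex;

// Callers change `import md5hex from './md5hex'` to:
// const md5hex = require('./md5hex');
console.log(md5hex(Buffer.from('part-1'))); // prints the md5 hex digest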
@@ -1,24 +1,23 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import metadata from '../metadata/wrapper';
-import bucketShield from './apiUtils/bucket/bucketShield';
-import {
-    findRoutingRule,
-    extractRedirectInfo,
-} from './apiUtils/object/websiteServing';
-import { isObjAuthorized } from './apiUtils/authorization/aclChecks';
-import collectResponseHeaders from '../utilities/collectResponseHeaders';
-import validateHeaders from '../utilities/validateHeaders';
-import { pushMetric } from '../utapi/utilities';
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const constants = require('../../constants');
+const metadata = require('../metadata/wrapper');
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const { findRoutingRule, extractRedirectInfo } =
+    require('./apiUtils/object/websiteServing');
+const { isObjAuthorized } = require('./apiUtils/authorization/aclChecks');
+const collectResponseHeaders = require('../utilities/collectResponseHeaders');
+const validateHeaders = require('../utilities/validateHeaders');
+const { pushMetric } = require('../utapi/utilities');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
 
 /**
  * _errorActions - take a number of actions once have error getting obj
  * @param {object} err - arsenal errors object
  * @param {string} errorDocument - key to get error document
  * @param {object []} routingRules - array of routingRule objects
- * @param {string} bucketName - bucket name from request
+ * @param {object} bucket - bucket metadata
  * @param {string} objectKey - object key from request (or as translated in
  * websiteGet)
  * @param {object} corsHeaders - CORS-related response headers
@@ -27,7 +26,8 @@ import { pushMetric } from '../utapi/utilities';
  * @return {undefined}
  */
 function _errorActions(err, errorDocument, routingRules,
-    bucketName, objectKey, corsHeaders, log, callback) {
+    bucket, objectKey, corsHeaders, log, callback) {
+    const bucketName = bucket.getName();
     const errRoutingRule = findRoutingRule(routingRules,
         objectKey, err.code);
     if (errRoutingRule) {
@@ -36,7 +36,7 @@ function _errorActions(err, errorDocument, routingRules,
             objectKey);
     }
     if (errorDocument) {
-        return metadata.getObjectMD(bucketName, errorDocument, log,
+        return metadata.getObjectMD(bucketName, errorDocument, {}, log,
             (errObjErr, errObjMD) => {
                 if (errObjErr) {
                     // error retrieving error document so return original error
@@ -44,6 +44,13 @@ function _errorActions(err, errorDocument, routingRules,
                     // to true
                     return callback(err, true, null, corsHeaders);
                 }
+                // return the default error message if the object is private
+                // rather than sending a stored error file
+                if (!isObjAuthorized(bucket, errObjMD, 'objectGet',
+                    constants.publicId)) {
+                    log.trace('errorObj not authorized', { error: err });
+                    return callback(err, true, null, corsHeaders);
+                }
                 const dataLocator = errObjMD.location;
                 if (errObjMD['x-amz-server-side-encryption']) {
                     for (let i = 0; i < dataLocator.length; i++) {
@@ -73,7 +80,6 @@ function _errorActions(err, errorDocument, routingRules,
  * @param {function} callback - callback to function in route
  * @return {undefined}
  */
-export default
 function websiteGet(request, log, callback) {
     log.debug('processing request', { method: 'websiteGet' });
     const bucketName = request.bucketName;
@@ -91,8 +97,6 @@ function websiteGet(request, log, callback) {
     }
     const corsHeaders = collectCorsHeaders(request.headers.origin,
         request.method, bucket);
-    // bucket ACL's do not matter for website get since it is always the
-    // get of an object. object ACL's are what matter
     const websiteConfig = bucket.getWebsiteConfiguration();
     if (!websiteConfig) {
         return callback(errors.NoSuchWebsiteConfiguration, false, null,
@@ -130,7 +134,7 @@ function websiteGet(request, log, callback) {
 
         // get object metadata and check authorization and header
         // validation
-        return metadata.getObjectMD(bucketName, objectKey, log,
+        return metadata.getObjectMD(bucketName, objectKey, {}, log,
             (err, objMD) => {
                 // Note: In case of error, we intentionally send the original
                 // object key to _errorActions as in case of a redirect, we do
@@ -139,21 +143,25 @@ function websiteGet(request, log, callback) {
                     log.trace('error retrieving object metadata',
                         { error: err });
                     let returnErr = err;
-                    // AWS returns AccessDenied instead of NoSuchKey
-                    if (err === errors.NoSuchKey) {
+                    const bucketAuthorized = isBucketAuthorized(bucket,
+                        'bucketGet', constants.publicId);
+                    // if index object does not exist and bucket is private AWS
+                    // returns 403 - AccessDenied error.
+                    if (err === errors.NoSuchKey && !bucketAuthorized) {
                         returnErr = errors.AccessDenied;
                     }
                     return _errorActions(returnErr,
                         websiteConfig.getErrorDocument(), routingRules,
-                        bucketName, reqObjectKey, corsHeaders, log, callback);
+                        bucket, reqObjectKey, corsHeaders, log,
+                        callback);
                 }
                 if (!isObjAuthorized(bucket, objMD, 'objectGet',
                     constants.publicId)) {
                     const err = errors.AccessDenied;
                     log.trace('request not authorized', { error: err });
                     return _errorActions(err, websiteConfig.getErrorDocument(),
-                        routingRules, bucketName, reqObjectKey, corsHeaders,
-                        log, callback);
+                        routingRules, bucket,
+                        reqObjectKey, corsHeaders, log, callback);
                 }
 
                 const headerValResult = validateHeaders(objMD, request.headers);
@@ -161,8 +169,8 @@ function websiteGet(request, log, callback) {
                     const err = headerValResult.error;
                     log.trace('header validation error', { error: err });
                     return _errorActions(err, websiteConfig.getErrorDocument(),
-                        routingRules, bucketName, reqObjectKey, corsHeaders,
-                        log, callback);
+                        routingRules, bucket, reqObjectKey,
+                        corsHeaders, log, callback);
                 }
                 // check if object to serve has website redirect header
                 // Note: AWS prioritizes website configuration rules over
@@ -200,3 +208,5 @@ function websiteGet(request, log, callback) {
             });
         });
 }
+
+module.exports = websiteGet;
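The reworked error mapping in websiteGet (and in websiteHead below) deserves a second look: a missing website index object is only surfaced as NoSuchKey when the bucket itself is publicly listable; for a private bucket it is masked as AccessDenied, matching AWS behavior. Isolated as a pure function (the error objects are stand-ins for arsenal's):

// Stand-ins for arsenal's error values, for illustration only.
const errors = {
    NoSuchKey: { code: 404, description: 'NoSuchKey' },
    AccessDenied: { code: 403, description: 'AccessDenied' },
};

function mapWebsiteError(err, bucketIsPubliclyListable) {
    // If the index object does not exist and the bucket is private,
    // return 403 AccessDenied rather than leaking 404 NoSuchKey.
    if (err === errors.NoSuchKey && !bucketIsPubliclyListable) {
        return errors.AccessDenied;
    }
    return err;
}

console.log(mapWebsiteError(errors.NoSuchKey, false).description);
// => 'AccessDenied'
console.log(mapWebsiteError(errors.NoSuchKey, true).description);
// => 'NoSuchKey'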
@@ -1,17 +1,16 @@
-import { errors } from 'arsenal';
+const { errors } = require('arsenal');
 
-import collectCorsHeaders from '../utilities/collectCorsHeaders';
-import constants from '../../constants';
-import metadata from '../metadata/wrapper';
-import bucketShield from './apiUtils/bucket/bucketShield';
-import {
-    findRoutingRule,
-    extractRedirectInfo,
-} from './apiUtils/object/websiteServing';
-import { isObjAuthorized } from './apiUtils/authorization/aclChecks';
-import collectResponseHeaders from '../utilities/collectResponseHeaders';
-import validateHeaders from '../utilities/validateHeaders';
-import { pushMetric } from '../utapi/utilities';
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const constants = require('../../constants');
+const metadata = require('../metadata/wrapper');
+const bucketShield = require('./apiUtils/bucket/bucketShield');
+const { findRoutingRule, extractRedirectInfo } =
+    require('./apiUtils/object/websiteServing');
+const { isObjAuthorized } = require('./apiUtils/authorization/aclChecks');
+const collectResponseHeaders = require('../utilities/collectResponseHeaders');
+const validateHeaders = require('../utilities/validateHeaders');
+const { pushMetric } = require('../utapi/utilities');
+const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
 
 
 /**
@@ -43,7 +42,7 @@ function _errorActions(err, routingRules, objectKey, corsHeaders, log,
  * @param {function} callback - callback to function in route
  * @return {undefined}
 */
-export default function websiteHead(request, log, callback) {
+function websiteHead(request, log, callback) {
     log.debug('processing request', { method: 'websiteHead' });
     const bucketName = request.bucketName;
     const reqObjectKey = request.objectKey ? request.objectKey : '';
@@ -95,7 +94,7 @@ export default function websiteHead(request, log, callback) {
 
         // get object metadata and check authorization and header
         // validation
-        return metadata.getObjectMD(bucketName, objectKey, log,
+        return metadata.getObjectMD(bucketName, objectKey, {}, log,
             (err, objMD) => {
                 // Note: In case of error, we intentionally send the original
                 // object key to _errorActions as in case of a redirect, we do
@@ -104,8 +103,11 @@ export default function websiteHead(request, log, callback) {
                     log.trace('error retrieving object metadata',
                         { error: err });
                     let returnErr = err;
-                    // AWS returns AccessDenied instead of NoSuchKey
-                    if (err === errors.NoSuchKey) {
+                    const bucketAuthorized = isBucketAuthorized(bucket,
+                        'bucketGet', constants.publicId);
+                    // if index object does not exist and bucket is private AWS
+                    // returns 403 - AccessDenied error.
+                    if (err === errors.NoSuchKey && !bucketAuthorized) {
                         returnErr = errors.AccessDenied;
                     }
                     return _errorActions(returnErr, routingRules,
@@ -152,3 +154,5 @@ export default function websiteHead(request, log, callback) {
             });
         });
 }
+
+module.exports = websiteHead;
@@ -1,4 +1,4 @@
-import constants from '../../constants';
+const constants = require('../../constants');
 
 /**
  * Class containing requester's information received from Vault
@@ -7,7 +7,7 @@ import constants from '../../constants';
  * @return {AuthInfo} an AuthInfo instance
  */
 
-export default class AuthInfo {
+class AuthInfo {
     constructor(objectFromVault) {
         const { arn, canonicalID, shortid, email,
             accountDisplayName, IAMdisplayName } = objectFromVault;
@@ -50,3 +50,5 @@ export default class AuthInfo {
         return this.canonicalID === constants.publicId;
     }
 }
+
+module.exports = AuthInfo;
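AuthInfo is what both MPU hunks earlier use to compare the upload initiator with the requester: IAM users are identified by ARN, account-level requesters by canonical ID. A sketch of that check against a minimal stand-in for the class (field values are fabricated examples):

// Minimal stand-in mirroring the AuthInfo accessors used by the MPU checks.
class AuthInfoLike {
    constructor({ arn, canonicalID, IAMdisplayName }) {
        this.arn = arn;
        this.canonicalID = canonicalID;
        this.IAMdisplayName = IAMdisplayName;
    }
    isRequesterAnIAMUser() { return !!this.IAMdisplayName; }
    getArn() { return this.arn; }
    getCanonicalID() { return this.canonicalID; }
}

// The initiator comparison from objectPutPart/objectPutCopyPart:
function requesterMatchesInitiator(authInfo, initiatorID) {
    const requesterID = authInfo.isRequesterAnIAMUser() ?
        authInfo.getArn() : authInfo.getCanonicalID();
    return requesterID === initiatorID;
}

const requester = new AuthInfoLike({
    arn: 'arn:aws:iam::123456789012:user/alice', // fabricated
    canonicalID: 'abcd1234',
    IAMdisplayName: 'alice',
});
console.log(requesterMatchesInitiator(requester,
    'arn:aws:iam::123456789012:user/alice')); // => true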
@@ -1,10 +1,11 @@
-import { errors } from 'arsenal';
-import crypto from 'crypto';
+const { errors } = require('arsenal');
 
-import config from '../../Config';
-import Indexer from './indexer';
+const crypto = require('crypto');
+
+const { config } = require('../../Config');
+const Indexer = require('./indexer');
 
-import { calculateSigningKey, hashSignature } from './vaultUtilities';
+const { calculateSigningKey, hashSignature } = require('./vaultUtilities');
 
 const authIndex = new Indexer(config.authData);
 
@@ -156,6 +157,9 @@ const backend = {
      * @param {object} requestContextParams.paramaterize - params that have
      * arrays as values since a requestContext needs to be constructed with
      * each option in Vault
+     * @param {object[]} requestContextParams.paramaterize.specificResource -
+     * specific resources paramaterized as an array of objects containing
+     * properties `key` and optional `versionId`
      * @param {string} userArn - arn of requesting user
      * @param {object} log - log object
      * @param {function} cb - callback with either error or an array
@@ -165,14 +169,14 @@ const backend = {
      */
     checkPolicies: (requestContextParams, userArn, log, cb) => {
         let results;
-        const specificResourceParams =
-            requestContextParams.parameterize.specificResource;
-        if (specificResourceParams) {
+        const parameterizeParams = requestContextParams.parameterize;
+        if (parameterizeParams && parameterizeParams.specificResource) {
             // object is parameterized
-            results = specificResourceParams.map(obj => ({
+            results = parameterizeParams.specificResource.map(obj => ({
                 isAllowed: true,
                 arn: _buildArn(requestContextParams
-                    .constantParams.generalResource, obj),
+                    .constantParams.generalResource, obj.key),
+                versionId: obj.versionId,
             }));
         } else {
             results = [{
@@ -191,4 +195,4 @@ const backend = {
     },
 };
 
-export default backend;
+module.exports = backend;
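The checkPolicies change upgrades each parameterized resource from a bare key to an object that can also carry a versionId, and guards against a missing parameterize block. Roughly, the shapes involved (all values illustrative; the real ARN comes from the module's private _buildArn helper):

// Input: one general resource, parameterized over specific objects.
const requestContextParams = {
    constantParams: { generalResource: 'my-bucket' },
    parameterize: {
        specificResource: [
            { key: 'photos/cat.jpg' },
            { key: 'photos/dog.jpg', versionId: '39383334' },
        ],
    },
};

// Output mirrors the map() in the hunk above; the in-memory backend
// allows everything, so isAllowed is always true.
function checkPoliciesResults(params) {
    return params.parameterize.specificResource.map(obj => ({
        isAllowed: true,
        // Illustrative ARN construction; the module uses _buildArn.
        arn: `arn:aws:s3:::${params.constantParams.generalResource}/${obj.key}`,
        versionId: obj.versionId,
    }));
}

console.log(checkPoliciesResults(requestContextParams));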
@@ -0,0 +1,25 @@
+/** build simple authdata with only one account
+ * @param {string} accessKey - account's accessKey
+ * @param {string} secretKey - account's secretKey
+ * @return {object} authdata - authdata with account's accessKey and secretKey
+ */
+function buildAuthDataAccount(accessKey, secretKey) {
+    return {
+        accounts: [{
+            name: 'CustomAccount',
+            email: 'customaccount1@setbyenv.com',
+            arn: 'aws::iam:123456789012:root',
+            canonicalID: '12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d52' +
+                '18e7cd47qwer',
+            shortid: '123456789012',
+            keys: [{
+                access: accessKey,
+                secret: secretKey,
+            }],
+        }],
+    };
+}
+
+module.exports = {
+    buildAuthDataAccount,
+};
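The new helper lets the server boot with a single account whose keys are supplied at run time. A likely call site, with the require path and environment variable names assumed for illustration:

// Path and environment variable names are assumptions for this sketch.
const { buildAuthDataAccount } = require('./buildAuthDataAccount');

const accessKey = process.env.S3_ACCESS_KEY || 'accessKey1';
const secretKey = process.env.S3_SECRET_KEY || 'verySecretKey1';

const authData = buildAuthDataAccount(accessKey, secretKey);
console.log(authData.accounts[0].keys);
// => [{ access: 'accessKey1', secret: 'verySecretKey1' }] when env unset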
@@ -1,7 +1,7 @@
-import { Logger } from 'werelogs';
+const werelogs = require('werelogs');
 
 // Here, we expect the logger to have already been configured in S3
-const log = new Logger('S3');
+const log = new werelogs.Logger('S3');
 
 function incr(count) {
     if (count !== undefined) {
@@ -158,7 +158,7 @@ function dumpErrors(checkData) {
  * @return {boolean} true on erroneous data
  * false on success
  */
-export default function check(authdata) {
+function check(authdata) {
     const checkData = {
         errors: [],
         emails: [],
@@ -180,3 +180,5 @@ export default function check(authdata) {
 
     return dumpErrors(checkData);
 }
+
+module.exports = check;