Compare commits

...

290 Commits

Author SHA1 Message Date
Rahul Padigela a26af365d9 Merge remote-tracking branch 'origin/rel/6.4' into fwdport/6.4-7 2017-08-14 10:47:49 -07:00
Lauren Spiegel 7c05150f1d Merge pull request #815 from scality/backport/fix-mpu-prefix
backport to rel/7.0: missing mpu bucket prefix [S3C-632]
2017-07-11 17:04:53 -07:00
Electra Chong dda6c4e551 fix: missing mpu bucket prefix
(cherry picked from commit 2fb0b6855a)
2017-07-11 11:45:32 -07:00
Electra Chong beb93d0f65 chore: lock dependency package versions 2017-07-11 11:45:32 -07:00
Lauren Spiegel 3c889df957 Merge pull request #782 from scality/rf/extractUtils
Rf/extract utils
2017-06-21 15:04:48 -07:00
Lauren Spiegel ae02ea7896 RF: Remove general utils file
1) Extract utilities of general use to arsenal
2) Move specific utils to correct place
3) Remove utils not being used
2017-06-21 11:58:38 -07:00
Lauren Spiegel ecd3951d09 Merge pull request #784 from scality/S3C-556/addMoreInfoInError
FT: More info on location err message
2017-06-21 11:45:47 -07:00
Lauren Spiegel 72c4384b20 FT: More info on location err message 2017-06-21 10:35:55 -07:00
Lauren Spiegel cadc669b76 Merge pull request #777 from scality/ft/new-docker-env-vars
New docker environment variables
2017-06-20 17:27:00 -07:00
Rached Ben Mustapha ba52f4f2b1 Document new docker env vars 2017-06-20 13:19:45 -07:00
Rached Ben Mustapha 54f224cde0 Allow changing configuration in docker-entrypoint
The following variables are handled:

* `LISTEN_ADDR`: address to bind to for all processes if not localhost
* `DATA_HOST`: data server if not localhost
* `METADATA_HOST`: metadata server if not localhost
* `REDIS_HOST`: stats cache host
* `REDIS_PORT`: stats cache port
2017-06-20 13:19:45 -07:00
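A minimal sketch of the idea behind this change: map the environment variables above onto config.json before starting the server. The real entrypoint does this in shell with jq (see the "Install jq" commit below); the Node version here is purely illustrative, and any config keys beyond those visible in this repository's config.json are assumptions.
```js
// Hypothetical Node equivalent of the jq-based entrypoint logic: patch
// config.json from environment variables (LISTEN_ADDR handling omitted).
const fs = require('fs');

const config = JSON.parse(fs.readFileSync('config.json', 'utf8'));

if (process.env.METADATA_HOST) {
    config.metadataClient = Object.assign({}, config.metadataClient,
        { host: process.env.METADATA_HOST });
}
if (process.env.DATA_HOST) {
    config.dataClient = Object.assign({}, config.dataClient,
        { host: process.env.DATA_HOST });
}
if (process.env.REDIS_HOST || process.env.REDIS_PORT) {
    config.localCache = {
        host: process.env.REDIS_HOST || '127.0.0.1',
        port: Number(process.env.REDIS_PORT) || 6379,
    };
}

fs.writeFileSync('config.json', JSON.stringify(config, null, 4));
```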
Rached Ben Mustapha 5bf463ac31 Install jq for easy config file changes 2017-06-20 13:19:45 -07:00
Lauren Spiegel df2c4a41f4 Merge pull request #781 from scality/FIX/update-tests
Update how multiple backend tests are run
2017-06-20 12:55:12 -07:00
Dora Korpar a83a402672 Update how multiple backend tests are run 2017-06-20 11:56:50 -07:00
Lauren Spiegel 7dac05eb22 Merge pull request #773 from scality/FIX/awsSocketHangup
Minimize socket hangup errors from AWS
2017-06-20 10:41:43 -07:00
Dora Korpar d0c71cb778 Minimize socket hangup errors from AWS
Minimize socket hangup errors from AWS

AWS was returning NoSuchKey when we issued a GET request immediately after a PUT to AWS, so the GET check needs to be delayed until AWS registers the object as present.
2017-06-19 17:57:24 -07:00
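A rough illustration of the workaround described above, using the aws-sdk client; the retry count and delay are arbitrary values for the sketch, not the project's actual code.
```js
// Illustrative only: retry a GET a few times with a short delay so a
// just-written object has time to become visible on the AWS backend.
const AWS = require('aws-sdk');

const s3 = new AWS.S3();

function getWithRetry(params, retriesLeft, callback) {
    s3.getObject(params, (err, data) => {
        if (err && err.code === 'NoSuchKey' && retriesLeft > 0) {
            // assumed delay; the real fix may use different timing
            return setTimeout(() =>
                getWithRetry(params, retriesLeft - 1, callback), 500);
        }
        return callback(err, data);
    });
}

getWithRetry({ Bucket: 'mybucket', Key: 'mykey' }, 3, (err, data) => {
    if (err) {
        return console.error(err);
    }
    return console.log(data.ContentLength);
});
```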
Lauren Spiegel 8aa9e76d96 Merge pull request #771 from scality/BF/S3C-531-chordcos
BF S3C-531 chordCos must be a 1-digit string smaller than 7
2017-06-17 20:25:25 -07:00
Marc Ségura 24b6ac1609 BF S3C-531 chordCos must be a 1-digit string smaller than 7
chordCos must be a 1-digit string smaller than 7
fix S3C-531
2017-06-17 19:14:04 -07:00
Lauren Spiegel 612d5ca8b6 Merge pull request #774 from scality/bf/S3C-548-sproxydImmutableRework
bf: immutable optim option on S3 sproxyd client
2017-06-17 13:11:01 -07:00
Jonathan Gramain a07e188d08 bf: immutable optim option on S3 sproxyd client
This is a new option in scality/sproxydclient that explicitly enables
immutable optimizations; because they are now disabled by default, it is
now necessary to provide the option.

Goes along with PR https://github.com/scality/sproxydclient/pull/119
2017-06-16 22:30:14 -07:00
Lauren Spiegel 7312053539 Merge pull request #731 from scality/doc/adddoc
DOC: add documentation
2017-06-16 16:22:01 -07:00
Nicolas Humbert ac8b3c267c DOC: add documentation 2017-06-16 14:09:38 -07:00
Lauren Spiegel b96a81fd70 Merge pull request #768 from scality/rf/extract-routes
Rf/extract routes to Arsenal [S3C-511]
2017-06-16 10:17:46 -07:00
Electra Chong 7e928addcf rf: move s3 routes to Arsenal
Extracting routes for use in other S3-related projects.
2017-06-15 17:51:07 -07:00
Lauren Spiegel e811f63dff Merge pull request #748 from scality/TEST/putacl-nullver
fix: clean up existing master null version before overwrite [S3C-444]
2017-06-15 16:37:56 -07:00
Electra Chong fe7cf1e750 fix: cleanup mst null version before overwrite 2017-06-15 12:03:50 -07:00
Electra Chong 83003e2378 rf: factorize versioningPreprocessing 2017-06-15 12:03:50 -07:00
Electra Chong d7d887b284 cleanup: organize putacl ver utilities 2017-06-15 11:10:51 -07:00
Lauren Spiegel 982efd91c5 Merge pull request #755 from scality/refacto/multiple
REFACTO: multiple backend gateway
2017-06-14 18:22:17 -07:00
Nicolas Humbert b2bedd20db REFACTO: multiple backend gateway 2017-06-14 14:09:53 -07:00
Lauren Spiegel 426cddc564 Merge pull request #772 from scality/FIX/multBackendFuncTest
Fix multiple backend tests
2017-06-14 12:41:30 -07:00
Dora Korpar 464cb74d42 Fix tests 2017-06-14 11:43:44 -07:00
Lauren Spiegel 52f2265d59 Merge pull request #766 from scality/fix/locConstraintFlex
FIX: Allow operation if have valid loc
2017-06-13 10:17:58 -07:00
Lauren Spiegel c8ad30964c TEST: Unknown endpoint tests 2017-06-12 15:55:21 -07:00
Lauren Spiegel 4391b7ce3f FT: Set us-east-1 as default
On put bucket, if no locationConstraint is provided
and the endpoint hit is not in the config (since it is
an IP address and path style request), use us-east-1
as locationConstraint.
2017-06-12 14:58:32 -07:00
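A hedged sketch of the fallback this commit describes; the helper name and the config shape are illustrative, not the project's actual code.
```js
// Hypothetical helper mirroring the described behavior: fall back to
// us-east-1 when no locationConstraint is given and the request endpoint
// is not listed in restEndpoints (e.g. an IP address, path-style request).
function chooseBucketLocation(locationConstraint, endpoint, restEndpoints) {
    if (locationConstraint) {
        return locationConstraint;
    }
    if (restEndpoints[endpoint]) {
        return restEndpoints[endpoint];
    }
    return 'us-east-1';
}

// Example: PUT bucket against http://127.0.0.10:8000 with no constraint
console.log(chooseBucketLocation(undefined, '127.0.0.10',
    { localhost: 'file', 's3.docker.test': 'us-east-1' }));
// => 'us-east-1'
```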
Lauren Spiegel 25cb39df38 FIX: Allow operation if have valid loc
If there is an object-level location constraint
or a bucket-level location constraint, allow the
operation to continue even if the restEndpoint is
not in the config.
2017-06-12 14:58:32 -07:00
Lauren Spiegel c9e220fa1a Merge pull request #712 from scality/ft/versioning-design
ft: add versioning info to DESIGN.md
2017-06-12 14:37:21 -07:00
Electra Chong c354c7c6a8 ft: add versioning info to DESIGN.md 2017-06-09 17:08:12 -07:00
Lauren Spiegel 7c8c9443fd Merge pull request #763 from scality/S3C-500/addEndpointLog
LOG: Add endpoint log
2017-06-08 14:15:50 -07:00
Lauren Spiegel d3756f35a3 LOG: Add endpoint log 2017-06-08 12:35:44 -07:00
Lauren Spiegel df329a9e74 Merge pull request #762 from scality/S3C-508/removeExcessLogInfo
FIX: Remove excess logging info
2017-06-08 12:33:55 -07:00
Lauren Spiegel 6150fdff63 FIX: Remove excess logging info 2017-06-08 11:34:34 -07:00
Lauren Spiegel 4091a15545 Merge pull request #734 from scality/S3C-380/get-security-token
S3C-380: Get security token
2017-06-08 10:48:54 -07:00
Alexandre Merle fcca62b303 S3C-380: Get security token
Send security token to vault

See https://scality.atlassian.net/browse/S3C-380
2017-06-07 10:58:30 +02:00
Lauren Spiegel 28c509a477 Merge pull request #756 from scality/ft/limit-objmd-size
ft: limit user metadata to 2 KB
2017-06-05 16:48:37 -07:00
Electra Chong dfb97ba4c6 ft: limit user metadata to 2 KB
AWS limits user-provided metadata to 2 KB. Now that we are implementing AWS and other backends, we should be consistent in the amount of user metadata we allow during puts.
2017-06-05 15:05:11 -07:00
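A sketch of how such a cap can be enforced on `x-amz-meta-*` headers; whether header names count toward the limit, and where the check actually lives in the codebase, are assumptions here.
```js
// Sketch: sum the byte length of user metadata headers (x-amz-meta-*) and
// reject the put when it exceeds the 2 KB limit. Error handling simplified.
function checkUserMetadataSize(headers) {
    const limit = 2 * 1024;
    let size = 0;
    Object.keys(headers).forEach(name => {
        if (name.toLowerCase().startsWith('x-amz-meta-')) {
            size += Buffer.byteLength(name, 'utf8') +
                Buffer.byteLength(String(headers[name]), 'utf8');
        }
    });
    return size <= limit;
}

console.log(checkUserMetadataSize({ 'x-amz-meta-color': 'blue' })); // true
```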
Lauren Spiegel 1bb17a6502 Merge pull request #747 from scality/fx/copyobjectacl
S3C-413 copy object ACL with s3:PutObjectAcl permission
2017-06-02 09:53:25 -07:00
Nicolas Humbert 41529cd208 S3C-413 copy object ACL with s3:PutObjectAcl permission 2017-06-01 14:54:45 -07:00
Lauren Spiegel 0961721fbd Merge pull request #735 from scality/ft/S3C-48-awsBackend
Add AWS backend
2017-06-01 13:54:20 -07:00
Dora Korpar 9c33ae6df6 Add aws backend 2017-05-31 18:01:23 -07:00
Lauren Spiegel 4a6a247839 Merge pull request #752 from scality/S3C-291-add-value-for-omitted-ID-put-bucket-replication
FT: Create replication rule ID if not provided
2017-05-31 17:06:03 -07:00
Bennett Buchanan 7daa9a5390 FT: Create replication rule ID if not provided 2017-05-31 16:09:29 -07:00
Lauren Spiegel 6201d5b074 Merge pull request #745 from scality/fx/putobjectacl
S3C-413 put object ACL with s3:PutObjectAcl permission
2017-05-31 14:40:32 -07:00
Nicolas Humbert 7076660d8b S3C-413 put object ACL with s3:PutObjectAcl permission 2017-05-31 12:00:14 -07:00
Lauren Spiegel 6b8443012e Merge pull request #582 from scality/ft/object-info
REFACTOR: Metadata object using class
2017-05-31 11:38:59 -07:00
Alexandre Merle c22e44f63d REFACTOR: Metadata object using class 2017-05-30 18:05:31 -07:00
Lauren Spiegel 246ada580c Merge pull request #750 from scality/DOCKER/multiple
DOCKER: add env var for multiple data backends
2017-05-30 17:48:32 -07:00
Nicolas Humbert 612f9f6a7b DOCKER: add env var for multiple data backends 2017-05-30 17:15:21 -07:00
Rahul Padigela 9efa353228 Merge pull request #729 from scality/S3C-291-put-bucket-replication
FT: Add bucketPutReplication API
2017-05-27 01:47:50 -07:00
Bennett Buchanan ae14f00eb2 FT: Add bucketPutReplication API 2017-05-26 18:43:21 -07:00
Vianney Rancurel 82c9204212 Merge pull request #749 from scality/ft/straightUpGets
FIX: Only stream from one data location at a time.
2017-05-26 14:16:41 -07:00
Lauren Spiegel 1ae15540d5 FIX: Only stream from one data location at a time.
S3C-429
Prevent memory problems by no longer double buffering.
2017-05-26 13:03:50 -07:00
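A simplified sketch of the streaming pattern this implies: fetch and pipe one data location at a time instead of buffering several at once. `getLocationStream` is a hypothetical stand-in for the data backend client.
```js
// Pipe each location's stream into the response sequentially; only start
// the next location once the previous stream has ended, so at most one
// location is buffered at any time.
function streamLocations(locations, getLocationStream, output, callback) {
    if (locations.length === 0) {
        output.end();
        return callback(null);
    }
    const stream = getLocationStream(locations[0]);
    stream.on('error', callback);
    stream.on('end', () =>
        streamLocations(locations.slice(1), getLocationStream, output,
            callback));
    stream.pipe(output, { end: false });
    return undefined;
}
```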
Lauren Spiegel e579bf893a Merge pull request #746 from scality/CLEANUP/remove-babel
cleanup: remove babel remnants
2017-05-26 11:19:41 -07:00
Electra Chong 7dc199f65f cleanup: remove babel remnants 2017-05-25 18:14:58 -07:00
Lauren Spiegel cac7450507 Merge pull request #742 from scality/rf/module-imports
chore: require modules instead of import [S3C-332]
2017-05-24 20:09:05 -07:00
Electra Chong 0ac05ae828 chore: require modules instead of import 2017-05-24 17:37:51 -07:00
Lauren Spiegel 5f32e0479f Merge pull request #730 from scality/ft/copytagging
FT: objectCopy with tags
2017-05-24 16:59:42 -07:00
Nicolas Humbert ad876da61d FT: objectCopy with tags 2017-05-24 16:09:55 -07:00
Lauren Spiegel 54f057cb21 Merge pull request #743 from scality/fx/putobjectwithtagging
FX: putObject with tag set policy
2017-05-23 16:06:29 -07:00
Nicolas Humbert 126d1000bd FX: putobject with tag set policy 2017-05-23 14:41:29 -07:00
Lauren Spiegel 14dc60e288 Merge pull request #733 from scality/ft/getobjectwithtagging
FT: getObject with tagging
2017-05-23 12:17:12 -07:00
Nicolas Humbert 9775048cbb FT: getObject with tagging 2017-05-23 11:34:53 -07:00
Lauren Spiegel 330001477b Merge pull request #741 from scality/test/versioning
TEST: version non-specified (tag and acl)
2017-05-22 16:43:55 -07:00
Nicolas Humbert fcc9dc799e TEST: version non-specified (tag and acl) 2017-05-22 12:49:51 -07:00
Lauren Spiegel fd7464e92b Merge pull request #740 from scality/forward/rel/6.4
Forward/rel/6.4
2017-05-22 10:59:08 -07:00
Dora Korpar 29ef286a0d Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-05-22 09:55:57 -07:00
Lauren Spiegel 82897e2053 Merge pull request #725 from scality/TEST/version-specific-dm-creation
fix: return versionId when delete non-exist ver [S3C-368]
2017-05-19 18:10:06 -07:00
Electra Chong ee5b8811d6 fix: return versionId when delete non-exist ver 2017-05-18 12:54:16 -07:00
Lauren Spiegel 67a12fa551 Merge pull request #721 from scality/S3C-194-handle-utapi-metrics-for-versioning
FT: Handle Utapi metrics for versioning
2017-05-18 12:54:05 -07:00
Bennett Buchanan 7ed6a14fe7 FT: Handle Utapi metrics for versioning 2017-05-18 10:58:30 -07:00
Lauren Spiegel 41bd9ad69d Merge pull request #727 from scality/ft/putobjectwithtag
FT: put object with tag set
2017-05-16 13:04:23 -07:00
Nicolas Humbert 0e6e4fbad2 FT: put object with tag set 2017-05-16 11:10:03 -07:00
Lauren Spiegel 260aa509ef Merge pull request #723 from scality/fix/putemptytag
FIX: put object tag with empty value
2017-05-11 17:37:05 -07:00
Nicolas Humbert cc105ab05b FIX: put object tag with empty value 2017-05-11 09:40:22 -07:00
Lauren Spiegel 94d880ed69 Merge pull request #720 from scality/ft/deleteobjecttagging
FT: deleteObjectTagging
2017-05-10 18:24:33 -07:00
Nicolas Humbert a3fa60b24c FT: deleteObjectTagging 2017-05-10 17:00:44 -07:00
Lauren Spiegel 6801842239 Merge pull request #724 from scality/forward/rel/6.4
Forward/rel/6.4
2017-05-10 16:51:03 -07:00
Lauren Spiegel 4c106b0870 Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-05-10 15:49:20 -07:00
Lauren Spiegel a466c6ded9 Merge pull request #719 from scality/TEST/deleteMarkerNullDelete
test: creating delete markers w/ null ver
2017-05-09 14:04:17 -07:00
Electra Chong 068d2520b1 test: creating delete markers w/ null ver 2017-05-09 13:03:08 -07:00
Lauren Spiegel 12c33be6e1 Merge pull request #717 from scality/ft/getobjecttagging
FT: getObjectTagging
2017-05-08 18:00:37 -07:00
Nicolas Humbert 0085179415 FT: getObjectTagging 2017-05-08 14:56:52 -07:00
Lauren Spiegel 0bafb45378 Merge pull request #718 from scality/fx/putobjecttagging
FIX: putObjectTagging
2017-05-08 14:37:04 -07:00
Nicolas Humbert 2aa584b97e FIX: ACL permissions 2017-05-08 13:23:49 -07:00
Nicolas Humbert 66288573b1 CLEANUP: renaming CORSHeaders 2017-05-05 16:49:04 -07:00
Lauren Spiegel c2a84908cd Merge pull request #716 from scality/CLEANUP/listingTypes
rf: clean up listing types, parsing [S3C-184]
2017-05-05 16:10:39 -07:00
Electra Chong f8532a9ae8 ft: add listing document 2017-05-05 15:05:51 -07:00
Electra Chong 4609f62e04 rf: clean up listing types, parsing
No longer use 'Basic' listing type.
Move JSON parsing for 'DelimiterVersions' to metadata wrapper to be consistent with what we do for other listing types.
Add some more assertions to listing tests.
2017-05-05 12:39:28 -07:00
Lauren Spiegel 83a3f41e35 Merge pull request #709 from scality/ft/putobjecttagging
FT: putObjectTagging
2017-05-05 09:55:40 -07:00
Nicolas Humbert e8f62f24fb FT: putObjectTagging 2017-05-04 17:26:40 -07:00
Lauren Spiegel 64791b1424 Merge pull request #692 from scality/ft/S3C-158-data-daemon
S3C-158 use remote data server for file backend
2017-05-03 18:20:36 -07:00
Jonathan Gramain 830a2f8203 doc: data_metadata_daemon.md documentation 2017-05-03 15:04:14 -07:00
Jonathan Gramain fbcd4e9da7 ft: Use data REST from Arsenal
Use the new REST interface to store and retrieve data blobs

We spawn a new local REST server listening on 9991 (or the value of
config.dataDaemon.port) and we use a REST client module to communicate
with the server.

Additional changes:

 - cleanup: move S3METADATAPATH env var usage in mdserver.js
 - use parseRange from Arsenal
 - fix display of functional test on range: error cases were displayed in place of the "success" case
2017-05-03 15:04:14 -07:00
Jonathan Gramain a29091d306 ft: fix metadata class names
Adapt to changes done in https://github.com/scality/Arsenal/pull/235
2017-05-03 15:04:14 -07:00
Lauren Spiegel 61307e7683 Merge pull request #713 from scality/FIX/verDeleteMarkers
fix: add objAcl checks for delete marker [S3C-184]
2017-05-03 14:15:07 -07:00
Electra Chong 4514541265 fix: add objAcl checks for delete marker 2017-05-03 12:15:03 -07:00
Lauren Spiegel bfc69481e3 Merge pull request #708 from scality/ft/group-replication-token
Ft/group replication token
2017-05-02 10:46:09 -07:00
Electra Chong 0bced55129 ft: parse & use repGroupId from config 2017-05-02 10:07:58 -07:00
Lauren Spiegel 24f60dbe69 Merge pull request #697 from scality/FIX/nullDataDelete
FIX: ensure null ver data is deleted on overwrite [S3C-184]
2017-05-01 13:40:45 -07:00
Electra Chong f0c2e06197 FIX: ensure null ver data is deleted on overwrite
Depends on scality/S3#698
2017-05-01 11:59:09 -07:00
Lauren Spiegel 0f6bc184c0 Merge pull request #711 from scality/forward/rel/6.4
Forward/rel/6.4
2017-05-01 11:56:37 -07:00
Lauren Spiegel 3f4542eb3c Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-05-01 10:10:17 -07:00
Lauren Spiegel 47e6a15eb7 Merge pull request #707 from scality/docker/updatedockerfilemem
DOCKER: update DockerFile mem
2017-04-25 17:37:16 -07:00
Nicolas Humbert dbe3e817e9 DOCKER: update DockerFile mem 2017-04-25 13:56:30 -07:00
Lauren Spiegel a78c65e4cd Merge pull request #698 from scality/FIX/refuseVersionId
fix: put, copy, completeMPU should reject verId
2017-04-25 13:11:50 -07:00
Electra Chong 3f8bc9eafa fix: put, copy, completeMPU should reject verId 2017-04-24 18:22:25 -07:00
Lauren Spiegel 6a8e1cd72f Merge pull request #705 from scality/fx/putbucketwithuser
FIX: put Bucket as a user
2017-04-24 18:09:23 -07:00
Nicolas Humbert 2dd1eb133a FIX: put Bucket as a user
FIXES: #704
2017-04-24 16:47:58 -07:00
Lauren Spiegel 71db32c829 Merge pull request #706 from scality/ft/S3C-193-bucketfile-versioning
S3C-193 bucketfile versioning support
2017-04-24 16:41:03 -07:00
Vinh Tao dd453becba S3C-193 versioning for bucketfile client
This is to support versioning in S3 bucketfile backend by using the
versioning support in Arsenal ported from MetaData.

Additional changes:
- moves the 'SYNC' options to the bucketfile backend in Arsenal
- checks for the error 'ObjNotFound' instead of 'notFound'
2017-04-24 14:15:19 -07:00
Jonathan Gramain 37638ed0ed S3C-193 Adapt code for Arsenal RPC
Adapt to changes in Arsenal due to level-net refactoring
2017-04-24 14:14:35 -07:00
Lauren Spiegel 7318a9de58 Merge pull request #700 from scality/FIX/deleteMarkerMetadata
fix: delete marker creation / md [S3C-184]
2017-04-21 10:23:39 -07:00
Electra Chong ef36f84525 fix: delete marker creation / md
We did not reset the content length when creating delete markers for multi-object delete requests, so the delete marker stored a content-length equal to the length of the request XML. This is incorrect and causes the request to hang, at least for the file backend, and it would likely also skew the byte counts tracked in Utapi.
2017-04-20 12:08:16 -07:00
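For illustration only, the shape of the fix: a delete marker's metadata should carry a content-length of 0 rather than the length of the request body. The field names below follow common S3 metadata naming and are assumptions, not the project's actual schema.
```js
// Build delete-marker metadata from existing object metadata, resetting
// content-length to 0 and dropping data locations (illustrative fields).
function buildDeleteMarkerMD(objectMD) {
    return Object.assign({}, objectMD, {
        isDeleteMarker: true,
        'content-length': 0,
        location: null,
    });
}
```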
Lauren Spiegel 284d0bc9bc Merge pull request #703 from scality/test/getObject
S3C-184 TEST: add getObject versioning tests
2017-04-20 10:06:29 -07:00
Nicolas Humbert 39baa0bede S3C-184 TEST: add getObject versioning tests 2017-04-19 18:00:06 -07:00
Lauren Spiegel 00aabbfc01 Merge pull request #702 from scality/fx/functionaltests
FIX: package.json for functional tests
2017-04-19 14:09:38 -07:00
Nicolas Humbert 0f4ef8e02b FIX: package.json for functional tests 2017-04-19 10:35:50 -07:00
Lauren Spiegel de9e8b3b07 Merge pull request #699 from scality/fix/alwayslocation
S3C-287 FIX: Always set bucket location
2017-04-19 09:39:53 -07:00
Nicolas Humbert 4792fe6fe1 S3C-287 FIX: Always set bucket location 2017-04-18 13:47:21 -07:00
Lauren Spiegel 3f9c2a5d2d Merge pull request #680 from scality/ft/multiobjdelete-versioningpolicy
ft: add versionId to multiObjDelete requestContext [S3C-184]
2017-04-14 18:06:30 -07:00
Electra Chong 97a5633ef0 ft: add versionId to multiObjDelete requestContext 2017-04-14 17:06:21 -07:00
Lauren Spiegel d5f47c5b67 Merge pull request #694 from scality/ft/S3C-286-splitAwsSdkTests
ft: split aws-sdk tests
2017-04-14 16:22:48 -07:00
Rahul Padigela c3a38af756 cleanup: remove aws-sdk package.json 2017-04-14 15:40:23 -07:00
Rahul Padigela 4c1d0d7084 ft: split aws-sdk tests
Split aws-sdk tests so that parallel CI containers can be leveraged to run
different test suites.
This introduces a manual step when adding a new test directory under aws-sdk, as
the end-to-end CI no longer just runs `npm run ft_test`. For the tests to run in
the end-to-end CI, the new dir name needs to be added in S3/package.json to either
- ft_awssdk_buckets
- ft_awssdk_objects_misc
- ft_awssdk_versioning

If a new npm command is being introduced, make sure to update Integration scripts
to run the test suite.
2017-04-14 15:40:23 -07:00
Lauren Spiegel 367903472a Merge pull request #687 from scality/fx/multideleteversioning
FIX: returning correct DeleteMarkerVersionId when deleteObjects
2017-04-14 15:27:40 -07:00
Nicolas Humbert 80ae2d8b9e FIX: returning correct DeleteMarkerVersionId when deleteObjects 2017-04-14 14:00:00 -07:00
Lauren Spiegel 9f036e59ec Merge pull request #695 from scality/ft/noNoiseHealthcheck
No log clutter on successful healthcheck
2017-04-13 17:03:51 -07:00
Dora Korpar f14bc5fb1a No log clutter on successful healthcheck 2017-04-13 10:22:03 -07:00
Lauren Spiegel c65d3c9f31 Merge pull request #693 from scality/cleanup/removeMetaValAuth
CLEANUP: Remove obsolete metadataValidateAuthorization func
2017-04-13 10:14:34 -07:00
Dora Korpar 118f128a07 Remove obsolete metadataValidateAuthorization func 2017-04-12 17:38:16 -07:00
Lauren Spiegel 2413188998 Merge pull request #696 from scality/fix/ymllint
FIX - yml linting
2017-04-12 17:36:46 -07:00
Lauren Spiegel a64bac4361 FIX - yml linting 2017-04-12 16:48:05 -07:00
Giorgio Regni 441ba89c48 Set theme jekyll-theme-minimal 2017-04-12 16:09:38 -07:00
Lauren Spiegel 049e1204c0 Merge pull request #691 from scality/test/deletebucketversioning
TEST: add more tests for deleteBucket with versioning
2017-04-12 14:42:30 -07:00
Nicolas Humbert fe717fc826 TEST: add more tests for deleteBucket with versioning
S3C-184
2017-04-12 11:10:25 -07:00
Lauren Spiegel 293ff1e1ed Merge pull request #689 from scality/test/copyversioningchecks
TEST: versioning objectCopy add more checks
2017-04-12 10:36:59 -07:00
Nicolas Humbert a5919d51ea TEST: versioning objectCopy add more checks
S3C-184
2017-04-10 16:59:58 -07:00
Lauren Spiegel e08f57fbd7 Merge pull request #648 from scality/ft/S3C-35-useLevelNet
S3C-35 Refactoring of bucketfile
2017-04-07 12:11:52 -07:00
Jonathan Gramain b9adc5e969 S3C-35 Refactoring of bucketfile (S3 file metadata backend)
The changes allow bucketfile to use a new API in Arsenal to
communicate with a remote leveldb database, containing sublevels for
bucket storage. Metadata is still stored on a local levelDB server,
but it should now be easy to move the storage logic in a new daemon
running on a remote server, and it should be robust.

Instead of relying on the existing implementation of multilevel, it
uses client/server wrappers around a new level-net communication
protocol and API in Arsenal based on socket.io to exchange messages.

It shall be compatible with the existing metadata since it still uses
the core sublevel module for the storage logic; only the RPC protocol
has changed.

Test done:

 - put a few 100s of files in different S3 subdirectories
 - list directories and subdirectories
 - get/delete files
 - multi-part upload
 - introduce random connection errors (tcpkill) to check robustness
   and automatic reconnection
2017-04-06 14:56:38 -07:00
Bennett Buchanan d1efb3b842 Merge pull request #667 from scality/S3C-149/ft/use-request-logger-for-vaultclient
FT: Pass requestUID to vaultclient
2017-04-06 14:42:59 -07:00
Bennett Buchanan 60159e1418 FT: Pass reqUID to vaultclient 2017-04-06 12:44:04 -07:00
Lauren Spiegel 599573deb6 Merge pull request #683 from scality/S3C-206-fail-healthchecks-properly
Fail healthchecks if any one of multiple data backends fails
2017-04-06 12:41:43 -07:00
Rached Ben Mustapha db4e00faed Fail healthchecks if any one of multiple data backends fails 2017-04-06 10:49:38 -07:00
Lauren Spiegel 3c67970d07 Merge pull request #685 from scality/fix/copyversioning
FIX copy with versioning
2017-04-06 10:47:50 -07:00
Nicolas Humbert 7e532cf416 FIX copy with versioning 2017-04-05 17:50:30 -07:00
Lauren Spiegel f44351ce0c Merge pull request #684 from scality/ft/version-encoding
rf: change encoding of version ids [S3C-184]
2017-04-05 16:58:58 -07:00
Electra Chong b5b943741f rf: change encoding of version ids 2017-04-05 16:13:07 -07:00
Lauren Spiegel 00a26a32b9 Merge pull request #682 from scality/fix/mfadelete
bf: versioning - check if MfaDelete is enabled
2017-04-05 15:56:39 -07:00
Nicolas Humbert f534cc1088 bf: versioning - check if MfaDelete is enabled 2017-04-05 14:44:59 -07:00
Lauren Spiegel f381302ff8 Merge pull request #681 from scality/fix/bucketfile
fix: check for numeric filtering results instead of boolean
2017-04-05 10:35:10 -07:00
Vinh Tao 68e4608d6b fix: bucketfile to check the correct filter results 2017-04-04 22:25:59 +02:00
Vinh Tao 9e4cd90017 test: bucketfile to check the correct filter results 2017-04-04 22:25:59 +02:00
Lauren Spiegel f6ad4c5110 Merge pull request #679 from scality/bf/dataFileMemBackends
bf/Handle dataFile and dataMem backend names
2017-04-04 13:12:03 -07:00
Dora Korpar 6310f4d325 Handle dataFile and dataMem backend names 2017-04-04 11:42:05 -07:00
Lauren Spiegel 7b60f60f0b Merge pull request #678 from scality/ft/versioningpolicy
ft: proper versioning action for Vault
2017-04-04 10:41:34 -07:00
Nicolas Humbert 1edb581bb0 ft: proper versioning action for Vault
S3C-156
2017-04-03 14:11:54 -07:00
Lauren Spiegel 84229c1a3c Merge pull request #649 from Tiduster/tiduster/no-root
Tiduster/no root
2017-04-03 11:24:21 -07:00
Christian Patry f190e9186d DOCKER: improve image purge and add unprivileged user section in readme
Signed-off-by: Christian Patry <christian.patry@polyconseil.fr>
2017-04-03 13:42:56 +02:00
Electra Chong ae5a81f1cd Merge pull request #651 from scality/ft/vsp
s3 versioning api [S3C-44]
2017-04-01 10:18:59 -07:00
Electra Chong ec2c123684 fix: update aws-sdk version for install_ft_deps 2017-04-01 09:38:46 -07:00
Electra Chong ea928f0e9f test: objectHead 2017-04-01 09:38:46 -07:00
Electra Chong 0cc7df6382 test: objectPutCopyPart 2017-04-01 09:38:46 -07:00
Vinh Tao 86cabedf27 test: general versioning 2017-04-01 09:38:46 -07:00
Vinh Tao 4c99e25ce6 test: fixes and clean-up 2017-04-01 09:38:46 -07:00
Vinh Tao f22be3850d test: update completeMPU 2017-04-01 09:38:46 -07:00
Vinh Tao add060a35a test: objectCopy 2017-04-01 09:38:46 -07:00
Vinh Tao 4256c94992 test: objectACL
Apparently aws-sdk version 2.2.28 does not support versioning ACL
operations (error "UnexpectedParameter: Unexpected key 'VersionId'
found in params"); upgrading Integration's aws-sdk version to 2.28.0
solves the issue. Depends on scality/Integration#483
2017-04-01 09:38:45 -07:00
Vinh Tao aeacd163f7 test: multiObjectDelete 2017-04-01 09:38:45 -07:00
Vinh Tao 98ecb15ada test: list object versions 2017-04-01 09:38:45 -07:00
Electra Chong a3f31a95ca test: basic functional tests 2017-04-01 09:38:45 -07:00
Vinh Tao aab7cac02b test: update existing unit tests 2017-04-01 09:38:45 -07:00
Electra Chong 3c290dff1d ft: versioning bucketDelete 2017-04-01 09:38:45 -07:00
Electra Chong 0527badabe ft: versioning multiObjectDelete 2017-04-01 09:38:45 -07:00
Vinh Tao 3b6598650d ft: versioning objectPutCopyPart 2017-04-01 09:38:45 -07:00
Vinh Tao b21d9ac9bc ft: versioning objectCopy 2017-04-01 09:38:45 -07:00
Vinh Tao bd40bb506f ft: versioning objectGetACL 2017-04-01 09:38:44 -07:00
Vinh Tao ab2b1867ed ft: versioning objectPutACL 2017-04-01 09:38:44 -07:00
Vinh Tao dcccbf09a6 ft: versioning listMultipartUploads 2017-04-01 09:38:44 -07:00
Vinh Tao 633da4c13c ft: versioning completeMultipartUpload 2017-04-01 09:38:44 -07:00
Vinh Tao e200b3334f ft: versioning objectHead 2017-04-01 09:38:44 -07:00
Vinh Tao 85b173361b ft: versioning bucketGet 2017-04-01 09:38:44 -07:00
Vinh Tao 700364d3dd ft: versioning objectGet api 2017-04-01 09:38:44 -07:00
Vinh Tao f9ef5a9b8c ft: versioning objectDelete api 2017-04-01 09:38:44 -07:00
Vinh Tao 3edd311a24 ft: versioning for objectPut api 2017-04-01 09:38:43 -07:00
Vinh Tao 4e7eb9231e ft: versioning for metadata local backends 2017-04-01 09:38:43 -07:00
Vinh Tao 84d5084587 ft: versioning tools 2017-04-01 09:38:43 -07:00
Vinh Tao 67745772de clean: some optimization and dev dependencies 2017-03-31 14:28:33 -07:00
Lauren Spiegel 362c579cf4 Merge pull request #665 from scality/FT/issuetemplate
FT: add issue template
2017-03-30 17:48:08 -07:00
Nicolas Humbert fbd0e01689 FT: add issue and pull request template 2017-03-30 17:20:47 -07:00
Lauren Spiegel c02af7f814 Merge pull request #676 from scality/forward/rel/6.4
FWD: forward/rel/6.4 to master
2017-03-30 17:16:10 -07:00
Bennett Buchanan 146dd0ca9a Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-03-30 16:23:06 -07:00
Lauren Spiegel be072cdc62 Merge pull request #675 from scality/bf/rangeemptyfile
BF: get empty object with valid/invalid range
2017-03-30 16:19:03 -07:00
Nicolas Humbert 3765a68bcf BF: get empty object with valid/invalid range
FIXES: #660
2017-03-30 15:35:37 -07:00
Lauren Spiegel 1fcb189a94 Merge pull request #670 from scality/dev/defaultlocation
BF: default location for non-listed endpoint
2017-03-29 15:33:57 -07:00
Nicolas Humbert 78d7c91a60 BF: default location for non-listed endpoint
FIXES #669
2017-03-29 14:13:24 -07:00
Lauren Spiegel 5579442ec9 Merge pull request #674 from scality/forward/rel/6.4
Forward/rel/6.4
2017-03-29 14:04:37 -07:00
Dora Korpar 4b2e2b9704 Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-03-29 13:07:23 -07:00
Lauren Spiegel fc8c48d3db Merge pull request #668 from scality/ft/setSproxydPath
Make sproxyd path customizable
2017-03-29 10:40:44 -07:00
Dora Korpar 19cc6eb9ba Make sproxyd path customizable 2.0 & fix chordCos parsing 2017-03-28 17:09:36 -07:00
Lauren Spiegel 5b1633ac63 Merge pull request #661 from scality/fx/locationerror
FIX: return invalid location constraint error
2017-03-28 15:39:40 -07:00
Nicolas Humbert 803ad60bb3 FIX: return invalid location constraint error 2017-03-28 10:50:05 -07:00
Lauren Spiegel 3a036de12b Merge pull request #663 from scality/bf/flushWriteStreamBeforeFsync
Flush write stream buffers before fsync()
2017-03-27 16:45:46 -07:00
Jonathan Gramain f341c98e71 Flush write stream buffers before fsync()
This should fix corruption issue when shutting down the power, by
forcing fsync() to be called after all user-space buffers have been
flushed to the OS.

Credits to @RemiCardona for spotting the root cause of the issue

fixes #662
2017-03-24 18:08:43 -07:00
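A minimal Node sketch of the ordering enforced by the fix, assuming a plain file descriptor: finish the write stream first so user-space buffers reach the OS, then fsync() so the data survives a power loss. The actual data backend code is more involved; path and payload are examples.
```js
const fs = require('fs');

const fd = fs.openSync('/tmp/blob', 'w');
const ws = fs.createWriteStream(null, { fd, autoClose: false });

// end() flushes the stream's user-space buffers; fsync() only after that.
ws.end('some data', () => {
    fs.fsync(fd, err => {
        if (err) {
            console.error('fsync failed', err);
        }
        fs.closeSync(fd);
    });
});
```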
Lauren Spiegel 1bddac12e3 Merge pull request #659 from scality/ft/multipleBackendHealthcheck
Implement healthcheck for multiple backend
2017-03-24 14:33:20 -07:00
Dora Korpar d4b5ff661a Fix tests not running 2017-03-24 10:58:28 -07:00
Dora Korpar e6de5b6e3c Implement healthcheck for multiple backends 2017-03-24 10:58:28 -07:00
Lauren Spiegel 84a7e6f59d Merge pull request #650 from scality/bf/sproxydConfigParse
Use precise key to assign sproxyd
2017-03-21 17:23:24 -07:00
Dora Korpar 8cee39c2ad Use precise key to assign sproxyd 2017-03-21 15:40:24 -07:00
Lauren Spiegel cf0e32b4d1 Merge pull request #657 from scality/fix/dockerdoc
FIX: updating Docker configuration documentation
2017-03-21 15:30:01 -07:00
Nicolas Humbert 0c5154819e FIX: updating Docker configuration documentation
FIXES: #654
2017-03-21 13:40:12 -07:00
Lauren Spiegel 56e227355a Merge pull request #631 from scality/dev/S3C-50-handle-100-continue
FT: Support Expect header field
2017-03-21 13:37:04 -07:00
Bennett Buchanan 9596f5ea22 FT: Support Expect header field
Fix #510
2017-03-21 12:37:06 -07:00
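A client-side sketch of the Expect/100-continue flow this adds support for, using Node's http module: send the headers with `Expect: 100-continue`, wait for the server's interim 100 response (the 'continue' event), then send the body. Authentication headers are omitted; bucket, key and payload are examples.
```js
const http = require('http');

const req = http.request({
    method: 'PUT',
    host: 'localhost',
    port: 8000,
    path: '/mybucket/mykey',
    headers: {
        Expect: '100-continue',
        'Content-Length': 11,
    },
});

req.on('continue', () => {
    // server answered 100 Continue: now transmit the payload
    req.end('hello world');
});

req.on('response', res => console.log('status:', res.statusCode));
```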
Bennett Buchanan b1c7e2c501 Refactor: Move XML parsing to APIs
Fix #265
2017-03-21 12:36:58 -07:00
Lauren Spiegel 3e1d52dd41 Merge pull request #656 from scality/forward/rel/6.4
Forward/rel/6.4
2017-03-21 11:20:58 -07:00
Lauren Spiegel e3473c5f28 Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-03-21 10:41:32 -07:00
Lauren Spiegel 1b14088038 Merge pull request #637 from scality/ft/multipleEnvVariable
Add multiple env variable option
2017-03-21 09:49:46 -07:00
Dora Korpar a34c3ddab8 Add multiple env variable option 2017-03-20 18:42:49 -07:00
Lauren Spiegel e1700d6841 Merge pull request #610 from scality/dev/dockerssl
dev: Running S3 Docker with SSL
2017-03-17 11:47:05 -07:00
Nicolas Humbert 925fe724b4 dev: Run Docker with SSL 2017-03-15 13:01:03 -07:00
Lauren Spiegel 9fccec21ee Merge pull request #645 from scality/forward/rel/6.4
Forward/rel/6.4
2017-03-14 13:47:11 -07:00
Lauren Spiegel e342716607 Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-03-14 12:00:25 -07:00
Lauren Spiegel 9999cf7bc8 Merge pull request #641 from scality/AHohen-patch-1
Update README.md
2017-03-13 11:42:24 -07:00
Anne Hohenberger 8c9f1cd077 Update README.md
Adding "Scality" to the description.
2017-03-13 11:13:06 -07:00
Lauren Spiegel fae2c494c0 Merge pull request #639 from scality/cloudberry2
CloudBerry Lab FREE
2017-03-10 15:34:45 -08:00
Evgeny Rudinsky d6a3d661e0 CloudBerry Lab FREE
Signed-off-by: Evgeny Rudinsky <evgeny.rudinsky@gmail.com>
2017-03-10 14:58:21 -08:00
Lauren Spiegel 626596dcd5 Merge pull request #634 from scality/fix/bucketListing
FIX: Increase bucket listing limit to 10,000
2017-03-08 17:32:25 -08:00
Lauren Spiegel c240d0181b FIX: Increase bucket listing limit to 10,000
Fixes #633
2017-03-08 16:45:36 -08:00
Lauren Spiegel 86ed062300 Merge pull request #623 from scality/ft/keyenvvar
FT: Set one pair of accessKey/secretKey with env variables
2017-03-08 16:29:40 -08:00
Nicolas Humbert 785eb700e0 FT: Set one pair of accessKey/secretKey with env variables 2017-03-08 15:46:56 -08:00
Lauren Spiegel df9ca52506 Merge pull request #629 from scality/ft/separatelocationconfig
FT: Separate location config
2017-03-07 16:18:52 -08:00
Nicolas Humbert facde83c55 ft: Separate location config 2017-03-07 13:41:07 -08:00
Lauren Spiegel c1c5fd7b2d Merge pull request #621 from scality/rf/versioningApi-mdparams
rf: metadata params for versioning S3C-44
2017-03-06 14:13:16 -08:00
Electra Chong 1b939d4420 rf: metadata params for versioning
- reorder metadata wrapper params to precede callback
- pass callback params directly from bucketclient
2017-03-06 11:44:55 -08:00
Lauren Spiegel 28bf60232b Merge pull request #626 from scality/bf/configSproxydParse
Fix logic in parsing of sproxyd config and update tests
2017-03-03 12:30:22 -08:00
Dora Korpar a1aa90ab7b Fix logic in parsing of sproxyd config and update tests 2017-03-02 14:31:49 -08:00
Lauren Spiegel d69c0bd2e6 Merge pull request #624 from scality/log/debugNotWarn
Change warn logs to debug logs
2017-03-02 14:04:58 -08:00
Lauren Spiegel f549a1bc0f Change warn logs to debug logs 2017-03-02 10:29:34 -08:00
Lauren Spiegel e096c4991b Merge pull request #622 from scality/node6/deprecated
node6: worker.suicide and new Buffer() deprecated
2017-03-01 15:01:09 -08:00
Nicolas Humbert 210e224152 new Buffer is deprecated 2017-03-01 12:59:46 -08:00
Nicolas Humbert 38e6044e46 node6: worker.suicide deprecated 2017-03-01 12:59:46 -08:00
Lauren Spiegel 8991faf1b6 Merge pull request #619 from scality/S3C-82-init-uuid
Generate a unique identifier on init
2017-03-01 12:41:28 -08:00
Rached Ben Mustapha 02b796b4c5 Generate a unique identifier on init 2017-03-01 12:00:41 -08:00
Lauren Spiegel a55ed8fde4 Merge pull request #614 from scality/ft/website-test-refactor
FIX: remove auth credential check from websiteHead/Get
2017-03-01 11:55:33 -08:00
Electra Chong f9031e33d0 FIX: Remove credentials check from websiteHead/Get
FIXES #553
2017-03-01 10:48:04 -08:00
Electra Chong 2e405c84b0 RF: Remove zombie dependency from website tests
Not necessary, as we can test results using the node request module, and it was slowing tests down due to the redirect timeouts.
2017-03-01 10:15:20 -08:00
Lauren Spiegel 6f15bd6dbd Merge pull request #588 from scality/ft/node-v6
FT: Node v6
2017-02-28 18:20:33 -08:00
Nicolas Humbert d16bf2881c [FIX] Ceph tests for node-v6
(cherry picked from commit 7f1395fe9599ec7d400d0ba6cd3cc799def3391a)
2017-02-28 15:41:39 +01:00
Alexandre Merle 6e37e1efbe FT: Node v6
Switch to node v6
2017-02-28 15:41:39 +01:00
Lauren Spiegel 4b8c4dbe01 Merge pull request #620 from scality/fix/license_year_update
Update LICENSE
2017-02-27 11:16:42 -08:00
Guillaume Gimenez a06f49bd7e Merge branch 'master' into fix/license_year_update 2017-02-27 10:31:51 -08:00
Lauren Spiegel a29fdc8688 Merge pull request #617 from scality/config/localhost
FIX: Request with hostname localhost returns InvalidURI
2017-02-27 09:56:57 -08:00
Guillaume Gimenez 8fc401536d Update LICENSE 2017-02-24 16:45:39 -08:00
Nicolas Humbert 7024beb748 FIX: Request with hostname localhost returns InvalidURI
FIXES #616
2017-02-24 15:57:30 -08:00
Lauren Spiegel fd8e713746 Merge pull request #603 from scality/ft/queryRoutesNotImplemented
FT: respond NotImplemented to unsupported queries
2017-02-24 15:48:53 -08:00
Electra Chong 5110121d63 RF: Remove legacy bash config files
These files are no longer used in our testing CI and can be removed as we no longer maintain them.
2017-02-24 14:16:53 -08:00
Electra Chong d3b8c078f2 RF: avoid parsing url multiple times 2017-02-24 14:16:53 -08:00
Electra Chong 90e1ccc68b FT: respond NotImplemented to unsupported queries
FIXES #586
2017-02-24 14:16:53 -08:00
Electra Chong 7d4de30d6e FT: upgrade s3cmd to v1.6.1 for s3 ci runs
In the previous version (v1.6.0), s3cmd info returns a NotImplemented error if some of the queries are not supported. We prefer customers to use the newer version, which prints none for any fields that receive a NotImplemented error and still displays output for any query requests that succeed (except when ?location query requests are not supported).
2017-02-24 14:16:53 -08:00
Lauren Spiegel 2c02e0c13c Merge pull request #618 from scality/forward/rel/6.4
Fwdport forward/rel/6.4 to master
2017-02-24 14:15:10 -08:00
Bennett Buchanan 91e98df6ce Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-02-24 11:50:52 -08:00
Lauren Spiegel 8232a1a66b Merge pull request #535 from scality/DEV/getBucketLocation
DEV: getBucketLocation and dealing with 2 configs
2017-02-23 16:39:44 -08:00
Nicolas Humbert 576172ba05 FT: Implement bucket locations
1) Add location setting to putBucket
2) Implement getBucketLocation API
3) Handle legacy AWS behavior with new config options
4) Test based on new and old config
2017-02-23 15:25:06 -08:00
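For example, the new behavior can be exercised with the AWS JavaScript SDK already shown in the README; the bucket name and location value below are examples, and the location must match one defined in the server's configuration.
```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    endpoint: 'http://localhost:8000',
    sslEnabled: false,
    s3ForcePathStyle: true,
});

// Put a bucket with an explicit location, then read it back.
s3.createBucket({
    Bucket: 'locationbucket',
    CreateBucketConfiguration: { LocationConstraint: 'us-east-1' },
}, err => {
    if (err) {
        return console.error(err);
    }
    return s3.getBucketLocation({ Bucket: 'locationbucket' }, (e, data) =>
        console.log(e || data.LocationConstraint));
});
```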
Lauren Spiegel 2783f0b4a2 Merge pull request #608 from scality/forward/rel/6.4
Forward rel/6.4 to master
2017-02-15 15:23:06 -05:00
Electra Chong ec9466b816 FIX: large mpu test socket hangup
FIXES scality/Integration#449

Aim to disable aws-node-sdk timeouts to see if it will prevent socket hang-ups for this test in end-to-end runs.
2017-02-14 16:40:27 -08:00
Lauren Spiegel ea3240b1ec Merge pull request #609 from scality/BF/dotOnly
BF: removing .only in unit tests
2017-02-13 20:13:01 -08:00
Nicolas Humbert e771d8f593 BF: removing .only in unit tests 2017-02-13 16:28:55 -08:00
Lauren Spiegel 62ccd2885c Merge pull request #575 from scality/ft/multipleBackendConfig
Ft/multiple backend config
2017-02-13 15:43:58 -08:00
Dora Korpar 2fbcdf35c0 Set up config for multiple backends 2017-02-13 14:50:24 -08:00
Lauren Spiegel 79247f8802 Merge pull request #593 from scality/COMPAT/websiteGetErrCode
COMPAT: websiteGet and websiteHead with ACLs
2017-02-13 10:13:11 -08:00
Nicolas Humbert 87ab2d31df COMPAT: websiteGet and websiteHead error code
FIXES #573
2017-02-10 14:22:53 -08:00
Lauren Spiegel fb62007a1e Merge pull request #605 from scality/forward/rel/6.4
Forward rel/6.4 to master
2017-02-10 14:10:18 -08:00
Nicolas Humbert 27fd44ac74 Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-02-10 11:56:46 -08:00
Lauren Spiegel 26988aa188 Merge pull request #595 from scality/forward/rel/6.4
Forward rel/6.4 to master
2017-02-07 15:53:23 -08:00
Nicolas Humbert 87b694c20b Merge remote-tracking branch 'origin/rel/6.4' into forward/rel/6.4 2017-02-06 11:33:46 -08:00
Lauren Spiegel 224799f889 Merge pull request #577 from scality/README/awscli-config
README: add AWS CLI configuration
2017-01-31 18:42:53 -08:00
Nicolas Humbert 721a7a4bb4 README: add AWS CLI configuration 2017-01-31 13:25:43 -08:00
Rahul Padigela 0625484ba0 Merge pull request #574 from scality/fwdport/6.4-master
Fwdport/6.4 master
2017-01-27 18:38:01 -08:00
Rahul Padigela 48c3d244a2 Merge remote-tracking branch 'origin/rel/6.4' into fwdport/6.4-master 2017-01-27 17:37:44 -08:00
Lauren Spiegel 28c9dad3da Merge pull request #532 from scality/Forward-rel/6.4-to-master
Forward rel/6.4 to master
2017-01-19 16:14:05 -08:00
David Pineau cf269ff65c Merge remote-tracking branch 'origin/rel/6.4' into Forward-rel/6.4-to-master 2017-01-19 12:15:49 +01:00
332 changed files with 23480 additions and 6196 deletions


@ -1,7 +0,0 @@
{
"plugins": [
"transform-es2015-destructuring",
"transform-es2015-modules-commonjs",
"transform-es2015-parameters"
]
}

54
.github/ISSUE_TEMPLATE.md vendored Normal file

@ -0,0 +1,54 @@
# Issue template
If you are reporting a new issue, make sure that we do not have any
duplicates already open. You can ensure this by searching the issue list for
this repository. If there is a duplicate, please close your issue and add a
comment to the existing issue instead.
## General support information
GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be sent to the
[S3 scality server Forum](http://forum.scality.com/).
## Bug report information
(delete this section if not applicable)
### Description
Briefly describe the problem you are having in a few paragraphs.
### Steps to reproduce the issue
Please provide steps to reproduce, including full log output
### Actual result
Describe the results you received
### Expected result
Describe the results you expected
### Additional information: (Node.js version, Docker version, etc)
## Feature Request
(delete this section if not applicable)
### Proposal
Describe the feature
### Current behavior
What currently happens
### Desired behavior
What you would like to happen
### Use case
Please provide use cases for changing the current behavior

28
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@ -0,0 +1,28 @@
# Pull request template
## Description
### Motivation and context
Why is this change required? What problem does it solve?
### Related issues
Please use the following link syntax, e.g. #600, to reference issues in the
current repository
## Checklist
### Add tests to cover the changes
New tests added or existing tests modified to cover all changes
### Code conforms with the [style guide](https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md#coding-style-guidelines)
### Sign your work
In order to contribute to the project, you must sign your work
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md#sign-your-work
Thank you again for contributing! We will try to test and integrate the change
as soon as we can.


@ -56,6 +56,8 @@ Right now, the following operations are implemented:
- Put Bucket Website
- Get Bucket Website
- Delete Bucket Website
- Put Bucket Versioning
- Get Bucket Versioning
- v2 Authentication
- v4 Authentication (Transferring Payload in a Single Chunk)
- v4 Authentication (Transferring Payload in Multiple Chunks)

118
DOCKER.md

@ -1,118 +0,0 @@
# Using S3 for continuous integration testing or in production with Docker
* [For continuous integration with Docker](#for-continuous-integration-with-docker)
* [Environment Variables](#environment-variables)
* [In production with Docker](#in-production-with-docker)
* [Using Docker Volume in production](#using-docker-volume-in-production)
* [Adding, modifying or deleting accounts or users credentials](#adding-modifying-or-deleting-accounts-or-users-credentials)
* [Specifying your own host name](#specifying-your-own-host-name)
## For continuous integration with Docker
When you start the Docker Scality S3 server image, you can adjust the
configuration of the Scality S3 server instance by passing one or more
environment variables on the docker run command line.
### Environment Variables
#### HOST_NAME
This variable specifies a host name.
If you have a domain such as example.com, by specifying that here,
you and your users can direct s3 server requests to example.com.
```shell
docker run -d --name s3server -p 8000:8000 -e HOST_NAME=new.host.com scality/s3server
```
#### ACCESS_KEY and SECRET_KEY
These variables specify authentication credentials for an account
named "Docker".
You can set credentials for many accounts by editing `conf/authdata.json`
(see below for further info),
but if you just want to specify one set of your own,
you can use these environment variables.
```shell
docker run -d --name s3server -p 8000:8000 -e ACCESS_KEY=newAccessKey -e
SECRET_KEY=newSecretKey scality/s3server
```
#### LOG_LEVEL
This variable allows you to change the log level: info, debug or trace.
The default is info. Debug will give you more detailed logs and trace
will give you the most detailed.
```shell
docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
```
## In production with Docker
### Using Docker Volume in production
S3 server runs with a file backend by default.
So, by default, the data is stored inside your S3 server Docker container.
However, if you want your data and metadata to persist, you **MUST** use Docker
volumes to host your data and metadata outside your s3 server Docker container.
Otherwise, the data and metadata will be destroyed when you erase the container.
```shell
docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
-p 8000:8000 -d scality/s3server
```
This command mounts the host directory, `./data`, into the container at
/usr/src/app/localData and the host directory, `./metadata`, into the container
at /usr/src/app/localMetadata. It can also be any host mount point,
like `/mnt/data` and `/mnt/metadata`.
### Adding, modifying or deleting accounts or users credentials
1. Create locally a customized `authdata.json`.
2. Use [Docker Volume](https://docs.docker.com/engine/tutorials/dockervolumes/)
to override the default `authdata.json` through a docker file mapping.
For example:
```shell
docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
scality/s3server
```
### Specifying your own host name
To specify a host name (e.g. s3.domain.name),
you can provide your own
[config.json](https://github.com/scality/S3/blob/master/config.json)
using [Docker Volume](https://docs.docker.com/engine/tutorials/dockervolumes/).
First add a new key-value pair in the regions section of your config.json.
The key in the key-value pair should be your "region" name and the value
is an array containing any host name you would like to add:
```json
"regions": {
...
"localregion": ["localhost"],
"specifiedregion": ["s3.domain.name"]
},
```
Then, run your Scality S3 Server using
[Docker Volume](https://docs.docker.com/engine/tutorials/dockervolumes/):
```shell
docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server
```
Your local `config.json` file will override the default one through a docker
file mapping.


@ -1,4 +1,4 @@
FROM node:4-slim
FROM node:6-slim
MAINTAINER Giorgio Regni <gr@scality.com>
WORKDIR /usr/src/app
@ -6,9 +6,9 @@ WORKDIR /usr/src/app
COPY . /usr/src/app
RUN apt-get update \
&& apt-get install -y python git build-essential --no-install-recommends \
&& apt-get install -y jq python git build-essential --no-install-recommends \
&& npm install --production \
&& apt-get autoremove -y python build-essential \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& npm cache clear \
&& rm -rf ~/.node-gyp \


@ -1,4 +1,4 @@
FROM node:4-slim
FROM node:6-slim
MAINTAINER Giorgio Regni <gr@scality.com>
WORKDIR /usr/src/app
@ -8,7 +8,7 @@ COPY . /usr/src/app
RUN apt-get update \
&& apt-get install -y python git build-essential --no-install-recommends \
&& npm install --production \
&& apt-get autoremove -y python build-essential \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& npm cache clear \
&& rm -rf ~/.node-gyp \


@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
Copyright 2016 Scality
Copyright 2015-2017 Scality
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

362
README.md

@ -1,12 +1,20 @@
# S3 Server
# Scality S3 Server
![S3 Server logo](res/Scality-S3-Server-Logo-Large.png)
[![CircleCI][badgepub]](https://circleci.com/gh/scality/S3)
[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/S3)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/scality/s3server/)
[![Twitter Follow][badgetwitter]](https://twitter.com/s3server)
## Learn more at [s3.scality.com](http://s3.scality.com)
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
## Docker
[Run your S3 server with Docker](https://hub.docker.com/r/scality/s3server/)
## Contributing
In order to contribute, please follow the
@ -17,7 +25,7 @@ https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
### Dependencies
Building and running the S3 Server requires node.js 4.5 and npm v2
Building and running the Scality S3 Server requires node.js 6.9.5 and npm v3
. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
@ -41,7 +49,10 @@ npm install
npm start
```
This starts an S3 server on port 8000.
This starts an S3 server on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
@ -61,6 +72,35 @@ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
npm start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
npm start
```
This starts an S3 server on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section below to learn how to set
location constraints.
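For example, with the AWS JavaScript SDK the header can be sent through the `Metadata` parameter (the SDK prefixes each key with `x-amz-meta-`). Here `awsbackend` is only a placeholder for whatever location name your configuration defines.
```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    endpoint: 'http://localhost:8000',
    sslEnabled: false,
    s3ForcePathStyle: true,
});

// The Metadata entry becomes the x-amz-meta-scal-location-constraint header.
s3.putObject({
    Bucket: 'mybucket',
    Key: 'mykey',
    Body: 'some data',
    Metadata: { 'scal-location-constraint': 'awsbackend' },
}, (err, data) => console.log(err || data));
```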
## Run it with an in-memory backend
```shell
@ -71,319 +111,7 @@ This starts an S3 server on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it for continuous integration testing or in production with Docker
[DOCKER.md](DOCKER.md)
## Testing
You can run the unit tests with the following command:
```shell
npm test
```
You can run the linter with:
```shell
npm run lint
```
Running functional tests locally:
The test suite requires additional tools, **s3cmd** and **Redis**, to be installed
in the environment where the tests run.
* Install [s3cmd](http://s3tools.org/download)
* Install [redis](https://redis.io/download) and start Redis.
* Add localCache section to your `config.json`:
```
"localCache": {
"host": REDIS_HOST,
"port": REDIS_PORT
}
```
where `REDIS_HOST` is your Redis instance IP address (`"127.0.0.1"` if your
Redis is running locally)
and `REDIS_PORT` is your Redis instance port (`6379` by default)
* Add the following to the etc/hosts file on your machine:
```shell
127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com
```
* Start the S3 server in memory and run the functional tests:
```shell
npm run mem_backend
npm run ft_test
```
## Configuration
If you want to specify an endpoint (other than localhost),
you need to add it to your config.json:
```json
"regions": {
"localregion": ["localhost"],
"specifiedregion": ["myhostname.com"]
},
```
Note that our S3server supports both:
- path-style: http://myhostname.com/mybucket
- hosted-style: http://mybucket.myhostname.com
However, hosted-style requests will not hit the server if you are
using an IP address for your host.
So, make sure you are using path-style requests in that case.
For instance, if you are using the AWS SDK for JavaScript,
you would instantiate your client like this:
```js
const s3 = new aws.S3({
endpoint: 'http://127.0.0.1:8000',
s3ForcePathStyle: true,
});
```
[badgetwitter]: https://img.shields.io/twitter/follow/s3server.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
## Getting started: List of applications that have been tested with S3 Server
### GUI
#### [Cyberduck](https://cyberduck.io/?l=en)
- https://www.youtube.com/watch?v=-n2MCt4ukUg
- https://www.youtube.com/watch?v=IyXHcu4uqgU
#### [Cloud Explorer](https://www.linux-toys.com/?p=945)
- https://www.youtube.com/watch?v=2hhtBtmBSxE
### Command Line Tools
#### [s3curl](https://github.com/rtdp/s3curl)
https://github.com/scality/S3/blob/master/tests/functional/s3curl/s3curl.pl
#### [aws-cli](http://docs.aws.amazon.com/cli/latest/reference/)
`~/.aws/credentials` on Linux, OS X, or Unix or
`C:\Users\USERNAME\.aws\credentials` on Windows
```shell
[default]
aws_access_key_id = accessKey1
aws_secret_access_key = verySecretKey1
```
See all buckets:
```shell
aws s3 ls --endpoint-url=http://localhost:8000
```
#### [s3cmd](http://s3tools.org/s3cmd)
If using s3cmd as a client to S3, be aware that the v4 signature format
is buggy in s3cmd versions < 1.6.1.
`~/.s3cfg` on Linux, OS X, or Unix or
`C:\Users\USERNAME\.s3cfg` on Windows
```shell
[default]
access_key = accessKey1
secret_key = verySecretKey1
host_base = localhost:8000
host_bucket = %(bucket).localhost:8000
signature_v2 = False
use_https = False
```
See all buckets:
```shell
s3cmd ls
```
#### [rclone](http://rclone.org/s3/)
`~/.rclone.conf` on Linux, OS X, or Unix or
`C:\Users\USERNAME\.rclone.conf` on Windows
```shell
[remote]
type = s3
env_auth = false
access_key_id = accessKey1
secret_access_key = verySecretKey1
region = other-v2-signature
endpoint = http://localhost:8000
location_constraint =
acl = private
server_side_encryption =
storage_class =
```
See all buckets:
```shell
rclone lsd remote:
```
### JavaScript
#### [AWS JavaScript SDK](http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html)
```javascript
const AWS = require('aws-sdk');
const s3 = new AWS.S3({
accessKeyId: 'accessKey1',
secretAccessKey: 'verySecretKey1',
endpoint: 'localhost:8000',
sslEnabled: false,
s3ForcePathStyle: true,
});
```
### JAVA
#### [AWS JAVA SDK](http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Client.html)
```java
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;
import com.amazonaws.services.s3.model.Bucket;
public class S3 {
public static void main(String[] args) {
AWSCredentials credentials = new BasicAWSCredentials("accessKey1",
"verySecretKey1");
// Create a client connection based on credentials
AmazonS3 s3client = new AmazonS3Client(credentials);
s3client.setEndpoint("http://localhost:8000");
// Using path-style requests
// (deprecated) s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
s3client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
// Create bucket
String bucketName = "javabucket";
s3client.createBucket(bucketName);
// List off all buckets
for (Bucket bucket : s3client.listBuckets()) {
System.out.println(" - " + bucket.getName());
}
}
}
```
## Ruby
### [AWS SDK for Ruby - Version 2](http://docs.aws.amazon.com/sdkforruby/api/)
```ruby
require 'aws-sdk'
s3 = Aws::S3::Client.new(
:access_key_id => 'accessKey1',
:secret_access_key => 'verySecretKey1',
:endpoint => 'http://localhost:8000',
:force_path_style => true
)
resp = s3.list_buckets
```
#### [fog](http://fog.io/storage/)
```ruby
require "fog"
connection = Fog::Storage.new(
{
:provider => "AWS",
:aws_access_key_id => 'accessKey1',
:aws_secret_access_key => 'verySecretKey1',
:endpoint => 'http://localhost:8000',
:path_style => true,
:scheme => 'http',
})
```
### Python
#### [boto2](http://boto.cloudhackers.com/en/latest/ref/s3.html)
```python
import boto
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
connection = S3Connection(
aws_access_key_id='accessKey1',
aws_secret_access_key='verySecretKey1',
is_secure=False,
port=8000,
calling_format=OrdinaryCallingFormat(),
host='localhost'
)
connection.create_bucket('mybucket')
```
#### [boto3](http://boto3.readthedocs.io/en/latest/index.html)
``` python
import boto3
client = boto3.client(
's3',
aws_access_key_id='accessKey1',
aws_secret_access_key='verySecretKey1',
endpoint_url='http://localhost:8000'
)
lists = client.list_buckets()
```
### PHP
You should use v3 rather than v2, because v2 creates virtual-hosted-style URLs
while v3 generates path-style URLs.
#### [AWS PHP SDK v3](https://docs.aws.amazon.com/aws-sdk-php/v3/guide)
```php
use Aws\S3\S3Client;
$client = S3Client::factory([
'region' => 'us-east-1',
'version' => 'latest',
'endpoint' => 'http://localhost:8000',
'credentials' => [
'key' => 'accessKey1',
'secret' => 'verySecretKey1'
]
]);
$client->createBucket(array(
'Bucket' => 'bucketphp',
));
```

2
_config.yml Normal file

@ -0,0 +1,2 @@
---
theme: jekyll-theme-minimal


@ -2,5 +2,4 @@
// 2>/dev/null ; exec "$(which nodejs || which node)" "$0" "$@"
'use strict'; // eslint-disable-line strict
require('babel-core/register');
require('../lib/kms/utilities.js').createEncryptedBucket();


@ -1,5 +1,4 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
require('babel-core/register');
require('../lib/utapi/utilities.js').listMetrics('buckets');


@ -1,5 +1,4 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
require('babel-core/register');
require('../lib/utapi/utilities.js').listMetrics();


@ -8,7 +8,7 @@ general:
machine:
node:
version: 4.5.0
version: 6.9.5
ruby:
version: "2.4.1"
services:
@ -42,6 +42,21 @@ test:
- mkdir -p $CIRCLE_TEST_REPORTS/unit
- npm run unit_coverage
- npm run start_dmd & bash wait_for_local_port.bash 9990 40
&& npm run multiple_backend_test
# Run S3 with multiple data backends ; run ft_awssdk
- S3BACKEND=mem MPU_TESTING=yes S3DATA=multiple npm start
> $CIRCLE_ARTIFACTS/server_multiple_awssdk.txt
& bash wait_for_local_port.bash 8000 40
&& S3DATA=multiple npm run ft_awssdk
# Run S3 with multiple data backends + KMS Encryption; run ft_awssdk
- S3BACKEND=mem MPU_TESTING=yes S3DATA=multiple npm start
> $CIRCLE_ARTIFACTS/server_multiple_kms_awssdk.txt
& bash wait_for_local_port.bash 8000 40
&& S3DATA=multiple ENABLE_KMS_ENCRYPTION=true npm run ft_awssdk
# Run S3 with mem Backend ; run ft_tests
- S3BACKEND=mem npm start
> $CIRCLE_ARTIFACTS/server_mem_java.txt


@ -1,24 +1,13 @@
{
"port": 8000,
"listenOn": [],
"regions": {
"ap-northeast-1": ["s3.ap-northeast-1.amazonaws.com"],
"ap-southeast-1": ["s3.ap-southeast-1.amazonaws.com"],
"ap-southeast-2": ["s3.ap-southeast-2.amazonaws.com"],
"eu-central-1": ["s3.eu-central-1.amazonaws.com",
"s3.eu.central-1.amazonaws.com"],
"eu-west-1": ["s3.eu-west-1.amazonaws.com"],
"sa-east-1": ["s3.sa-east-1.amazonaws.com"],
"us-east-1": ["s3.amazonaws.com",
"s3-external-1.amazonaws.com",
"s3.us-east-1.amazonaws.com"],
"us-west-1": ["s3.us-west-1.amazonaws.com"],
"us-west-2": ["s3-us-west-2.amazonaws.com"],
"us-gov-west-1": ["s3-us-gov-west-1.amazonaws.com",
"s3-fips-us-gov-west-1.amazonaws.com"],
"localregion": ["localhost"],
"test-region": ["s3.scality.test"],
"docker-region": ["s3.docker.test"]
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "file",
"127.0.0.1": "file",
"s3.docker.test": "us-east-1",
"127.0.0.2": "us-east-1",
"s3.amazonaws.com": "us-east-1"
},
"websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
"s3-website.us-east-2.amazonaws.com",
@ -34,9 +23,6 @@
"s3-website-sa-east-1.amazonaws.com",
"s3-website.localhost",
"s3-website.scality.test"],
"sproxyd": {
"bootstrap": ["localhost:8181"]
},
"bucketd": {
"bootstrap": ["localhost"]
},
@ -52,5 +38,20 @@
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"usEastBehavior": false
"metadataClient": {
"host": "localhost",
"port": 9990
},
"dataClient": {
"host": "localhost",
"port": 9991
},
"metadataDaemon": {
"bindAddress": "localhost",
"port": 9990
},
"dataDaemon": {
"bindAddress": "localhost",
"port": 9991
}
}


@ -1,6 +1,6 @@
import crypto from 'crypto';
const crypto = require('crypto');
export default {
const constants = {
/*
* Splitter is used to build the object name for the overview of a
* multipart upload and to build the object names for each part of a
@ -38,6 +38,7 @@ export default {
// by the name of the final destination bucket for the object
// once the multipart upload is complete.
mpuBucketPrefix: 'mpuShadowBucket',
blacklistedPrefixes: { bucket: [], object: [] },
// PublicId is used as the canonicalID for a request that contains
// no authentication information. Requestor can access
// only public resources
@ -68,6 +69,11 @@ export default {
maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 104857600 :
5368709120,
// AWS states max size for user-defined metadata (x-amz-meta- headers) is
// 2 KB: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
// In testing, AWS seems to allow up to 88 more bytes, so we do the same.
maximumMetaHeadersSize: 2136,
// hex digest of sha256 hash of empty string:
emptyStringHash: crypto.createHash('sha256')
.update('', 'binary').digest('hex'),
@ -79,16 +85,16 @@ export default {
'inventory': true,
'lifecycle': true,
'list-type': true,
'location': true,
'logging': true,
'metrics': true,
'notification': true,
'policy': true,
'replication': true,
'requestPayment': true,
'restore': true,
'tagging': true,
'torrent': true,
'versions': true,
},
// user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
};
module.exports = constants;

25
dataserver.js Normal file

@ -0,0 +1,25 @@
'use strict'; // eslint-disable-line strict
const arsenal = require('arsenal');
const { config } = require('./lib/Config.js');
const logger = require('./lib/utilities/logger');
if (config.backends.data === 'file' ||
(config.backends.data === 'multiple' &&
config.backends.metadata !== 'scality')) {
const dataServer = new arsenal.network.rest.RESTServer(
{ bindAddress: config.dataDaemon.bindAddress,
port: config.dataDaemon.port,
dataStore: new arsenal.storage.data.file.DataFileStore(
{ dataPath: config.dataDaemon.dataPath,
log: config.log }),
log: config.log });
dataServer.setup(err => {
if (err) {
logger.error('Error initializing REST data server',
{ error: err });
return;
}
dataServer.start();
});
}


@ -3,15 +3,10 @@
# set -e stops the execution of a script if a command or pipeline has an error
set -e
if [[ "$ACCESS_KEY" && "$SECRET_KEY" ]]; then
sed -i "s/accessKeyDocker/$ACCESS_KEY/" ./conf/authdata.json
sed -i "s/verySecretKeyDocker/$SECRET_KEY/" ./conf/authdata.json
echo "Access key and secret key have been modified successfully"
fi
if [[ "$HOST_NAME" ]]; then
sed -i "s/s3.docker.test/$HOST_NAME/" ./config.json
echo "Host name has been modified to $HOST_NAME"
echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with $HOST_NAME"
fi
if [[ "$LOG_LEVEL" ]]; then
@ -23,4 +18,61 @@ if [[ "$LOG_LEVEL" ]]; then
fi
fi
if [[ "$SSL" ]]; then
if [[ -z "$HOST_NAME" ]]; then
echo "WARNING! No HOST_NAME has been provided"
fi
# This condition makes sure that the certificates are not generated twice. (for docker restart)
if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
## Generate SSL key and certificates
# Generate a private key for your CSR
openssl genrsa -out ca.key 2048
# Generate a self signed certificate for your local Certificate Authority
openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=$SSL"
# Generate a key for S3 Server
openssl genrsa -out server.key 2048
# Generate a Certificate Signing Request for S3 Server
openssl req -new -key server.key -out server.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.$SSL"
# Generate a local-CA-signed certificate for S3 Server
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 99999 -sha256
fi
## Update S3Server config.json
# This condition makes sure that certFilePaths section is not added twice. (for docker restart)
if ! grep -q "certFilePaths" ./config.json; then
sed -i "0,/,/s//,\n \"certFilePaths\": { \"key\": \".\/server.key\", \"cert\": \".\/server.crt\", \"ca\": \".\/ca.crt\" },/" ./config.json
fi
fi
if [[ "$S3DATA" == "multiple" ]]; then
export S3DATA="$S3DATA"
fi
JQ_FILTERS="."
if [[ "$LISTEN_ADDR" ]]; then
JQ_FILTERS="$JQ_FILTERS | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS="$JQ_FILTERS | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS="$JQ_FILTERS | .listenOn=[\"$LISTEN_ADDR:8000\"]"
fi
if [[ "$DATA_HOST" ]]; then
JQ_FILTERS="$JQ_FILTERS | .dataClient.host=\"$DATA_HOST\""
fi
if [[ "$METADATA_HOST" ]]; then
JQ_FILTERS="$JQ_FILTERS | .metadataClient.host=\"$METADATA_HOST\""
fi
if [[ "$REDIS_HOST" ]]; then
JQ_FILTERS="$JQ_FILTERS | .localCache.host=\"$REDIS_HOST\""
JQ_FILTERS="$JQ_FILTERS | .localCache.port=6379"
fi
if [[ "$REDIS_PORT" ]]; then
JQ_FILTERS="$JQ_FILTERS | .localCache.port=$REDIS_PORT"
fi
jq "$JQ_FILTERS" config.json > config.json.tmp
mv config.json.tmp config.json
exec "$@"

916
docs/ARCHITECTURE.rst Normal file

@ -0,0 +1,916 @@
.. role:: raw-latex(raw)
:format: latex
..
Architecture
++++++++++++
Versioning
==========
This document describes S3 Server's support for the AWS S3 Bucket
Versioning feature.
AWS S3 Bucket Versioning
------------------------
See AWS documentation for a description of the Bucket Versioning
feature:
- `Bucket
Versioning <http://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html>`__
- `Object
Versioning <http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectVersioning.html>`__
This document assumes familiarity with the details of Bucket Versioning,
including null versions and delete markers, described in the above
links.
Implementation of Bucket Versioning in S3
-----------------------------------------
Overview of Metadata and API Component Roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Each version of an object is stored as a separate key in metadata. The
S3 API interacts with the metadata backend to store, retrieve, and
delete version metadata.
The implementation of versioning within the metadata backend is naive.
The metadata backend does not evaluate any information about bucket or
version state (whether versioning is enabled or suspended, and whether a
version is a null version or delete marker). The S3 front-end API
manages the logic regarding versioning information, and sends
instructions to metadata to handle the basic CRUD operations for version
metadata.
The role of the S3 API can be broken down into the following:
- put and delete version data
- store extra information about a version, such as whether it is a
delete marker or null version, in the object's metadata
- send instructions to metadata backend to store, retrieve, update and
delete version metadata based on bucket versioning state and version
metadata
- encode version ID information to return in responses to requests, and
decode version IDs sent in requests
The implementation of Bucket Versioning in S3 is described in this
document in two main parts. The first section, `"Implementation of
Bucket Versioning in
Metadata" <#implementation-of-bucket-versioning-in-metadata>`__,
describes the way versions are stored in metadata, and the metadata
options for manipulating version metadata.
The second section, `"Implementation of Bucket Versioning in
API" <#implementation-of-bucket-versioning-in-api>`__, describes the way
the metadata options are used in the API within S3 actions to create new
versions, update their metadata, and delete them. The management of null
versions and creation of delete markers are also described in this
section.
Implementation of Bucket Versioning in Metadata
-----------------------------------------------
As mentioned above, each version of an object is stored as a separate
key in metadata. We use version identifiers as the suffix for the keys
of the object versions, and a special version (the `"Master
Version" <#master-version>`__) to represent the latest version.
An example of what the metadata keys might look like for an object
``foo/bar`` with three versions (with ``.`` representing a null character):
+------------------------------------------------------+
| key |
+======================================================+
| foo/bar |
+------------------------------------------------------+
| foo/bar.098506163554375999999PARIS 0.a430a1f85c6ec |
+------------------------------------------------------+
| foo/bar.098506163554373999999PARIS 0.41b510cd0fdf8 |
+------------------------------------------------------+
| foo/bar.098506163554373999998PARIS 0.f9b82c166f695 |
+------------------------------------------------------+
The most recent version created is represented above in the key
``foo/bar`` and is the master version. This special version is described
further in the section `"Master Version" <#master-version>`__.
Version ID and Metadata Key Format
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The version ID is generated by the metadata backend, and encoded in a
hexadecimal string format by S3 before sending a response to a request.
S3 also decodes the hexadecimal string received from a request before
sending to metadata to retrieve a particular version.
The format of a ``version_id`` is: ``ts`` ``rep_group_id`` ``seq_id``
where:
- ``ts``: is the combination of epoch and an increasing number
- ``rep_group_id``: is the name of deployment(s) considered one unit
used for replication
- ``seq_id``: is a unique value based on metadata information.
The format of a key in metadata for a version is:
``object_name separator version_id`` where:
- ``object_name``: is the key of the object in metadata
- ``separator``: we use the ``null`` character (``0x00`` or ``\0``) as
the separator between the ``object_name`` and the ``version_id`` of a
key
- ``version_id``: is the version identifier; this encodes the ordering
information in the format described above as metadata orders keys
alphabetically
An example of a key in metadata:
``foo\01234567890000777PARIS 1234.123456`` indicating that this specific
version of ``foo`` was the ``000777``\ th entry created during the epoch
``1234567890`` in the replication group ``PARIS`` with ``1234.123456``
as ``seq_id``.
Master Version
~~~~~~~~~~~~~~
We store a copy of the latest version of an object's metadata using
``object_name`` as the key; this version is called the master version.
The master version of each object facilitates the standard GET
operation, which would otherwise need to scan among the list of versions
of an object for its latest version.
The following table shows the layout of all versions of ``foo`` in the
first example stored in the metadata (with dot ``.`` representing the
null separator):
+----------+---------+
| key | value |
+==========+=========+
| foo | B |
+----------+---------+
| foo.v2 | B |
+----------+---------+
| foo.v1 | A |
+----------+---------+
Metadata Versioning Options
~~~~~~~~~~~~~~~~~~~~~~~~~~~
S3 Server sends instructions to the metadata engine about whether to
create a new version or overwrite, retrieve, or delete a specific
version by sending values for special options in PUT, GET, or DELETE
calls to metadata. The metadata engine can also list versions in the
database, which is used by S3 to list object versions.
These only describe the basic CRUD operations that the metadata engine
can handle. How these options are used by the S3 API to generate and
update versions is described more comprehensively in `"Implementation of
Bucket Versioning in
API" <#implementation-of-bucket-versioning-in-api>`__.
Note: all operations (PUT and DELETE) that generate a new version of an
object will return the ``version_id`` of the new version to the API.
PUT
^^^
- no options: original PUT operation, will update the master version
- ``versioning: true`` create a new version of the object, then update
the master version with this version.
- ``versionId: <versionId>`` update a specific version (for updating a
version's ACL or tags, or for remote updates in geo-replication)
- if the version identified by ``versionId`` happens to be the latest
version, the master version will be updated as well
- note that with ``versionId`` set to an empty string ``''``, it will
overwrite the master version only (same as no options, but the master
version will have a ``versionId`` property set in its metadata like
any other version). The ``versionId`` will never be exposed to an
external user, but setting this internal-only ``versionId`` enables
S3 to find this version later if it is no longer the master. This
option of ``versionId`` set to ``''`` is used for creating null
versions once versioning has been suspended, which is discussed in
`"Null Version Management" <#null-version-management>`__.
Only one option is used at a time. ``versionId: <versionId>`` does not
have to be used with ``versioning: true`` set to work, nor should they
both be set. If both are used at once, the metadata engine will return
an error.
To summarize the valid combinations of versioning options:
- ``!versioning && !versionId``: normal non-versioning PUT
- ``versioning && !versionId``: create a new version, update the master
version
- ``!versioning && versionId``: update (PUT/DELETE) an existing version
- if ``versionId === ''`` update master version
Other cases are invalid and the metadata engine returns the error
``BadRequest``.
DELETE
^^^^^^
- no options: original DELETE operation, will delete the master version
- ``versionId: <versionId>`` delete a specific version
A deletion targeting the latest version of an object has to:
- delete the specified version identified by ``versionId``
- replace the master version with a version that is a placeholder for
deletion
- this version contains a special keyword, 'isPHD', to indicate the
master version was deleted and needs to be updated
- initiate a repair operation to update the value of the master
version:
- involves listing the versions of the object and getting the latest
version to replace the placeholder delete version
- if no more versions exist, metadata deletes the master version,
removing the key from metadata
Note: all of this happens before responding to S3, and only when the
metadata engine is instructed by S3 to delete a specific version or the
master version. See section `"Delete Markers" <#delete-markers>`__ for a
description of what happens when a Delete Object request is sent to the
S3 API.
GET
^^^
- no options: original GET operation, will get the master version
- ``versionId: <versionId>`` retrieve a specific version
The implementation of a GET operation does not change compared to the
standard version. A standard GET without versioning information would
get the master version of a key. A version-specific GET would retrieve
the specific version identified by the key for that version.
LIST
^^^^
For a standard LIST on a bucket, metadata iterates through the keys by
using the separator (``\0``, represented by ``.`` in examples) as an
extra delimiter. For a listing of all versions of a bucket, there is no
change compared to the original listing function. Instead, the API
component returns all the keys in a List Objects call and filters for
just the keys of the master versions in a List Object Versions call.
For example, a standard LIST operation against the keys in a table below
would return from metadata the list of
``[ foo/bar, bar, qux/quz, quz ]``.
+--------------+
| key |
+==============+
| foo/bar |
+--------------+
| foo/bar.v2 |
+--------------+
| foo/bar.v1 |
+--------------+
| bar |
+--------------+
| qux/quz |
+--------------+
| qux/quz.v2 |
+--------------+
| qux/quz.v1 |
+--------------+
| quz |
+--------------+
| quz.v2 |
+--------------+
| quz.v1 |
+--------------+
Implementation of Bucket Versioning in API
------------------------------------------
Object Metadata Versioning Attributes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To access all the information needed to properly handle all cases that
may exist in versioned operations, the API stores certain
versioning-related information in the metadata attributes of each
version's object metadata.
These are the versioning-related metadata properties:
- ``isNull``: whether the version being stored is a null version.
- ``nullVersionId``: the unencoded version ID of the latest null
version that existed before storing a non-null version.
- ``isDeleteMarker``: whether the version being stored is a delete
marker.
The metadata engine also sets one additional metadata property when
creating the version.
- ``versionId``: the unencoded version ID of the version being stored.
Null versions and delete markers are described in further detail in
their own subsections.
Creation of New Versions
~~~~~~~~~~~~~~~~~~~~~~~~
When versioning is enabled in a bucket, APIs which normally result in
the creation of objects, such as Put Object, Complete Multipart Upload
and Copy Object, will generate new versions of objects.
S3 creates a new version and updates the master version using the
``versioning: true`` option in PUT calls to the metadata engine. As an
example, when two consecutive Put Object requests are sent to the S3
Server for a versioning-enabled bucket with the same key names, there
are two corresponding metadata PUT calls with the ``versioning`` option
set to true.
The PUT calls to metadata and resulting keys are shown below:
(1) PUT foo (first put), versioning: ``true``
+----------+---------+
| key | value |
+==========+=========+
| foo | A |
+----------+---------+
| foo.v1 | A |
+----------+---------+
(2) PUT foo (second put), versioning: ``true``
+----------+---------+
| key | value |
+==========+=========+
| foo | B |
+----------+---------+
| foo.v2 | B |
+----------+---------+
| foo.v1 | A |
+----------+---------+
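As a rough client-side illustration, the same sequence can be reproduced
with the AWS CLI against a local S3 Server (the endpoint, bucket name and
file names below are example values):
.. code:: shell
# enable versioning on the bucket
aws --endpoint-url=http://localhost:8000 s3api put-bucket-versioning \
--bucket mybucket --versioning-configuration Status=Enabled
# two consecutive puts of the same key each create a new version
aws --endpoint-url=http://localhost:8000 s3api put-object \
--bucket mybucket --key foo --body ./fileA
aws --endpoint-url=http://localhost:8000 s3api put-object \
--bucket mybucket --key foo --body ./fileB
# both versions are returned; the second put is the latest (master) version
aws --endpoint-url=http://localhost:8000 s3api list-object-versions \
--bucket mybucket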
Null Version Management
^^^^^^^^^^^^^^^^^^^^^^^
In a bucket without versioning, or when versioning is suspended, putting
an object with the same name twice should result in the previous object
being overwritten. This is managed with null versions.
Only one null version should exist at any given time, and it is
identified in S3 requests and responses with the version id "null".
Case 1: Putting Null Versions
'''''''''''''''''''''''''''''
With respect to metadata, since the null version is overwritten by
subsequent null versions, the null version is initially stored in the
master key alone, as opposed to being stored in the master key and a new
version. S3 checks if versioning is suspended or has never been
configured, and sets the ``versionId`` option to ``''`` in PUT calls to
the metadata engine when creating a new null version.
The tables below show the keys resulting from PUT calls to metadata if
we put an object 'foo' twice, when versioning has not been enabled or is
suspended.
(1) PUT foo (first put), versionId: ``''``
+--------------+---------+
| key | value |
+==============+=========+
| foo (null) | A |
+--------------+---------+
(2) PUT foo (second put), versionId: ``''``
+--------------+---------+
| key | value |
+==============+=========+
| foo (null) | B |
+--------------+---------+
The S3 API also sets the ``isNull`` attribute to ``true`` in the version
metadata before storing the metadata for these null versions.
Case 2: Preserving Existing Null Versions in Versioning-Enabled Bucket
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Null versions are preserved when new non-null versions are created after
versioning has been enabled or re-enabled.
If the master version is the null version, the S3 API preserves the
current null version by storing it as a new key ``(3A)`` in a separate
PUT call to metadata, prior to overwriting the master version ``(3B)``.
This implies the null version may not necessarily be the latest or
master version.
To determine whether the master version is a null version, the S3 API
checks if the master version's ``isNull`` property is set to ``true``,
or if the ``versionId`` attribute of the master version is undefined
(indicating it is a null version that was put before bucket versioning
was configured).
Continuing the example from Case 1, if we enabled versioning and put
another object, the calls to metadata and resulting keys would resemble
the following:
(3A) PUT foo, versionId: ``<versionId of master version>`` if defined or
``<non-versioned object id>``
+-----------------+---------+
| key | value |
+=================+=========+
| foo | B |
+-----------------+---------+
| foo.v1 (null) | B |
+-----------------+---------+
(3B) PUT foo, versioning: ``true``
+-----------------+---------+
| key | value |
+=================+=========+
| foo | C |
+-----------------+---------+
| foo.v2 | C |
+-----------------+---------+
| foo.v1 (null) | B |
+-----------------+---------+
To prevent issues with concurrent requests, S3 ensures the null version
is stored with the same version ID by using the ``versionId`` option. S3
sets the ``versionId`` option to the master version's ``versionId``
metadata attribute value during the PUT. This creates a new version with
the same version ID as the existing null master version.
The null version's ``versionId`` attribute may be undefined because it
was generated before the bucket versioning was configured. In that case,
a version ID is generated using the max epoch and sequence values
possible so that the null version will be properly ordered as the last
entry in a metadata listing. This value ("non-versioned object id") is
used in the PUT call with the ``versionId`` option.
Case 3: Overwriting a Null Version That is Not Latest Version
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
Normally when versioning is suspended, S3 uses the ``versionId: ''``
option in a PUT to metadata to create a null version. This also
overwrites an existing null version if it is the master version.
However, if there is a null version that is not the latest version, S3
cannot rely on the ``versionId: ''`` option, since it will not overwrite
the existing null version. Instead, before creating a new null version,
the S3 API must send a separate DELETE call to metadata specifying the
version id of the current null version to delete.
To do this, when storing a null version (3A above) before storing a new
non-null version, S3 records the null version's ID in the ``nullVersionId``
attribute of the non-null version. For steps 3A and 3B above, these are
the values stored in the ``nullVersionId`` of each version's metadata:
(3A) PUT foo, versioning: ``true``
+-----------------+---------+-----------------------+
| key | value | value.nullVersionId |
+=================+=========+=======================+
| foo | B | undefined |
+-----------------+---------+-----------------------+
| foo.v1 (null) | B | undefined |
+-----------------+---------+-----------------------+
(3B) PUT foo, versioning: ``true``
+-----------------+---------+-----------------------+
| key | value | value.nullVersionId |
+=================+=========+=======================+
| foo | C | v1 |
+-----------------+---------+-----------------------+
| foo.v2 | C | v1 |
+-----------------+---------+-----------------------+
| foo.v1 (null) | B | undefined |
+-----------------+---------+-----------------------+
If defined, the ``nullVersionId`` of the master version is used with the
``versionId`` option in a DELETE call to metadata if a Put Object
request is received when versioning is suspended in a bucket.
(4A) DELETE foo, versionId: ``<nullVersionId of master version>`` (v1)
+----------+---------+
| key | value |
+==========+=========+
| foo | C |
+----------+---------+
| foo.v2 | C |
+----------+---------+
Then the master version is overwritten with the new null version:
(4B) PUT foo, versionId: ``''``
+--------------+---------+
| key | value |
+==============+=========+
| foo (null) | D |
+--------------+---------+
| foo.v2 | C |
+--------------+---------+
The ``nullVersionId`` attribute is also used to retrieve the correct
version when the version ID "null" is specified in certain object-level
APIs, described further in the section `"Null Version
Mapping" <#null-version-mapping>`__.
Specifying Versions in APIs for Putting Versions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Since S3 does not allow an overwrite of existing version data, Put
Object, Complete Multipart Upload and Copy Object return
``400 InvalidArgument`` if a specific version ID is specified in the
request query, e.g. for a ``PUT /foo?versionId=v1`` request.
PUT Example
~~~~~~~~~~~
When S3 receives a request to PUT an object:
- It checks first if versioning has been configured
- If it has not been configured, S3 puts the new data, puts the
metadata by overwriting the master version, and deletes any
pre-existing data
If versioning has been configured, S3 checks the following:
Versioning Enabled
^^^^^^^^^^^^^^^^^^
If versioning is enabled and there is existing object metadata:
- If the master version is a null version (``isNull: true``) or has no
version ID (put before versioning was configured):
- store the null version metadata as a new version
- create a new version and overwrite the master version
- set ``nullVersionId``: version ID of the null version that was
stored
If versioning is enabled and the master version is not null, or there is
no existing object metadata:
- create a new version and store it, and overwrite the master version
Versioning Suspended
^^^^^^^^^^^^^^^^^^^^
If versioning is suspended and there is existing object metadata:
- If the master version is a null version or has no version ID:
- overwrite the master version with the new metadata
- delete previous object data
- If the master is not a null version and ``nullVersionId`` is defined in
the object's metadata:
- delete the current null version metadata and data
- overwrite the master version with the new metadata
If there is no existing object metadata, create the new null version as
the master version.
In each of the above cases, set the ``isNull`` metadata attribute to true
when creating the new null version.
Behavior of Object-Targeting APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
API methods which can target existing objects or versions, such as Get
Object, Head Object, Get Object ACL, Put Object ACL, Copy Object and
Copy Part, will perform the action on the latest version of an object if
no version ID is specified in the request query or relevant request
header (``x-amz-copy-source-version-id`` for Copy Object and Copy Part
APIs).
Two exceptions are the Delete Object and Multi-Object Delete APIs, which
will instead attempt to create delete markers, described in the
following section, if no version ID is specified.
No versioning options are necessary to retrieve the latest version from
metadata, since the master version is stored in a key with the name of
the object. However, when updating the latest version, such as with the
Put Object ACL API, S3 sets the ``versionId`` option in the PUT call to
metadata to the value stored in the object metadata's ``versionId``
attribute. This is done in order to update the metadata both in the
master version and the version itself, if it is not a null version.
When a version id is specified in the request query for these APIs, e.g.
``GET /foo?versionId=v1``, S3 will attempt to decode the version ID and
perform the action on the appropriate version. To do so, the API sets
the value of the ``versionId`` option to the decoded version ID in the
metadata call.
Delete Markers
^^^^^^^^^^^^^^
If versioning has not been configured for a bucket, the Delete Object
and Multi-Object Delete APIs behave like their standard, non-versioned
counterparts.
If versioning has been configured, S3 deletes object or version data
only if a specific version ID is provided in the request query, e.g.
``DELETE /foo?versionId=v1``.
If no version ID is provided, S3 creates a delete marker by creating a
0-byte version with the metadata attribute ``isDeleteMarker: true``. The
S3 API will return a ``404 NoSuchKey`` error in response to requests
getting or heading an object whose latest version is a delete marker.
To restore a previous version as the latest version of an object, the
delete marker must be deleted, by the same process as deleting any other
version.
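As a client-side sketch with the AWS CLI (endpoint, bucket and key are
example values): deleting a key without a version ID creates a delete
marker, and deleting that marker's version ID restores the previous
version as the latest.
.. code:: shell
# creates a delete marker; the response includes DeleteMarker and its VersionId
aws --endpoint-url=http://localhost:8000 s3api delete-object \
--bucket mybucket --key foo
# delete the delete marker itself to restore the previous version as latest
aws --endpoint-url=http://localhost:8000 s3api delete-object \
--bucket mybucket --key foo --version-id <versionId-of-delete-marker>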
For other object-level APIs that can target existing objects and
versions, the response varies when the request targets an object whose
latest version is a delete marker and no version ID is specified.
- Get Object, Head Object, Get Object ACL, Object Copy and Copy Part
return ``404 NoSuchKey``.
- Put Object ACL and Put Object Tagging return
``405 MethodNotAllowed``.
These APIs respond to requests specifying the version ID of a delete
marker with the error ``405 MethodNotAllowed``, in general. Copy Part
and Copy Object respond with ``400 InvalidRequest``.
See section `"Delete Example" <#delete-example>`__ for a summary.
Null Version Mapping
^^^^^^^^^^^^^^^^^^^^
When the null version is specified in a request with the version ID
"null", the S3 API must use the ``nullVersionId`` stored in the latest
version to retrieve the current null version, if the null version is not
the latest version.
Thus, getting the null version is a two step process:
1. Get the latest version of the object from metadata. If the latest
version's ``isNull`` property is ``true``, then use the latest
version's metadata. Otherwise,
2. Get the null version of the object from metadata, using the internal
version ID of the current null version stored in the latest version's
``nullVersionId`` metadata attribute.
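From the client's point of view, this mapping is triggered by any request
that passes the literal version id ``null``, for example (AWS CLI, example
values):
.. code:: shell
# fetch whichever version is currently the null version of the key
aws --endpoint-url=http://localhost:8000 s3api get-object \
--bucket mybucket --key foo --version-id null foo.out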
DELETE Example
~~~~~~~~~~~~~~
The following steps are used in the delete logic for delete marker
creation:
- If versioning has not been configured: attempt to delete the object
- If request is version-specific delete request: attempt to delete the
version
- otherwise, if not a version-specific delete request and versioning
has been configured:
- create a new 0-byte content-length version
- in the version's metadata, set the ``isDeleteMarker`` property to true
- Return the version ID of any version deleted or any delete marker
created
- Set response header ``x-amz-delete-marker`` to true if a delete
marker was deleted or created
The Multi-Object Delete API follows the same logic for each of the
objects or versions listed in an XML request. Note that a delete request
can result in the creation of a delete marker even if the object
requested for deletion does not exist in the first place.
Object-level APIs which can target existing objects and versions perform
the following checks regarding delete markers:
- If not a version-specific request and versioning has been configured,
check the metadata of the latest version
- If the ``isDeleteMarker`` property is set to true, return
``404 NoSuchKey`` or ``405 MethodNotAllowed``
- If it is a version-specific request, check the object metadata of the
requested version
- If the ``isDeleteMarker`` property is set to true, return
``405 MethodNotAllowed`` or ``400 InvalidRequest``
Data-metadata daemon Architecture and Operational guide
=======================================================
This document presents the architecture of the data-metadata daemon
(dmd) used for the community edition of S3 server. It also provides a
guide on how to operate it.
The dmd is responsible for storing and retrieving S3 data and metadata,
and is accessed by S3 connectors through socket.io (metadata) and REST
(data) APIs.
It has been designed such that more than one S3 connector can access the
same buckets by communicating with the dmd. It also means that the dmd
can be hosted on a separate container or machine.
Operation
---------
Startup
~~~~~~~
The simplest deployment is still to launch with ``npm start``; this
starts one instance of the S3 connector and listens on the locally
bound dmd ports 9990 and 9991 (by default, see below).
The dmd can be started independently from the S3 server by running this
command in the S3 directory:
::
npm run start_dmd
This will open two ports:
- one is based on socket.io and is used for metadata transfers (9990 by
default)
- the other is a REST interface used for data transfers (9991 by
default)
Then, one or more instances of S3 server without the dmd can be started
elsewhere with:
::
npm run start_s3server
Configuration
~~~~~~~~~~~~~
Most configuration happens in ``config.json`` for the S3 server; local
storage paths can be changed where the dmd is started using the same
environment variables as before: ``S3DATAPATH`` and ``S3METADATAPATH``.
In ``config.json``, the following sections are used to configure access
to the dmd through separate configuration of the data and metadata
access:
::
"metadataClient": {
"host": "localhost",
"port": 9990
},
"dataClient": {
"host": "localhost",
"port": 9991
},
To run a remote dmd, you have to do the following:
- change both ``"host"`` attributes to the IP or host name where the
dmd is run.
- Modify the ``"bindAddress"`` attributes in ``"metadataDaemon"`` and
``"dataDaemon"`` sections where the dmd is run to accept remote
connections (e.g. ``"::"``)
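One possible way to apply these changes is with ``jq``, in the same spirit
as the Docker entrypoint script (the remote host below is an example value):
.. code:: shell
# on the S3 connector: point both clients at the remote dmd host
jq '.metadataClient.host="dmd.example.com" | .dataClient.host="dmd.example.com"' \
config.json > config.json.tmp && mv config.json.tmp config.json
# on the dmd host: accept remote connections on both daemons
jq '.metadataDaemon.bindAddress="::" | .dataDaemon.bindAddress="::"' \
config.json > config.json.tmp && mv config.json.tmp config.json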
Architecture
------------
This section gives a bit more insight into how the dmd works internally.
.. figure:: ./images/data_metadata_daemon_arch.png
:alt: Architecture diagram
Metadata on socket.io
~~~~~~~~~~~~~~~~~~~~~
This communication is an RPC system built on socket.io events
sent by S3 connectors, received by the dmd and acknowledged back to the
S3 connector.
The actual payload sent through socket.io is a JSON-serialized form of
the RPC call name and parameters, along with some additional information
such as the request UIDs and the sub-level information, sent as object
attributes in the JSON request.
With the introduction of versioning support, updates are now gathered in
the dmd for a bounded number of milliseconds before being batched as a
single write to the database. This batching is done server-side, so the
API is still meant to send individual updates.
Four RPC commands are available to clients: ``put``, ``get``, ``del``
and ``createReadStream``. They more or less map to the parameters accepted
by the corresponding calls in the LevelUp implementation of LevelDB.
They differ in the following ways:
- The ``sync`` option is ignored (under the hood, puts are gathered
into batches which have their ``sync`` property enforced when they
are committed to the storage)
- Some additional versioning-specific options are supported
- ``createReadStream`` becomes asynchronous, takes an additional
callback argument and returns the stream in the second callback
parameter
Debugging the socket.io exchanges can be achieved by running the daemon
with the ``DEBUG='socket.io*'`` environment variable set.
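For example, assuming the dmd is started from the S3 directory:
.. code:: shell
DEBUG='socket.io*' npm run start_dmd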
One parameter controls the timeout after which RPC commands sent
end with a timeout error; it can be changed either:
- via the ``DEFAULT_CALL_TIMEOUT_MS`` option in
``lib/network/rpc/rpc.js``
- or in the constructor call of the ``MetadataFileClient`` object (in
``lib/metadata/bucketfile/backend.js``) as ``callTimeoutMs``.
The default value is 30000.
A specific implementation deals with streams, currently used for listing
a bucket. Streams emit ``"stream-data"`` events that pack one or more
items in the listing, and a special ``"stream-end"`` event when done.
Flow control is achieved by allowing a certain number of "in flight"
packets that have not received an ack yet (5 by default). Two options
can tune the behavior (for better throughput or to make it more robust
on weak networks); they have to be set directly in the ``mdserver.js`` file,
as there is no support in ``config.json`` for those options yet:
- ``streamMaxPendingAck``: max number of pending ack events not yet
received (default is 5)
- ``streamAckTimeoutMs``: timeout for receiving an ack after an output
stream packet is sent to the client (default is 5000)
Data exchange through the REST data port
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Data is read and written with REST semantics.
The web server recognizes a base path in the URL of ``/DataFile`` to be
a request to the data storage service.
PUT
^^^
A PUT on ``/DataFile`` URL and contents passed in the request body will
write a new object to the storage.
On success, a ``201 Created`` response is returned and the new URL to
the object is returned via the ``Location`` header (e.g.
``Location: /DataFile/50165db76eecea293abfd31103746dadb73a2074``). The
raw key can then be extracted simply by removing the leading
``/DataFile`` service information from the returned URL.
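A minimal sketch with curl, assuming the data port is the default 9991 on
localhost:
.. code:: shell
# write an object; the generated key comes back in the Location header
curl -si -X PUT --data-binary @./somefile http://localhost:9991/DataFile | grep -i '^Location'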
GET
^^^
A GET is simply issued with REST semantics, e.g.:
::
GET /DataFile/50165db76eecea293abfd31103746dadb73a2074 HTTP/1.1
A GET request can ask for a specific range. Range support is complete
except for multiple byte ranges.
DELETE
^^^^^^
DELETE is similar to GET, except that a ``204 No Content`` response is
returned on success.
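Continuing the sketch above with the example key from the ``Location``
header:
.. code:: shell
# read the object back
curl -s http://localhost:9991/DataFile/50165db76eecea293abfd31103746dadb73a2074
# read a single byte range
curl -s -H 'Range: bytes=0-99' http://localhost:9991/DataFile/50165db76eecea293abfd31103746dadb73a2074
# delete it; 204 No Content is expected on success
curl -s -o /dev/null -w '%{http_code}\n' -X DELETE \
http://localhost:9991/DataFile/50165db76eecea293abfd31103746dadb73a2074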
Listing
=======
Listing Types
-------------
We use three different types of metadata listing for various operations.
Here are the scenarios we use each for:
- 'Delimiter' - when no versions are possible in the bucket, since it is
an internal-only bucket that is not exposed to users. Namely,
1. to list objects in the "user's bucket" to respond to a GET SERVICE
request and
2. to do internal listings on an MPU shadow bucket to complete multipart
upload operations.
- 'DelimiterVersion' - to list all versions in a bucket
- 'DelimiterMaster' - to list just the master versions of objects in a
bucket
Algorithms
----------
The algorithms for each listing type can be found in the open-source
`scality/Arsenal <https://github.com/scality/Arsenal>`__ repository, in
`lib/algos/list <https://github.com/scality/Arsenal/tree/master/lib/algos/list>`__.

276
docs/CLIENTS.rst Normal file

@ -0,0 +1,276 @@
Clients
=========
List of applications that have been tested with S3 Server
GUI
~~~
`Cyberduck <https://cyberduck.io/?l=en>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- https://www.youtube.com/watch?v=-n2MCt4ukUg
- https://www.youtube.com/watch?v=IyXHcu4uqgU
`Cloud Explorer <https://www.linux-toys.com/?p=945>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- https://www.youtube.com/watch?v=2hhtBtmBSxE
`CloudBerry Lab <http://www.cloudberrylab.com>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- https://youtu.be/IjIx8g\_o0gY
Command Line Tools
~~~~~~~~~~~~~~~~~~
`s3curl <https://github.com/rtdp/s3curl>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
https://github.com/scality/S3/blob/master/tests/functional/s3curl/s3curl.pl
`aws-cli <http://docs.aws.amazon.com/cli/latest/reference/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``~/.aws/credentials`` on Linux, OS X, or Unix or
``C:\Users\USERNAME\.aws\credentials`` on Windows
.. code:: shell
[default]
aws_access_key_id = accessKey1
aws_secret_access_key = verySecretKey1
``~/.aws/config`` on Linux, OS X, or Unix or
``C:\Users\USERNAME\.aws\config`` on Windows
.. code:: shell
[default]
region = us-east-1
Note: ``us-east-1`` is the default region, but you can specify any
region.
See all buckets:
.. code:: shell
aws s3 ls --endpoint-url=http://localhost:8000
Create bucket:
.. code:: shell
aws --endpoint-url=http://localhost:8000 s3 mb s3://mybucket
`s3cmd <http://s3tools.org/s3cmd>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If using s3cmd as a client to S3, be aware that the v4 signature format is
buggy in s3cmd versions < 1.6.1.
``~/.s3cfg`` on Linux, OS X, or Unix or ``C:\Users\USERNAME\.s3cfg`` on
Windows
.. code:: shell
[default]
access_key = accessKey1
secret_key = verySecretKey1
host_base = localhost:8000
host_bucket = %(bucket).localhost:8000
signature_v2 = False
use_https = False
See all buckets:
.. code:: shell
s3cmd ls
`rclone <http://rclone.org/s3/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``~/.rclone.conf`` on Linux, OS X, or Unix or
``C:\Users\USERNAME\.rclone.conf`` on Windows
.. code:: shell
[remote]
type = s3
env_auth = false
access_key_id = accessKey1
secret_access_key = verySecretKey1
region = other-v2-signature
endpoint = http://localhost:8000
location_constraint =
acl = private
server_side_encryption =
storage_class =
See all buckets:
.. code:: shell
rclone lsd remote:
JavaScript
~~~~~~~~~~
`AWS JavaScript SDK <http://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: javascript
const AWS = require('aws-sdk');
const s3 = new AWS.S3({
accessKeyId: 'accessKey1',
secretAccessKey: 'verySecretKey1',
endpoint: 'localhost:8000',
sslEnabled: false,
s3ForcePathStyle: true,
});
JAVA
~~~~
`AWS JAVA SDK <http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/s3/AmazonS3Client.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: java
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.S3ClientOptions;
import com.amazonaws.services.s3.model.Bucket;
public class S3 {
public static void main(String[] args) {
AWSCredentials credentials = new BasicAWSCredentials("accessKey1",
"verySecretKey1");
// Create a client connection based on credentials
AmazonS3 s3client = new AmazonS3Client(credentials);
s3client.setEndpoint("http://localhost:8000");
// Using path-style requests
// (deprecated) s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
s3client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
// Create bucket
String bucketName = "javabucket";
s3client.createBucket(bucketName);
// List all buckets
for (Bucket bucket : s3client.listBuckets()) {
System.out.println(" - " + bucket.getName());
}
}
}
Ruby
~~~~
`AWS SDK for Ruby - Version 2 <http://docs.aws.amazon.com/sdkforruby/api/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: ruby
require 'aws-sdk'
s3 = Aws::S3::Client.new(
:access_key_id => 'accessKey1',
:secret_access_key => 'verySecretKey1',
:endpoint => 'http://localhost:8000',
:force_path_style => true
)
resp = s3.list_buckets
`fog <http://fog.io/storage/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: ruby
require "fog"
connection = Fog::Storage.new(
{
:provider => "AWS",
:aws_access_key_id => 'accessKey1',
:aws_secret_access_key => 'verySecretKey1',
:endpoint => 'http://localhost:8000',
:path_style => true,
:scheme => 'http',
})
Python
~~~~~~
`boto2 <http://boto.cloudhackers.com/en/latest/ref/s3.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: python
import boto
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
connection = S3Connection(
aws_access_key_id='accessKey1',
aws_secret_access_key='verySecretKey1',
is_secure=False,
port=8000,
calling_format=OrdinaryCallingFormat(),
host='localhost'
)
connection.create_bucket('mybucket')
`boto3 <http://boto3.readthedocs.io/en/latest/index.html>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: python
import boto3
client = boto3.client(
's3',
aws_access_key_id='accessKey1',
aws_secret_access_key='verySecretKey1',
endpoint_url='http://localhost:8000'
)
lists = client.list_buckets()
PHP
~~~
Use v3 rather than v2, because v2 creates virtual-hosted-style URLs
while v3 generates path-style URLs.
`AWS PHP SDK v3 <https://docs.aws.amazon.com/aws-sdk-php/v3/guide>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: php
use Aws\S3\S3Client;
$client = S3Client::factory([
'region' => 'us-east-1',
'version' => 'latest',
'endpoint' => 'http://localhost:8000',
'credentials' => [
'key' => 'accessKey1',
'secret' => 'verySecretKey1'
]
]);
$client->createBucket(array(
'Bucket' => 'bucketphp',
));

24
docs/CONTRIBUTING.rst Normal file

@ -0,0 +1,24 @@
Contributing
============
Need help?
----------
We're always glad to help out. Simply open a
`GitHub issue <https://github.com/scality/S3/issues>`__ and we'll give you
insight. If what you want is not available, and if you're willing to help us
out, we'll be happy to welcome you to the team, whether for a small fix or for
a larger feature development. Thanks for your interest!
Got an idea? Get started!
-------------------------
In order to contribute, please follow the `Contributing
Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__.
If anything is unclear to you, reach out to us on
`slack <https://zenko-io.slack.com/>`__ or via a GitHub issue.
Don't write code? There are other ways to help!
-----------------------------------------------
We're always eager to learn about our users' stories. If you can't contribute
code, but would love to help us, please shoot us an email at zenko@scality.com,
and tell us what our software enables you to do! Thanks for your time!

259
docs/DOCKER.rst Normal file

@ -0,0 +1,259 @@
Docker
======
- `For continuous integration with
Docker <#for-continuous-integration-with-docker>`__
- `Environment Variables <#environment-variables>`__
- `In production with Docker <#in-production-with-docker>`__
- `Using Docker Volume in
production <#using-docker-volume-in-production>`__
- `Adding, modifying, or deleting accounts or users
credentials <#adding-modifying-or-deleting-accounts-or-users-credentials>`__
- `Specifying your own host name <#specifying-your-own-host-name>`__
- `Running as an unprivileged
user <#running-as-an-unprivileged-user>`__
For continuous integration with Docker
--------------------------------------
When you start the Docker Scality S3 server image, you can adjust the
configuration of the Scality S3 server instance by passing one or more
environment variables on the docker run command line.
Environment Variables
~~~~~~~~~~~~~~~~~~~~~
S3DATA=multiple
^^^^^^^^^^^^^^^
This runs Scality S3 server with multiple data backends. `More
info <https://github.com/scality/S3#run-it-with-multiple-data-backends>`__
.. code:: shell
docker run -d --name s3server -p 8000:8000 -e S3DATA=multiple scality/s3server
HOST\_NAME
^^^^^^^^^^
This variable specifies a host name. If you have a domain such as
new.host.com, specifying it here lets you and your users direct S3
server requests to new.host.com.
.. code:: shell
docker run -d --name s3server -p 8000:8000 -e HOST_NAME=new.host.com scality/s3server
Note: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
permissions, make sure to associate 127.0.0.1 with ``new.host.com``
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These variables specify authentication credentials for an account named
"CustomAccount".
You can set credentials for many accounts by editing
``conf/authdata.json`` (see below for further info), but if you just
want to specify one set of your own, you can use these environment
variables.
.. code:: shell
docker run -d --name s3server -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey scality/s3server
Note: Anything in the ``authdata.json`` file will be ignored.
Note: The old ``ACCESS_KEY`` and ``SECRET_KEY`` environment variables are
now deprecated.
LOG\_LEVEL
^^^^^^^^^^
This variable allows you to change the log level: info, debug or trace.
The default is info. Debug will give you more detailed logs and trace
will give you the most detailed.
.. code:: shell
docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
SSL
^^^
This variable specifies the Common Name ``<DOMAIN_NAME>`` used to create
the Certificate Signing Request using OpenSSL. This allows you to run S3
with SSL:
**Note**: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
permissions, make sure to associate 127.0.0.1 with
``<SUBDOMAIN>.<DOMAIN_NAME>``
**Warning**: These certs, being self-signed (and the CA being generated
inside the container), will be untrusted by any clients and could
disappear on a container upgrade. That is fine as long as it is for quick
testing. For anything beyond testing, best security practice is to use an
extra container for SSL/TLS termination (such as haproxy/nginx/stunnel)
to limit what an exploit on either component could expose, and to keep
certificates in a mounted volume.
.. code:: shell
docker run -d --name s3server -p 8000:8000 -e SSL=<DOMAIN_NAME> -e HOST_NAME=<SUBDOMAIN>.<DOMAIN_NAME>
scality/s3server
More information about using S3 server with SSL is available
`here <https://s3.scality.com/v1.0/page/scality-with-ssl>`__.
LISTEN\_ADDR
^^^^^^^^^^^^
This variable instructs the S3 server and its data and metadata components
to listen on the specified address. This allows starting the data or metadata
servers as standalone services, for example.
.. code:: shell
docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0
scality/s3server npm run start_dataserver
DATA\_HOST and METADATA\_HOST
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These variables configure which data and metadata servers to use,
typically when they run on another host and only the stateless
S3 REST server is started locally.
.. code:: shell
docker run -d --name s3server -e DATA_HOST=s3server-data
-e METADATA_HOST=s3server-metadata scality/s3server npm run start_s3server
REDIS\_HOST
^^^^^^^^^^^
Use this variable to connect to the Redis cache server on a host other
than localhost.
.. code:: shell
docker run -d --name s3server -p 8000:8000
-e REDIS_HOST=my-redis-server.example.com scality/s3server
REDIS\_PORT
^^^^^^^^^^^
Use this variable to connect to the Redis cache server on a port other
than the default 6379.
.. code:: shell
docker run -d --name s3server -p 8000:8000
-e REDIS_PORT=6379 scality/s3server
In production with Docker
-------------------------
Using Docker Volume in production
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
S3 server runs with a file backend by default.
So, by default, the data is stored inside your S3 server Docker
container.
However, if you want your data and metadata to persist, you **MUST** use
Docker volumes to host your data and metadata outside your s3 server
Docker container. Otherwise, the data and metadata will be destroyed
when you erase the container.
.. code:: shell
docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
-p 8000:8000 -d scality/s3server
This command mounts the host directory, ``./data``, into the container
at /usr/src/app/localData and the host directory, ``./metadata``, into
the container at /usr/src/app/localMetadata. It can also be any host
mount point, like ``/mnt/data`` and ``/mnt/metadata``.
Adding, modifying, or deleting accounts or users credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Create locally a customized ``authdata.json``.
2. Use `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__
to override the default ``authdata.json`` through a docker file mapping.
For example:
.. code:: shell
docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
scality/s3server
Specifying your own host name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To specify a host name (e.g. s3.domain.name), you can provide your own
`config.json <https://github.com/scality/S3/blob/master/config.json>`__
using `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__.
First add a new key-value pair in the restEndpoints section of your
config.json. The key in the key-value pair should be the host name you
would like to add and the value is the default location\_constraint for
this endpoint.
For example, ``s3.example.com`` is mapped to ``us-east-1`` which is one
of the ``location_constraints`` listed in your locationConfig.json file
`here <https://github.com/scality/S3/blob/master/locationConfig.json>`__.
More information about location configuration is available
`here <https://github.com/scality/S3/blob/master/README.md#location-configuration>`__.
.. code:: json
"restEndpoints": {
"localhost": "file",
"127.0.0.1": "file",
...
"s3.example.com": "us-east-1"
},
Then, run your Scality S3 Server using `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__:
.. code:: shell
docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server
Your local ``config.json`` file will override the default one through a
docker file mapping.
Running as an unprivileged user
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
S3 Server runs as root by default.
You can change that by modifying the Dockerfile and specifying a user
before the entrypoint.
The user needs to exist within the container, and own the folder
**/usr/src/app** for Scality S3 Server to run properly.
For instance, you can modify these lines in the Dockerfile:
.. code:: shell
...
&& groupadd -r -g 1001 scality \
&& useradd -u 1001 -g 1001 -d /usr/src/app -r scality \
&& chown -R scality:scality /usr/src/app
...
USER scality
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]

414
docs/GETTING_STARTED.rst Normal file

@ -0,0 +1,414 @@
Getting Started
=================
.. figure:: ../res/Scality-S3-Server-Logo-Large.png
:alt: S3 Server logo
|CircleCI| |Scality CI|
Installation
------------
Dependencies
~~~~~~~~~~~~
Building and running the Scality S3 Server requires node.js 6.9.5 and
npm v3. Up-to-date versions can be found at
`Nodesource <https://github.com/nodesource/distributions>`__.
Clone source code
~~~~~~~~~~~~~~~~~
.. code:: shell
git clone https://github.com/scality/S3.git
Install js dependencies
~~~~~~~~~~~~~~~~~~~~~~~
Go to the ./S3 folder,
.. code:: shell
npm install
Run it with a file backend
--------------------------
.. code:: shell
npm start
This starts an S3 server on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with a secret key of
verySecretKey1.
By default the metadata files will be saved in the localMetadata
directory and the data files will be saved in the localData directory
within the ./S3 directory on your machine. These directories have been
pre-created within the repository. If you would like to save the data or
metadata in different locations of your choice, you must specify them
with absolute paths. So, when starting the server:
.. code:: shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
npm start
Run it with multiple data backends
----------------------------------
.. code:: shell
export S3DATA='multiple'
npm start
This starts an S3 server on port 8000. The default access key is
accessKey1 with a secret key of verySecretKey1.
With multiple backends, you have the ability to choose where each object
will be saved by setting the following header with a locationConstraint
on a PUT request:
.. code:: shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
If no header is sent with a PUT object request, the location constraint
of the bucket will determine where the data is saved. If the bucket has
no location constraint, the endpoint of the PUT request will be used to
determine location.
See the Configuration section below to learn how to set location
constraints.
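For example, with the AWS CLI such a request could look like the
following (a sketch; ``mybucket`` and ``myfile`` are placeholders, and
``--metadata`` adds the ``x-amz-meta-`` prefix for you):
.. code:: shell
AWS_ACCESS_KEY_ID=accessKey1 AWS_SECRET_ACCESS_KEY=verySecretKey1 \
AWS_DEFAULT_REGION=us-east-1 \
aws s3api put-object --endpoint-url http://127.0.0.1:8000 \
--bucket mybucket --key myobject --body ./myfile \
--metadata scal-location-constraint=myLocationConstraint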
Run it with an in-memory backend
--------------------------------
.. code:: shell
npm run mem_backend
This starts an S3 server on port 8000. The default access key is
accessKey1 with a secret key of verySecretKey1.
Run it for continuous integration testing or in production with Docker
----------------------------------------------------------------------
`DOCKER.rst <DOCKER.rst>`__
Testing
-------
You can run the unit tests with the following command:
.. code:: shell
npm test
You can run the multiple backend unit tests with:
.. code:: shell
npm run multiple_backend_test
You can run the linter with:
.. code:: shell
npm run lint
Running functional tests locally:
The test suite requires additional tools, **s3cmd** and **Redis**, to be
installed in the environment the tests are running in.
- Install `s3cmd <http://s3tools.org/download>`__
- Install `redis <https://redis.io/download>`__ and start Redis.
- Add localCache section to your ``config.json``:
::
"localCache": {
"host": REDIS_HOST,
"port": REDIS_PORT
}
where ``REDIS_HOST`` is your Redis instance IP address (``"127.0.0.1"``
if your Redis is running locally) and ``REDIS_PORT`` is your Redis
instance port (``6379`` by default).
- Add the following to the ``/etc/hosts`` file on your machine:
.. code:: shell
127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com
- Start the S3 server in memory and run the functional tests:
.. code:: shell
npm run mem_backend
npm run ft_test
Configuration
-------------
There are three configuration files for your Scality S3 Server:
1. ``conf/authdata.json``, described above for authentication
2. ``locationConfig.json``, to set up configuration options for
where data will be saved
3. ``config.json``, for general configuration options
Location Configuration
~~~~~~~~~~~~~~~~~~~~~~
You must specify at least one locationConstraint in your
locationConfig.json (or leave as pre-configured).
You must also specify 'us-east-1' as a locationConstraint, so if you only
define one locationConstraint, it must be 'us-east-1'. If you put a bucket
to an unknown endpoint and do not specify a locationConstraint in the put
bucket call, us-east-1 will be used.
For instance, the following locationConstraint will save data sent to
``myLocationConstraint`` to the file backend:
.. code:: json
"myLocationConstraint": {
"type": "file",
"legacyAwsBehavior": false,
"details": {}
},
Each locationConstraint must include the ``type``,
``legacyAwsBehavior``, and ``details`` keys. ``type`` indicates which
backend will be used for that region. Currently, mem, file, and scality
are the supported backends. ``legacyAwsBehavior`` indicates whether the
region will have the same behavior as the AWS S3 'us-east-1' region. If
the locationConstraint type is scality, ``details`` should contain
connector information for sproxyd. If the locationConstraint type is mem
or file, ``details`` should be empty.
Once you have your locationConstraints in your locationConfig.json, you
can specify a default locationConstraint for each of your endpoints.
For instance, the following sets the ``localhost`` endpoint to the
``myLocationConstraint`` data backend defined above:
.. code:: json
"restEndpoints": {
"localhost": "myLocationConstraint"
},
If you would like to use an endpoint other than localhost for your
Scality S3 Server, that endpoint MUST be listed in your
``restEndpoints``. Otherwise if your server is running with a:
- **file backend**: your default location constraint will be ``file``
- **memory backend**: your default location constraint will be ``mem``
Endpoints
~~~~~~~~~
Note that our S3server supports both:
- path-style: http://myhostname.com/mybucket
- hosted-style: http://mybucket.myhostname.com
However, hosted-style requests will not hit the server if you are using
an IP address for your host. So, make sure you are using path-style
requests in that case. For instance, if you are using the AWS SDK for
JavaScript, you would instantiate your client like this:
.. code:: js
const s3 = new aws.S3({
endpoint: 'http://127.0.0.1:8000',
s3ForcePathStyle: true,
});
Setting your own access key and secret key pairs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can set credentials for many accounts by editing
``conf/authdata.json`` but if you want to specify one set of your own
credentials, you can use ``SCALITY_ACCESS_KEY_ID`` and
``SCALITY_SECRET_ACCESS_KEY`` environment variables.
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These variables specify authentication credentials for an account named
"CustomAccount".
Note: Anything in the ``authdata.json`` file will be ignored.
.. code:: shell
SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey npm start
Scality with SSL
~~~~~~~~~~~~~~~~~~~~~~
If you wish to use https with your local S3 Server, you need to set up
SSL certificates. Here is a simple guide on how to do it.
Deploying S3 Server
^^^^^^^^^^^^^^^^^^^
First, you need to deploy **S3 Server**. This can be done very easily
via `our **DockerHub**
page <https://hub.docker.com/r/scality/s3server/>`__ (you want to run it
with a file backend).
*Note:* *- If you don't have docker installed on your machine, here
are the `instructions to install it for your
distribution <https://docs.docker.com/engine/installation/>`__*
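For instance, a minimal deployment could look like this (a sketch; the
container name ``s3server`` matches the ``docker restart s3server``
command used later in this guide):
.. code:: sh
$> docker run -d --name s3server -p 8000:8000 scality/s3server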
Updating your S3 Server container's config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You're going to add your certificates to your container. In order to do
so, you need to exec inside your s3 server container. Run
``$> docker ps`` and find your container's id (the corresponding image
name should be ``scality/s3server``). Copy the corresponding container id
(here we'll use ``894aee038c5e``), and run:
.. code:: sh
$> docker exec -it 894aee038c5e bash
You're now inside your container, using an interactive terminal :)
Generate SSL key and certificates
**********************************
There are 5 steps to this generation. The paths where the different
files are stored are defined after the ``-out`` option in each command.
.. code:: sh
# Generate a private key for your CSR
$> openssl genrsa -out ca.key 2048
# Generate a self signed certificate for your local Certificate Authority
$> openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=scality.test"
# Generate a key for S3 Server
$> openssl genrsa -out test.key 2048
# Generate a Certificate Signing Request for S3 Server
$> openssl req -new -key test.key -out test.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.scality.test"
# Generate a local-CA-signed certificate for S3 Server
$> openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 99999 -sha256
Update S3Server ``config.json``
**********************************
Add a ``certFilePaths`` section to ``./config.json`` with the
appropriate paths:
.. code:: json
"certFilePaths": {
"key": "./test.key",
"cert": "./test.crt",
"ca": "./ca.crt"
}
Run your container with the new config
****************************************
First, you need to exit your container. Simply run ``$> exit``. Then,
you need to restart your container. Normally, a simple
``$> docker restart s3server`` should do the trick.
Update your host config
^^^^^^^^^^^^^^^^^^^^^^^
Associate local IP addresses with hostname
*******************************************
In your ``/etc/hosts`` file on Linux, OS X, or Unix (with root
permissions), edit the line of localhost so it looks like this:
::
127.0.0.1 localhost s3.scality.test
Copy the local certificate authority from your container
*********************************************************
In the above commands, it's the file named ``ca.crt``. Choose the path
you want to save this file at (here we chose ``/root/ca.crt``), and run
something like:
.. code:: sh
$> docker cp 894aee038c5e:/usr/src/app/ca.crt /root/ca.crt
Test your config
^^^^^^^^^^^^^^^^^
If you do not have aws-sdk installed, run ``$> npm install aws-sdk``. In
a ``test.js`` file, paste the following script:
.. code:: js
const AWS = require('aws-sdk');
const fs = require('fs');
const https = require('https');
const httpOptions = {
agent: new https.Agent({
// path on your host of the self-signed certificate
ca: fs.readFileSync('./ca.crt', 'ascii'),
}),
};
const s3 = new AWS.S3({
httpOptions,
accessKeyId: 'accessKey1',
secretAccessKey: 'verySecretKey1',
// The endpoint must be s3.scality.test, else SSL will not work
endpoint: 'https://s3.scality.test:8000',
sslEnabled: true,
// With this setup, you must use path-style bucket access
s3ForcePathStyle: true,
});
const bucket = 'cocoriko';
s3.createBucket({ Bucket: bucket }, err => {
if (err) {
return console.log('err createBucket', err);
}
return s3.deleteBucket({ Bucket: bucket }, err => {
if (err) {
return console.log('err deleteBucket', err);
}
return console.log('SSL is cool!');
});
});
Now run that script with ``$> nodejs test.js``. If all goes well, it
should output ``SSL is cool!``. Enjoy that added security!
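As an extra check, you can also confirm the TLS handshake with curl (a
sketch; the request is unauthenticated, so the body will be an S3 error
document, but curl should not report any certificate error):
.. code:: sh
$> curl --cacert /root/ca.crt https://s3.scality.test:8000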
.. |CircleCI| image:: https://circleci.com/gh/scality/S3.svg?style=svg
:target: https://circleci.com/gh/scality/S3
.. |Scality CI| image:: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
:target: http://ci.ironmann.io/gh/scality/S3

642
docs/INTEGRATIONS.rst Normal file

@ -0,0 +1,642 @@
Integrations
++++++++++++
High Availability
=================
`Docker swarm <https://docs.docker.com/engine/swarm/>`__ is a
clustering tool developed by Docker and ready to use with its
containers. It allows you to start a service, which we define and use as
a means to ensure s3server's continuous availability to the end user.
Indeed, a swarm defines a manager and n workers among n+1 servers. We
will do a basic setup in this tutorial, with just 3 servers, which
already provides strong service resiliency, whilst remaining easy to
set up as an individual. We will use NFS through docker to share data and
metadata between the different servers.
You will see that the steps of this tutorial are defined as **On
Server**, **On Clients**, **On All Machines**. This refers respectively
to NFS Server, NFS Clients, or NFS Server and Clients. In our example,
the IP of the Server will be **10.200.15.113**, while the IPs of the
Clients will be **10.200.15.96 and 10.200.15.97**.
Installing docker
-----------------
Any version from docker 1.12.6 onwards should work; we used Docker
17.03.0-ce for this tutorial.
On All Machines
~~~~~~~~~~~~~~~
On Ubuntu 14.04
^^^^^^^^^^^^^^^
The docker website has `solid
documentation <https://docs.docker.com/engine/installation/linux/ubuntu/>`__.
We have chosen to install the aufs dependency, as recommended by Docker.
Here are the required commands:
.. code:: sh
$> sudo apt-get update
$> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
$> sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
$> curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
$> sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
$> sudo apt-get update
$> sudo apt-get install docker-ce
On CentOS 7
^^^^^^^^^^^
The docker website has `solid
documentation <https://docs.docker.com/engine/installation/linux/centos/>`__.
Here are the required commands:
.. code:: sh
$> sudo yum install -y yum-utils
$> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
$> sudo yum makecache fast
$> sudo yum install docker-ce
$> sudo systemctl start docker
Configure NFS
-------------
On Clients
~~~~~~~~~~
Your NFS Clients will mount Docker volumes over your NFS Server's shared
folders. Hence, you don't have to mount anything manually; you just have
to install the NFS commons:
On Ubuntu 14.04
^^^^^^^^^^^^^^^
Simply install the NFS commons:
.. code:: sh
$> sudo apt-get install nfs-common
On CentOS 7
^^^^^^^^^^^
Install the NFS utils, and then start the required services:
.. code:: sh
$> yum install nfs-utils
$> sudo systemctl enable rpcbind
$> sudo systemctl enable nfs-server
$> sudo systemctl enable nfs-lock
$> sudo systemctl enable nfs-idmap
$> sudo systemctl start rpcbind
$> sudo systemctl start nfs-server
$> sudo systemctl start nfs-lock
$> sudo systemctl start nfs-idmap
On Server
~~~~~~~~~
Your NFS Server will be the machine to physically host the data and
metadata. The packages we will install on it are slightly different
from the ones we installed on the clients.
On Ubuntu 14.04
^^^^^^^^^^^^^^^
Install the NFS server specific package and the NFS commons:
.. code:: sh
$> sudo apt-get install nfs-kernel-server nfs-common
On CentOS 7
^^^^^^^^^^^
Same steps as with the client: install the NFS utils and start the
required services:
.. code:: sh
$> yum install nfs-utils
$> sudo systemctl enable rpcbind
$> sudo systemctl enable nfs-server
$> sudo systemctl enable nfs-lock
$> sudo systemctl enable nfs-idmap
$> sudo systemctl start rpcbind
$> sudo systemctl start nfs-server
$> sudo systemctl start nfs-lock
$> sudo systemctl start nfs-idmap
On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Choose where your shared data and metadata from your local `S3
Server <http://www.scality.com/scality-s3-server/>`__ will be stored.
We chose to go with /var/nfs/data and /var/nfs/metadata. You also need
to set proper sharing permissions for these folders as they'll be shared
over NFS:
.. code:: sh
$> mkdir -p /var/nfs/data /var/nfs/metadata
$> chmod -R 777 /var/nfs/
Now you need to update your **/etc/exports** file. This is the file that
configures network permissions and rwx permissions for NFS access. By
default, Ubuntu applies the no\_subtree\_check option, so we declared
both folders with the same permissions, even though they're in the same
tree:
.. code:: sh
$> sudo vim /etc/exports
In this file, add the following lines:
.. code:: sh
/var/nfs/data 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
/var/nfs/metadata 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
Export this new NFS table:
.. code:: sh
$> sudo exportfs -a
Finally, you need to allow NFS mounts from Docker volumes on other
machines. To do so, you need to change the Docker config in
**/lib/systemd/system/docker.service**:
.. code:: sh
$> sudo vim /lib/systemd/system/docker.service
In this file, change the **MountFlags** option:
.. code:: sh
MountFlags=shared
Now you just need to restart the NFS server and docker daemons so your
changes apply.
On Ubuntu 14.04
^^^^^^^^^^^^^^^
Restart your NFS Server and docker services:
.. code:: sh
$> sudo service nfs-kernel-server restart
$> sudo service docker restart
On CentOS 7
^^^^^^^^^^^
Restart your NFS Server and docker daemons:
.. code:: sh
$> sudo systemctl restart nfs-server
$> sudo systemctl daemon-reload
$> sudo systemctl restart docker
Set up your Docker Swarm service
--------------------------------
On All Machines
~~~~~~~~~~~~~~~
On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We will now set up the Docker volumes that will be mounted to the NFS
Server and serve as data and metadata storage for S3 Server. These two
commands have to be replicated on all machines:
.. code:: sh
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata
There is no need to "docker exec" these volumes to mount them: the
Docker Swarm manager will do it when the Docker service is started.
On Server
^^^^^^^^^
To start a Docker service on a Docker Swarm cluster, you first have to
initialize that cluster (i.e.: define a manager), then have the
workers/nodes join in, and then start the service. Initialize the swarm
cluster, and look at the response:
.. code:: sh
$> docker swarm init --advertise-addr 10.200.15.113
Swarm initialized: current node (db2aqfu3bzfzzs9b1kfeaglmq) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join \
--token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka \
10.200.15.113:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
On Clients
^^^^^^^^^^
Simply copy/paste the command provided by your docker swarm init. When
all goes well, you'll get something like this:
.. code:: sh
$> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377
This node joined a swarm as a worker.
On Server
^^^^^^^^^
Start the service on your swarm cluster!
.. code:: sh
$> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/s3server
If you run ``docker service ls``, you should see the following output:
.. code:: sh
$> docker service ls
ID NAME MODE REPLICAS IMAGE
ocmggza412ft s3 replicated 1/1 scality/s3server:latest
If your service won't start, consider disabling apparmor/SELinux.
Testing your High Availability S3Server
---------------------------------------
On All Machines
~~~~~~~~~~~~~~~
On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Try to find out where your Scality S3 Server is actually running using
the **docker ps** command. It can be on any node of the swarm cluster,
manager or worker. When you find it, you can kill it with **docker stop
<container id>**, and you'll see it respawn on a different node of the
swarm cluster. This way, if one of your servers fails, or if docker
stops unexpectedly, your end users will still be able to access your
local S3 Server.
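For instance, the sequence could look like this (the container id is
just an example):
.. code:: sh
$> docker ps
$> docker stop <container id>
$> docker ps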
Troubleshooting
---------------
To troubleshoot the service you can run:
.. code:: sh
$> docker service ps s3
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
0ar81cw4lvv8chafm8pw48wbc s3.1 scality/s3server localhost.localdomain.localdomain Running Running 7 days ago
cvmf3j3bz8w6r4h0lf3pxo6eu \_ s3.1 scality/s3server localhost.localdomain.localdomain Shutdown Failed 7 days ago "task: non-zero exit (137)"
If the error is truncated, you can get a more detailed view of the
error by inspecting the docker task ID:
.. code:: sh
$> docker inspect cvmf3j3bz8w6r4h0lf3pxo6eu
Off you go!
-----------
Let us know what you use this functionality for, and if you'd like any
specific developments around it. Or, even better: come and contribute to
our `Github repository <https://github.com/scality/s3/>`__! We look
forward to meeting you!
S3FS
====
Export your buckets as a filesystem with s3fs on top of s3server
`s3fs <https://github.com/s3fs-fuse/s3fs-fuse>`__ is an open source
tool that allows you to mount an S3 bucket on a filesystem-like backend.
It is available both on Debian and RedHat distributions. For this
tutorial, we used an Ubuntu 14.04 host to deploy and use s3fs over
Scality's S3 Server.
Deploying S3 Server with SSL
----------------------------
First, you need to deploy **S3 Server**. This can be done very easily
via `our DockerHub
page <https://hub.docker.com/r/scality/s3server/>`__ (you want to run it
with a file backend).
*Note:* *- If you don't have docker installed on your machine, here
are the `instructions to install it for your
distribution <https://docs.docker.com/engine/installation/>`__*
You also need to set up SSL with S3Server to use s3fs. We
have a nice
`tutorial <https://s3.scality.com/v1.0/page/scality-with-ssl>`__ to help
you do it.
s3fs setup
----------
Installing s3fs
~~~~~~~~~~~~~~~
s3fs has quite a few dependencies. As explained in their
`README <https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation>`__,
the following commands should install everything for Ubuntu 14.04:
.. code:: sh
$> sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev
$> sudo apt-get install libfuse-dev libssl-dev libxml2-dev make pkg-config
Now you want to install s3fs per se:
.. code:: sh
$> git clone https://github.com/s3fs-fuse/s3fs-fuse.git
$> cd s3fs-fuse
$> ./autogen.sh
$> ./configure
$> make
$> sudo make install
Check that s3fs is properly installed by checking its version. It should
answer as below:
.. code:: sh
$> s3fs --version
Amazon Simple Storage Service File System V1.80(commit:d40da2c) with OpenSSL
Configuring s3fs
~~~~~~~~~~~~~~~~
s3fs expects you to provide it with a password file. Our file is
``/etc/passwd-s3fs``. The structure for this file is
``ACCESSKEYID:SECRETKEYID``, so, for S3Server, you can run:
.. code:: sh
$> echo 'accessKey1:verySecretKey1' > /etc/passwd-s3fs
$> chmod 600 /etc/passwd-s3fs
Using S3Server with s3fs
------------------------
First, you're going to need a mountpoint; we chose ``/mnt/tests3fs``:
.. code:: sh
$> mkdir /mnt/tests3fs
Then, you want to create a bucket on your local S3Server; we named it
``tests3fs``:
.. code:: sh
$> s3cmd mb s3://tests3fs
*Note:* *- If you've never used s3cmd with our S3Server, our README
provides you with a `recommended
config <https://github.com/scality/S3/blob/master/README.md#s3cmd>`__*
Now you can mount your bucket to your mountpoint with s3fs:
.. code:: sh
$> s3fs tests3fs /mnt/tests3fs -o passwd_file=/etc/passwd-s3fs -o url="https://s3.scality.test:8000/" -o use_path_request_style
If you're curious, the structure of this command is
``s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS``, and the
options are mandatory and serve the following purposes:
* ``passwd_file``: specify the path to the password file;
* ``url``: specify the hostname used by your SSL provider;
* ``use_path_request_style``: force path style (by default, s3fs
uses subdomains (DNS style)).
| From now on, you can either add files to your mountpoint, or add
objects to your bucket, and they'll show in the other.
| For example, let's create two files, and then a directory with a file
in our mountpoint:
.. code:: sh
$> touch /mnt/tests3fs/file1 /mnt/tests3fs/file2
$> mkdir /mnt/tests3fs/dir1
$> touch /mnt/tests3fs/dir1/file3
Now, you can use s3cmd to see what is actually in S3Server:
.. code:: sh
$> s3cmd ls -r s3://tests3fs
2017-02-28 17:28 0 s3://tests3fs/dir1/
2017-02-28 17:29 0 s3://tests3fs/dir1/file3
2017-02-28 17:28 0 s3://tests3fs/file1
2017-02-28 17:28 0 s3://tests3fs/file2
Now you can enjoy a filesystem view on your local S3Server!
Duplicity
=========
How to back up your files with S3 Server.
Installing
-----------
Installing Duplicity and its dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To install `Duplicity <http://duplicity.nongnu.org/index.html>`__, you
have to download `this
tarball <https://code.launchpad.net/duplicity/0.7-series/0.7.11/+download/duplicity-0.7.11.tar.gz>`__,
decompress it, and then check out the README inside, which will give you
a list of dependencies to install. If you're using Ubuntu 14.04, this is
your lucky day: here is a lazy step-by-step install.
.. code:: sh
$> apt-get install librsync-dev gnupg
$> apt-get install python-dev python-pip python-lockfile
$> pip install -U boto
Then you want to actually install Duplicity:
.. code:: sh
$> tar zxvf duplicity-0.7.11.tar.gz
$> cd duplicity-0.7.11
$> python setup.py install
Using
------
Testing your installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
First, we're just going to quickly check that S3 Server is actually
running. To do so, simply run ``$> docker ps``. You should see one
container named ``scality/s3server``. If that is not the case, try
``$> docker start s3server``, and check again.
Secondly, as you probably know, Duplicity uses a module called **Boto**
to send requests to S3. Boto requires a configuration file located in
**``/etc/boto.cfg``** to have your credentials and preferences. Here is
a minimalistic config `that you can fine-tune following these
instructions <http://boto.cloudhackers.com/en/latest/getting_started.html>`__.
::
[Credentials]
aws_access_key_id = accessKey1
aws_secret_access_key = verySecretKey1
[Boto]
# If using SSL, set to True
is_secure = False
# If using SSL, unmute and provide absolute path to local CA certificate
# ca_certificates_file = /absolute/path/to/ca.crt
*Note:* *If you want to set up SSL with S3 Server, check out our
`tutorial <http://link/to/SSL/tutorial>`__*
At this point, we've met all the requirements to start running S3 Server
as a backend to Duplicity. So we should be able to back up a local
folder/file to local S3. Let's try with the duplicity decompressed
folder:
.. code:: sh
$> duplicity duplicity-0.7.11 "s3://127.0.0.1:8000/testbucket/"
*Note:* *Duplicity will prompt you for a symmetric encryption
passphrase. Save it somewhere as you will need it to recover your
data. Alternatively, you can also add the ``--no-encryption`` flag
and the data will be stored plain.*
If this command is successful, you will get an output looking like this:
::
--------------[ Backup Statistics ]--------------
StartTime 1486486547.13 (Tue Feb 7 16:55:47 2017)
EndTime 1486486547.40 (Tue Feb 7 16:55:47 2017)
ElapsedTime 0.27 (0.27 seconds)
SourceFiles 388
SourceFileSize 6634529 (6.33 MB)
NewFiles 388
NewFileSize 6634529 (6.33 MB)
DeletedFiles 0
ChangedFiles 0
ChangedFileSize 0 (0 bytes)
ChangedDeltaSize 0 (0 bytes)
DeltaEntries 388
RawDeltaSize 6392865 (6.10 MB)
TotalDestinationSizeChange 2003677 (1.91 MB)
Errors 0
-------------------------------------------------
Congratulations! You can now back up to your local S3 through duplicity
:)
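To recover your data later, the restore direction simply reverses source
and destination (a sketch; point the second argument at an empty or
non-existent local folder, and duplicity will ask for the passphrase you
chose above):
.. code:: sh
$> duplicity "s3://127.0.0.1:8000/testbucket/" /path/to/restore/folder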
Automating backups
~~~~~~~~~~~~~~~~~~~
Now you probably want to back up your files periodically. The easiest
way to do this is to write a bash script and add it to your crontab.
Here is my suggestion for such a file:
.. code:: sh
#!/bin/bash
# Export your passphrase so you don't have to type anything
export PASSPHRASE="mypassphrase"
# If you want to use a GPG Key, put it here and unmute the line below
#GPG_KEY=
# Define your backup bucket, with localhost specified
DEST="s3://127.0.0.1:8000/testbuckets3server/"
# Define the absolute path to the folder you want to backup
SOURCE=/root/testfolder
# Set to "full" for full backups, and "incremental" for incremental backups
# Warning: you have to perform one full backup before you can perform
# incremental ones on top of it
FULL=incremental
# How long to keep backups for; if you don't want to delete old
# backups, keep empty; otherwise, syntax is "1Y" for one year, "1M"
# for one month, "1D" for one day
OLDER_THAN="1Y"
# is_running checks whether duplicity is currently completing a task
is_running=$(ps -ef | grep duplicity | grep python | wc -l)
# If duplicity is already completing a task, this will simply not run
if [ $is_running -eq 0 ]; then
echo "Backup for ${SOURCE} started"
# If you want to delete backups older than a certain time, we do it here
if [ "$OLDER_THAN" != "" ]; then
echo "Removing backups older than ${OLDER_THAN}"
duplicity remove-older-than ${OLDER_THAN} ${DEST}
fi
# This is where the actual backup takes place
echo "Backing up ${SOURCE}..."
duplicity ${FULL} \
${SOURCE} ${DEST}
# If you're using GPG, paste this in the command above
# --encrypt-key=${GPG_KEY} --sign-key=${GPG_KEY} \
# If you want to exclude a subfolder/file, put it below and
# paste this
# in the command above
# --exclude=/${SOURCE}/path_to_exclude \
echo "Backup for ${SOURCE} complete"
echo "------------------------------------"
fi
# Forget the passphrase...
unset PASSPHRASE
So let's say you put this file in ``/usr/local/sbin/backup.sh``. Next
you want to run ``crontab -e`` and paste your configuration in the file
that opens. If you're unfamiliar with Cron, here is a good `How
To <https://help.ubuntu.com/community/CronHowto>`__. The folder I'm
backing up is a folder I modify constantly during my workday, so I want
incremental backups every 5 minutes from 8AM to 9PM, Monday to Friday.
Here is the line I will paste in my crontab:
.. code:: cron
*/5 8-20 * * 1-5 /usr/local/sbin/backup.sh
Now I can try and add / remove files from the folder I'm backing up, and
I will see incremental backups in my bucket.
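To check that backups are indeed accumulating, you can ask duplicity for
the collection status of the bucket (a sketch; the URL matches the
``DEST`` defined in the script above):
.. code:: sh
$> duplicity collection-status "s3://127.0.0.1:8000/testbuckets3server/"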

161
docs/conf.py Normal file

@ -0,0 +1,161 @@
# -*- coding: utf-8 -*-
#
# Zope docs documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 20 16:22:03 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scality-s3-server'
copyright = u'Apache License Version 2.0, 2004 http://www.apache.org/licenses/'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '7.0.0'
# The full version, including alpha/beta/rc tags.
release = '7.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for
# all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'css/default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../res/Scality-S3-Server-Logo-Large.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
# html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ScalityS3doc'

Binary file not shown (image, 42 KiB).
File diff suppressed because one or more lines are too long (image, 45 KiB).

15
docs/index.rst Normal file

@ -0,0 +1,15 @@
Scality S3 Server
==================
.. _user-docs:
.. toctree::
:maxdepth: 2
:caption: Documentation
CONTRIBUTING
GETTING_STARTED
CLIENTS
DOCKER
INTEGRATIONS
ARCHITECTURE

4
docs/mkdocs.yml Normal file

@ -0,0 +1,4 @@
# http://www.mkdocs.org/user-guide/configuration/
# https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes
site_name: Scality S3 documentation


@ -1,4 +1,3 @@
'use strict'; // eslint-disable-line strict
require('babel-core/register')();
require('./lib/server.js').default();
require('./lib/server.js')();

83
init.js

@ -1,83 +0,0 @@
'use strict'; // eslint-disable-line strict
require('babel-core/register');
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const async = require('async');
const constants = require('./constants').default;
const config = require('./lib/Config.js').default;
const logger = require('./lib/utilities/logger.js').logger;
let ioctl;
try {
ioctl = require('ioctl');
} catch (err) {
logger.warn('ioctl dependency is unavailable. skipping...');
}
function _setDirSyncFlag(path) {
const GETFLAGS = 2148034049;
const SETFLAGS = 1074292226;
const FS_DIRSYNC_FL = 65536;
const buffer = Buffer.alloc(8, 0);
const pathFD = fs.openSync(path, 'r');
const status = ioctl(pathFD, GETFLAGS, buffer);
assert.strictEqual(status, 0);
const currentFlags = buffer.readUIntLE(0, 8);
const flags = currentFlags | FS_DIRSYNC_FL;
buffer.writeUIntLE(flags, 0, 8);
const status2 = ioctl(pathFD, SETFLAGS, buffer);
assert.strictEqual(status2, 0);
fs.closeSync(pathFD);
const pathFD2 = fs.openSync(path, 'r');
const confirmBuffer = Buffer.alloc(8, 0);
ioctl(pathFD2, GETFLAGS, confirmBuffer);
assert.strictEqual(confirmBuffer.readUIntLE(0, 8),
currentFlags | FS_DIRSYNC_FL, 'FS_DIRSYNC_FL not set');
logger.info('FS_DIRSYNC_FL set');
fs.closeSync(pathFD2);
}
if (config.backends.data !== 'file' && config.backends.metadata !== 'file') {
logger.info('No init required. Go forth and store data.');
process.exit(0);
}
const dataPath = config.filePaths.dataPath;
const metadataPath = config.filePaths.metadataPath;
fs.accessSync(dataPath, fs.F_OK | fs.R_OK | fs.W_OK);
fs.accessSync(metadataPath, fs.F_OK | fs.R_OK | fs.W_OK);
const warning = 'WARNING: Synchronization directory updates are not ' +
'supported on this platform. Newly written data could be lost ' +
'if your system crashes before the operating system is able to ' +
'write directory updates.';
if (os.type() === 'Linux' && os.endianness() === 'LE' && ioctl) {
try {
_setDirSyncFlag(dataPath);
_setDirSyncFlag(metadataPath);
} catch (err) {
logger.warn(warning, { error: err.stack });
}
} else {
logger.warn(warning);
}
// Create 3511 subdirectories for the data file backend
const subDirs = Array.from({ length: constants.folderHash },
(v, k) => (k).toString());
async.eachSeries(subDirs, (subDirName, next) => {
fs.mkdir(`${dataPath}/${subDirName}`, err => {
// If already exists, move on
if (err && err.code !== 'EEXIST') {
return next(err);
}
return next();
});
},
err => {
assert.strictEqual(err, null, `Error creating data files ${err}`);
logger.info('Init complete. Go forth and store data.');
});


@ -1,13 +1,97 @@
import assert from 'assert';
import fs from 'fs';
import path from 'path';
const assert = require('assert');
const fs = require('fs');
const path = require('path');
import authDataChecker from './auth/in_memory/checker';
const authDataChecker = require('./auth/in_memory/checker');
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
// whitelist IP, CIDR for health checks
const defaultHealthChecks = { allowFrom: ['127.0.0.1/8', '::1'] };
const defaultLocalCache = { host: '127.0.0.1', port: 6379 };
function sproxydAssert(configSproxyd) {
const sproxydFields = [];
if (configSproxyd.bootstrap !== undefined) {
assert(Array.isArray(configSproxyd.bootstrap)
&& configSproxyd.bootstrap
.every(e => typeof e === 'string'),
'bad config: sproxyd.bootstrap must be an array of strings');
assert(configSproxyd.bootstrap.length > 0,
'bad config: sproxyd bootstrap list is empty');
sproxydFields.push('bootstrap');
}
if (configSproxyd.chordCos !== undefined) {
assert(typeof configSproxyd.chordCos === 'string',
'bad config: sproxyd.chordCos must be a string');
assert(configSproxyd.chordCos.match(/^[0-6]{1}$/),
'bad config: sproxyd.chordCos must be a digit smaller than 7');
sproxydFields.push('chordCos');
}
if (configSproxyd.path !== undefined) {
assert(typeof configSproxyd.path === 'string',
'bad config: sproxyd.path must be a string');
sproxydFields.push('path');
}
return sproxydFields;
}
function locationConstraintAssert(locationConstraints) {
const supportedBackends = ['mem', 'file', 'scality', 'aws_s3'];
assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => {
assert(typeof locationConstraints[l] === 'object',
'bad config: locationConstraints[region] must be an object');
assert(typeof locationConstraints[l].type === 'string',
'bad config: locationConstraints[region].type is ' +
'mandatory and must be a string');
assert(supportedBackends.indexOf(locationConstraints[l].type) > -1,
'bad config: locationConstraints[region].type must ' +
`be one of ${supportedBackends}`);
assert(typeof locationConstraints[l].legacyAwsBehavior
=== 'boolean',
'bad config: locationConstraints[region]' +
'.legacyAwsBehavior is mandatory and must be a boolean');
assert(typeof locationConstraints[l].details
=== 'object',
'bad config: locationConstraints[region].details is ' +
'mandatory and must be an object');
const details = locationConstraints[l].details;
const stringFields = [
'awsEndpoint',
'bucketName',
'credentialsProfile',
];
stringFields.forEach(field => {
if (details[field] !== undefined) {
assert(typeof details[field] === 'string',
`bad config: ${field} must be a string`);
}
});
if (details.bucketMatch !== undefined) {
assert(typeof details.bucketMatch === 'boolean',
'bad config: details.bucketMatch must be a boolean');
}
if (details.credentials !== undefined) {
assert(typeof details.credentials === 'object',
'bad config: details.credentials must be an object');
assert(typeof details.credentials.accessKey === 'string',
'bad config: credentials must include accessKey as string');
assert(typeof details.credentials.secretKey === 'string',
'bad config: credentials must include secretKey as string');
}
});
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
}
function cosParse(chordCos) {
// Cos number should only be first digit of config value
return Number.parseInt(chordCos, 10);
}
/**
* Reads from a config file and returns the content as a config object
*/
@ -16,21 +100,87 @@ class Config {
/*
* By default, the config file is "config.json" at the root.
* It can be overridden using the S3_CONFIG_FILE environment var.
* By default, the location config file is "locationConfig.json" at
* the root.
* It can be overridden using the S3_LOCATION_FILE environment var.
*/
this._basePath = path.join(__dirname, '..');
this.path = path.join(__dirname, '../config.json');
this.configPath = path.join(__dirname, '../config.json');
if (process.env.S3_CONFIG_FILE !== undefined) {
this.path = process.env.S3_CONFIG_FILE;
this.configPath = process.env.S3_CONFIG_FILE;
}
this.locationConfigPath = path.join(__dirname,
'../locationConfig.json');
if (process.env.S3_LOCATION_FILE !== undefined) {
this.locationConfigPath = process.env.S3_LOCATION_FILE;
}
// Read config automatically
this._getConfig();
this._getLocationConfig();
this._configureBackends();
}
_getLocationConfig() {
let locationConfig;
try {
const data = fs.readFileSync(this.locationConfigPath,
{ encoding: 'utf-8' });
locationConfig = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse location config file:
${err.message}`);
}
this.locationConstraints = {};
locationConstraintAssert(locationConfig);
this.locationConstraints = locationConfig;
Object.keys(locationConfig).forEach(l => {
const details = this.locationConstraints[l].details;
if (locationConfig[l].details.connector !== undefined) {
assert(typeof locationConfig[l].details.connector ===
'object', 'bad config: connector must be an object');
if (locationConfig[l].details.connector.sproxyd !==
undefined) {
details.connector.sproxyd =
locationConfig[l].details.connector.sproxyd;
const fields = sproxydAssert(
locationConfig[l].details.connector.sproxyd);
if (fields.indexOf('bootstrap') > -1) {
details.connector.sproxyd.bootstrap =
locationConfig[l].details.connector.sproxyd.bootstrap;
assert(Array.isArray(
details.connector.sproxyd.bootstrap) &&
details.connector.sproxyd.bootstrap.every(e =>
typeof e === 'string'),
'assignment error: sproxyd.bootstrap must be ' +
'an array of strings');
}
if (fields.indexOf('chordCos') > -1) {
details.connector.sproxyd.chordCos =
cosParse(locationConfig[l].details.connector.
sproxyd.chordCos);
assert(typeof details.connector.sproxyd.chordCos ===
'number', 'assignment error: chordCos must be a ' +
'number');
}
if (fields.indexOf('path') > -1) {
details.connector.sproxyd.chordCos =
locationConfig[l].details.connector.sproxyd.path;
assert(typeof details.connector.sproxyd.chordCos ===
'string', 'assignment error: sproxyd path must ' +
'be a string');
}
}
}
});
}
_getConfig() {
let config;
try {
const data = fs.readFileSync(this.path, { encoding: 'utf-8' });
const data = fs.readFileSync(this.configPath,
{ encoding: 'utf-8' });
config = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse config file: ${err.message}`);
@ -63,13 +213,33 @@ class Config {
});
}
assert(typeof config.regions === 'object',
'bad config: the list of regions is mandatory');
assert(Object.keys(config.regions).every(
r => typeof r === 'string' && config.regions[r] instanceof Array
&& config.regions[r].every(e => typeof e === 'string')),
'bad config: regions must be a set of {region: [endpoints]}');
this.regions = config.regions;
if (config.replicationGroupId) {
assert(typeof config.replicationGroupId === 'string',
'bad config: replicationGroupId must be a string');
this.replicationGroupId = config.replicationGroupId;
} else {
this.replicationGroupId = 'RG001';
}
// legacy
if (config.regions !== undefined) {
throw new Error('bad config: regions key is deprecated. ' +
'Please use restEndpoints and locationConfig');
}
if (config.restEndpoints !== undefined) {
this.restEndpoints = {};
assert(typeof config.restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints');
assert(Object.keys(config.restEndpoints).every(
r => typeof config.restEndpoints[r] === 'string'),
'bad config: each endpoint must be a string');
this.restEndpoints = config.restEndpoints;
}
if (!config.restEndpoints) {
throw new Error('bad config: config must include restEndpoints');
}
this.websiteEndpoints = [];
if (config.websiteEndpoints !== undefined) {
@ -86,30 +256,14 @@ class Config {
this.clusters = config.clusters;
}
this.usEastBehavior = false;
if (config.usEastBehavior !== undefined) {
assert(typeof config.usEastBehavior === 'boolean');
this.usEastBehavior = config.usEastBehavior;
throw new Error('bad config: usEastBehavior key is deprecated. ' +
'Please use restEndpoints and locationConfig');
}
this.sproxyd = { bootstrap: [] };
// legacy
if (config.sproxyd !== undefined) {
if (config.sproxyd.bootstrap !== undefined) {
assert(Array.isArray(config.sproxyd.bootstrap)
&& config.sproxyd.bootstrap
.every(e => typeof e === 'string'),
'bad config: sproxyd.bootstrap must be a list of strings');
assert(config.sproxyd.bootstrap.length > 0,
'sproxyd bootstrap list is empty');
this.sproxyd.bootstrap = config.sproxyd.bootstrap;
}
if (config.sproxyd.chordCos !== undefined) {
assert(typeof config.sproxyd.chordCos === 'string',
'bad config: sproxyd.chordCos must be a string');
assert(config.sproxyd.chordCos.match(/^[0-9a-fA-F]{2}$/),
'bad config: sproxyd.chordCos must be a 2hex-chars string');
this.sproxyd.chordCos =
Number.parseInt(config.sproxyd.chordCos, 16);
}
throw new Error('bad config: sproxyd key is deprecated. ' +
'Please use restEndpoints and locationConfig');
}
this.bucketd = { bootstrap: [] };
@ -137,6 +291,81 @@ class Config {
}
}
if (config.dataClient) {
this.dataClient = {};
assert.strictEqual(typeof config.dataClient.host, 'string',
'bad config: data client host must be ' +
'a string');
this.dataClient.host = config.dataClient.host;
assert(Number.isInteger(config.dataClient.port)
&& config.dataClient.port > 0,
'bad config: dataClient port must be a positive ' +
'integer');
this.dataClient.port = config.dataClient.port;
}
if (config.metadataClient) {
this.metadataClient = {};
assert.strictEqual(
typeof config.metadataClient.host, 'string',
'bad config: metadata client host must be a string');
this.metadataClient.host = config.metadataClient.host;
assert(Number.isInteger(config.metadataClient.port)
&& config.metadataClient.port > 0,
'bad config: metadata client port must be a ' +
'positive integer');
this.metadataClient.port = config.metadataClient.port;
}
if (config.dataDaemon) {
this.dataDaemon = {};
assert.strictEqual(
typeof config.dataDaemon.bindAddress, 'string',
'bad config: data daemon bind address must be a string');
this.dataDaemon.bindAddress = config.dataDaemon.bindAddress;
assert(Number.isInteger(config.dataDaemon.port)
&& config.dataDaemon.port > 0,
'bad config: data daemon port must be a positive ' +
'integer');
this.dataDaemon.port = config.dataDaemon.port;
/**
* Configure the file paths for data if using the file
* backend. If no path provided, uses data at the root of
* the S3 project directory.
*/
this.dataDaemon.dataPath =
process.env.S3DATAPATH ?
process.env.S3DATAPATH : `${__dirname}/../localData`;
}
if (config.metadataDaemon) {
this.metadataDaemon = {};
assert.strictEqual(
typeof config.metadataDaemon.bindAddress, 'string',
'bad config: metadata daemon bind address must be a string');
this.metadataDaemon.bindAddress =
config.metadataDaemon.bindAddress;
assert(Number.isInteger(config.metadataDaemon.port)
&& config.metadataDaemon.port > 0,
'bad config: metadata daemon port must be a ' +
'positive integer');
this.metadataDaemon.port = config.metadataDaemon.port;
/**
* Configure the file path for metadata if using the file
* backend. If no path provided, uses data and metadata at
* the root of the S3 project directory.
*/
this.metadataDaemon.metadataPath =
process.env.S3METADATAPATH ?
process.env.S3METADATAPATH : `${__dirname}/../localMetadata`;
}
if (process.env.ENABLE_LOCAL_CACHE) {
this.localCache = defaultLocalCache;
}
@ -328,6 +557,9 @@ class Config {
throw new Error('bad config: both certFilePaths.key and ' +
'certFilePaths.cert must be defined');
}
}
_configureBackends() {
/**
* Configure the backends for Authentication, Data and Metadata.
*/
@ -356,15 +588,34 @@ class Config {
if (process.env.S3AUTH_CONFIG) {
authfile = process.env.S3AUTH_CONFIG;
}
const authData = require(authfile);
let authData = require(authfile);
if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) {
authData = buildAuthDataAccount(
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
}
if (authDataChecker(authData)) {
throw new Error('bad config: invalid auth config file.');
}
this.authData = authData;
}
if (process.env.S3SPROXYD) {
data = process.env.S3SPROXYD;
if (process.env.S3DATA) {
const validData = ['mem', 'file', 'scality', 'multiple'];
assert(validData.indexOf(process.env.S3DATA) > -1,
'bad environment variable: S3DATA environment variable ' +
'should be one of mem/file/scality/multiple'
);
data = process.env.S3DATA;
}
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
}
assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined,
'bad config: locationConstraints and restEndpoints must be set'
);
if (process.env.S3METADATA) {
metadata = process.env.S3METADATA;
}
@ -377,21 +628,6 @@ class Config {
metadata,
kms,
};
/**
* Configure the file paths for data and metadata
* if using the file backend. If no path provided,
* uses data and metadata at the root of the S3 project directory
*/
const dataPath = process.env.S3DATAPATH ?
process.env.S3DATAPATH : `${__dirname}/../localData`;
const metadataPath = process.env.S3METADATAPATH ?
process.env.S3METADATAPATH : `${__dirname}/../localMetadata`;
this.filePaths = {
dataPath,
metadataPath,
};
return config;
}
_verifyRedisPassword(password) {
@ -399,4 +635,9 @@ class Config {
}
}
export default new Config();
module.exports = {
sproxydAssert,
locationConstraintAssert,
cosParse,
config: new Config(),
};


@ -1,8 +1,8 @@
import Redis from 'ioredis';
const Redis = require('ioredis');
import { logger } from './utilities/logger';
const logger = require('./utilities/logger');
export default class RedisClient {
module.exports = class RedisClient {
/**
* @constructor
* @param {string} host - Redis host
@ -53,4 +53,4 @@ export default class RedisClient {
clear(cb) {
return this._client.flushDb(cb);
}
}
};


@ -1,6 +1,6 @@
import async from 'async';
const async = require('async');
export default class StatsClient {
class StatsClient {
/**
* @constructor
@ -152,3 +152,5 @@ export default class StatsClient {
});
}
}
module.exports = StatsClient;


@ -1,73 +1,72 @@
import querystring from 'querystring';
const { auth, errors } = require('arsenal');
import { auth, errors } from 'arsenal';
import bucketDelete from './bucketDelete';
import bucketDeleteCors from './bucketDeleteCors';
import bucketDeleteWebsite from './bucketDeleteWebsite';
import bucketGet from './bucketGet';
import bucketGetACL from './bucketGetACL';
import bucketGetCors from './bucketGetCors';
import bucketGetVersioning from './bucketGetVersioning';
import bucketGetWebsite from './bucketGetWebsite';
import bucketHead from './bucketHead';
import bucketPut from './bucketPut';
import bucketPutACL from './bucketPutACL';
import bucketPutCors from './bucketPutCors';
import bucketPutVersioning from './bucketPutVersioning';
import bucketPutWebsite from './bucketPutWebsite';
import corsPreflight from './corsPreflight';
import completeMultipartUpload from './completeMultipartUpload';
import initiateMultipartUpload from './initiateMultipartUpload';
import listMultipartUploads from './listMultipartUploads';
import listParts from './listParts';
import multiObjectDelete from './multiObjectDelete';
import multipartDelete from './multipartDelete';
import objectCopy from './objectCopy';
import objectDelete from './objectDelete';
import objectGet from './objectGet';
import objectGetACL from './objectGetACL';
import objectHead from './objectHead';
import objectPut from './objectPut';
import objectPutACL from './objectPutACL';
import objectPutPart from './objectPutPart';
import objectPutCopyPart from './objectPutCopyPart';
import prepareRequestContexts from
'./apiUtils/authorization/prepareRequestContexts';
import serviceGet from './serviceGet';
import vault from '../auth/vault';
import websiteGet from './websiteGet';
import websiteHead from './websiteHead';
const bucketDelete = require('./bucketDelete');
const bucketDeleteCors = require('./bucketDeleteCors');
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketGet = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors');
const bucketGetVersioning = require('./bucketGetVersioning');
const bucketGetWebsite = require('./bucketGetWebsite');
const bucketGetLocation = require('./bucketGetLocation');
const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut');
const bucketPutACL = require('./bucketPutACL');
const bucketPutCors = require('./bucketPutCors');
const bucketPutVersioning = require('./bucketPutVersioning');
const bucketPutWebsite = require('./bucketPutWebsite');
const bucketPutReplication = require('./bucketPutReplication');
const corsPreflight = require('./corsPreflight');
const completeMultipartUpload = require('./completeMultipartUpload');
const initiateMultipartUpload = require('./initiateMultipartUpload');
const listMultipartUploads = require('./listMultipartUploads');
const listParts = require('./listParts');
const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy');
const objectDelete = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL');
const objectGetTagging = require('./objectGetTagging');
const objectHead = require('./objectHead');
const objectPut = require('./objectPut');
const objectPutACL = require('./objectPutACL');
const objectPutTagging = require('./objectPutTagging');
const objectPutPart = require('./objectPutPart');
const objectPutCopyPart = require('./objectPutCopyPart');
const prepareRequestContexts
= require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet');
const vault = require('../auth/vault');
const websiteGet = require('./websiteGet');
const websiteHead = require('./websiteHead');
const writeContinue = require('../utilities/writeContinue');
const parseCopySource = require('./apiUtils/object/parseCopySource');
auth.setHandler(vault);
/* eslint-disable no-param-reassign */
const api = {
callApiMethod(apiMethod, request, log, callback, locationConstraint) {
callApiMethod(apiMethod, request, response, log, callback) {
let returnTagCount = true;
// no need to check auth on website or cors preflight requests
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
apiMethod === 'corsPreflight') {
return this[apiMethod](request, log, callback);
}
let sourceBucket;
let sourceObject;
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
let source =
querystring.unescape(request.headers['x-amz-copy-source']);
// If client sends the source bucket/object with a leading /,
// remove it
if (source[0] === '/') {
source = source.slice(1);
const { sourceBucket, sourceObject, sourceVersionId, parsingError } =
parseCopySource(apiMethod, request.headers['x-amz-copy-source']);
if (parsingError) {
log.debug('error parsing copy source', {
error: parsingError,
});
return callback(parsingError);
}
const slashSeparator = source.indexOf('/');
if (slashSeparator === -1) {
return callback(errors.InvalidArgument);
}
// Pull the source bucket and source object separated by /
sourceBucket = source.slice(0, slashSeparator);
sourceObject = source.slice(slashSeparator + 1);
}
const requestContexts = prepareRequestContexts(apiMethod,
request, locationConstraint, sourceBucket, sourceObject);
const requestContexts = prepareRequestContexts(apiMethod, request,
sourceBucket, sourceObject, sourceVersionId);
return auth.server.doAuth(request, log, (err, userInfo,
authorizationResults, streamingV4Params) => {
if (err) {
@ -75,6 +74,19 @@ const api = {
return callback(err);
}
if (authorizationResults) {
if (apiMethod === 'objectGet') {
// first item checks s3:GetObject(Version) action
if (!authorizationResults[0].isAllowed) {
log.trace('get object authorization denial from Vault');
return callback(errors.AccessDenied);
}
// second item checks s3:GetObject(Version)Tagging action
if (!authorizationResults[1].isAllowed) {
log.trace('get tagging authorization denial ' +
'from Vault');
returnTagCount = false;
}
} else {
for (let i = 0; i < authorizationResults.length; i++) {
if (!authorizationResults[i].isAllowed) {
log.trace('authorization denial from Vault');
@ -82,20 +94,54 @@ const api = {
}
}
}
if (apiMethod === 'bucketPut') {
return bucketPut(userInfo, request, locationConstraint,
log, callback);
}
if (apiMethod === 'objectCopy' ||
apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, log, callback);
}
// issue 100 Continue to the client
writeContinue(request, response);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return this[apiMethod](userInfo, request, streamingV4Params,
log, callback);
}
const MAX_POST_LENGTH = request.method.toUpperCase() === 'POST' ?
1024 * 1024 : 1024 * 1024 / 2; // 1 MB or 512 KB
const post = [];
let postLength = 0;
request.on('data', chunk => {
postLength += chunk.length;
// Sanity check on post length
if (postLength <= MAX_POST_LENGTH) {
post.push(chunk);
}
return undefined;
});
request.on('error', err => {
log.trace('error receiving request', {
error: err,
});
return callback(errors.InternalError);
});
request.on('end', () => {
if (postLength > MAX_POST_LENGTH) {
log.error('body length is too long for request type',
{ postLength });
return callback(errors.InvalidRequest);
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
if (apiMethod === 'objectCopy' ||
apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request,
returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
});
return undefined;
}, 's3', requestContexts);
},
bucketDelete,
@ -106,12 +152,14 @@ const api = {
bucketGetCors,
bucketGetVersioning,
bucketGetWebsite,
bucketGetLocation,
bucketHead,
bucketPut,
bucketPutACL,
bucketPutCors,
bucketPutVersioning,
bucketPutWebsite,
bucketPutReplication,
corsPreflight,
completeMultipartUpload,
initiateMultipartUpload,
@ -120,12 +168,15 @@ const api = {
multiObjectDelete,
multipartDelete,
objectDelete,
objectDeleteTagging,
objectGet,
objectGetACL,
objectGetTagging,
objectCopy,
objectHead,
objectPut,
objectPutACL,
objectPutTagging,
objectPutPart,
objectPutCopyPart,
serviceGet,
@ -133,4 +184,4 @@ const api = {
websiteHead,
};
export default api;
module.exports = api;
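The objectGet branch above relies on Vault returning one result per request context, in the order built by prepareRequestContexts. A minimal sketch of that contract, with hypothetical values that are not part of this changeset:

    // authorizationResults for an objectGet request, as assumed by callApiMethod:
    const authorizationResults = [
        { isAllowed: true },   // s3:GetObject / s3:GetObjectVersion check
        { isAllowed: false },  // s3:GetObjectTagging(Version) check
    ];
    // With these values the object is still returned, but returnTagCount is
    // set to false, so the tag count is left out of the response.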

View File

@ -1,6 +1,6 @@
import constants from '../../../../constants';
const constants = require('../../../../constants');
export function isBucketAuthorized(bucket, requestType, canonicalID) {
function isBucketAuthorized(bucket, requestType, canonicalID) {
// Check to see if user is authorized to perform a
// particular action on bucket based on ACLs.
// TODO: Add IAM checks and bucket policy checks.
@ -61,7 +61,7 @@ export function isBucketAuthorized(bucket, requestType, canonicalID) {
requestType === 'objectGet' || requestType === 'objectHead');
}
export function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
const bucketOwner = bucket.getOwner();
if (!objectMD) {
return false;
@ -69,6 +69,12 @@ export function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
if (objectMD['owner-id'] === canonicalID) {
return true;
}
// account is authorized if:
// - requestType is "bucketOwnerAction" (example: for objectTagging) and
// - account is the bucket owner
if (requestType === 'bucketOwnerAction' && bucketOwner === canonicalID) {
return true;
}
if (requestType === 'objectGet' || requestType === 'objectHead') {
if (objectMD.acl.Canned === 'public-read'
|| objectMD.acl.Canned === 'public-read-write'
@ -111,3 +117,8 @@ export function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {
}
return false;
}
module.exports = {
isBucketAuthorized,
isObjAuthorized,
};
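To illustrate the new bucketOwnerAction branch: a hedged sketch, assuming a bucket whose owner canonical ID is 'ownerCanonicalId' and an object put by another account (names invented for illustration):

    // Tagging APIs pass requestType 'bucketOwnerAction', so the bucket owner
    // is authorized even though the object metadata belongs to another account.
    const allowed = isObjAuthorized(bucket, objectMD, 'bucketOwnerAction',
        'ownerCanonicalId');
    // allowed === true when bucket.getOwner() === 'ownerCanonicalId',
    // regardless of the object's 'owner-id'.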

View File

@ -1,43 +1,117 @@
import { policies } from 'arsenal';
const RequestContext = policies.RequestContext;
const { policies } = require('arsenal');
const RequestContext = policies.RequestContext;
let apiMethodAfterVersionCheck;
const apiMethodWithVersion = { objectGetACL: true, objectPutACL: true,
objectGet: true, objectDelete: true, objectPutTagging: true,
objectGetTagging: true, objectDeleteTagging: true };
function isHeaderAcl(headers) {
return headers['x-amz-grant-read'] || headers['x-amz-grant-read-acp'] ||
headers['x-amz-grant-write-acp'] || headers['x-amz-grant-full-control'] ||
headers['x-amz-acl'];
}
/**
* Prepares the requestContexts array to send to Vault for authorization
* @param {string} apiMethod - api being called
* @param {object} request - request object
* @param {string} locationConstraint - locationConstraint if bucket put
* operation
* @param {string} sourceBucket - name of sourceBucket if copy request
* @param {string} sourceObject - name of sourceObject if copy request
* @param {string} sourceVersionId - value of sourceVersionId if copy request
* @return {RequestContext []} array of requestContexts
*/
export default function prepareRequestContexts(apiMethod, request,
locationConstraint, sourceBucket, sourceObject) {
function prepareRequestContexts(apiMethod, request, sourceBucket,
sourceObject, sourceVersionId) {
// if multiObjectDelete request, we want to authenticate
// before parsing the post body and creating multiple requestContexts
// so send null as requestContexts to Vault to avoid authorization
// checks at this point
if (apiMethod === 'multiObjectDelete') {
//
// If bucketPut request, we want to do the authorization check in the API
// itself (once we parse the locationConstraint from the xml body) so send
// null as the requestContext to Vault so it will only do an authentication
// check.
function generateRequestContext(apiMethod) {
return new RequestContext(request.headers,
request.query, request.bucketName, request.objectKey,
request.socket.remoteAddress, request.connection.encrypted,
apiMethod, 's3');
}
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
return null;
}
const requestContexts = [];
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
const getRequestContext = new RequestContext(request.headers,
request.query, sourceBucket, sourceObject,
request.socket.remoteAddress, request.connection.encrypted,
'objectGet', 's3', locationConstraint);
const putRequestContext = new RequestContext(request.headers,
request.query, request.bucketName, request.objectKey,
request.socket.remoteAddress, request.connection.encrypted,
'objectPut', 's3', locationConstraint);
requestContexts.push(getRequestContext, putRequestContext);
if (apiMethodWithVersion[apiMethod] && request.query &&
request.query.versionId) {
apiMethodAfterVersionCheck = `${apiMethod}Version`;
} else {
const requestContext = new RequestContext(request.headers,
request.query, request.bucketName, request.objectKey,
apiMethodAfterVersionCheck = apiMethod;
}
const requestContexts = [];
if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';
const reqQuery = Object.assign({}, request.query,
{ versionId: sourceVersionId });
const getRequestContext = new RequestContext(request.headers,
reqQuery, sourceBucket, sourceObject,
request.socket.remoteAddress, request.connection.encrypted,
apiMethod, 's3', locationConstraint);
objectGetAction, 's3');
const putRequestContext = generateRequestContext('objectPut');
requestContexts.push(getRequestContext, putRequestContext);
if (apiMethodAfterVersionCheck === 'objectCopy') {
// if the tagging directive is COPY, "s3:PutObjectTagging" does not need
// to be included in the list of permitted actions in the IAM policy
if (request.headers['x-amz-tagging'] &&
request.headers['x-amz-tagging-directive'] === 'REPLACE') {
const putTaggingRequestContext =
generateRequestContext('objectPutTagging');
requestContexts.push(putTaggingRequestContext);
}
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =
generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext);
}
}
} else if (apiMethodAfterVersionCheck === 'objectGet'
|| apiMethodAfterVersionCheck === 'objectGetVersion') {
const objectGetTaggingAction = (request.query &&
request.query.versionId) ? 'objectGetTaggingVersion' :
'objectGetTagging';
const getRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
const getTaggingRequestContext =
generateRequestContext(objectGetTaggingAction);
requestContexts.push(getRequestContext, getTaggingRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectPut') {
const putRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(putRequestContext);
// if put object (versioning) with tag set
if (request.headers['x-amz-tagging']) {
const putTaggingRequestContext =
generateRequestContext('objectPutTagging');
requestContexts.push(putTaggingRequestContext);
}
// if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =
generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext);
}
} else {
const requestContext =
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(requestContext);
}
return requestContexts;
}
module.exports = prepareRequestContexts;
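As an illustration of the per-action contexts built above, consider an objectPut request that carries both an x-amz-tagging header and a canned ACL header (the request values below are hypothetical; only the fields this module reads are shown):

    const request = {
        headers: { 'x-amz-tagging': 'k=v', 'x-amz-acl': 'public-read' },
        query: {},
        bucketName: 'mybucket',
        objectKey: 'mykey',
        socket: { remoteAddress: '127.0.0.1' },
        connection: { encrypted: false },
    };
    const contexts = prepareRequestContexts('objectPut', request);
    // contexts holds three RequestContext instances, for the actions
    // 'objectPut', 'objectPutTagging' and 'objectPutACL'.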

View File

@ -1,6 +1,7 @@
import escapeForXml from '../../../utilities/escapeForXML';
import { errors } from 'arsenal';
import { parseString } from 'xml2js';
const { parseString } = require('xml2js');
const { errors } = require('arsenal');
const escapeForXml = require('../../../utilities/escapeForXML');
/*
Format of xml request:
@ -32,7 +33,7 @@ const customizedErrs = {
};
// Helper validation methods
export const _validator = {
const _validator = {
/** _validator.validateNumberWildcards - check if string has multiple
* wildcards
@param {string} string - string to check for multiple wildcards
@ -286,7 +287,7 @@ function _validateCorsXml(rules) {
* @return {undefined} - calls callback with cors object on success, error on
* failure
*/
export function parseCorsXml(xml, log, cb) {
function parseCorsXml(xml, log, cb) {
parseString(xml, (err, result) => {
if (err) {
log.trace('xml parsing failed', {
@ -319,7 +320,7 @@ export function parseCorsXml(xml, log, cb) {
});
}
export function convertToXml(arrayRules) {
function convertToXml(arrayRules) {
const xml = [];
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
'<CORSConfiguration>');
@ -347,3 +348,9 @@ export function convertToXml(arrayRules) {
xml.push('</CORSConfiguration>');
return xml.join('');
}
module.exports = {
_validator,
parseCorsXml,
convertToXml,
};

View File

@ -1,13 +1,14 @@
import async from 'async';
import assert from 'assert';
import { errors } from 'arsenal';
const assert = require('assert');
const async = require('async');
const { errors } = require('arsenal');
import acl from '../../../metadata/acl';
import BucketInfo from '../../../metadata/BucketInfo';
import constants from '../../../../constants';
import createKeyForUserBucket from './createKeyForUserBucket';
import metadata from '../../../metadata/wrapper';
import kms from '../../../kms/wrapper';
const acl = require('../../../metadata/acl');
const BucketInfo = require('../../../metadata/BucketInfo');
const constants = require('../../../../constants');
const createKeyForUserBucket = require('./createKeyForUserBucket');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket;
@ -33,7 +34,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
const usersBucketBeingCalled = usersBucketAttrs ?
usersBucket : oldUsersBucket;
return metadata.putObjectMD(usersBucketBeingCalled, key,
omVal, log, err => {
omVal, {}, log, err => {
if (err && err.NoSuchBucket) {
// There must be no usersBucket so createBucket
// one using the new format
@ -66,7 +67,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
// Finally put the key in the new format
// usersBucket
return metadata.putObjectMD(usersBucket,
key, omVal, log, cb);
key, omVal, {}, log, cb);
});
}
return cb(err);
@ -111,7 +112,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
* @param {function} callback - callback with error or null as arguments
* @return {undefined}
*/
export function cleanUpBucket(bucketMD, canonicalID, log, callback) {
function cleanUpBucket(bucketMD, canonicalID, log, callback) {
const bucketName = bucketMD.getName();
return addToUsersBucket(canonicalID, bucketName, log, err => {
if (err) {
@ -152,14 +153,12 @@ function bucketLevelServerSideEncryption(bucketName, headers, log, cb) {
* @param {object} headers - request headers
* @param {string} locationConstraint - locationConstraint provided in
* request body xml (if provided)
* @param {boolean} usEastBehavior - whether s3 is set up with a usEastBehavior
* config option
* @param {function} log - Werelogs logger
* @param {function} cb - callback to bucketPut
* @return {undefined}
*/
export function createBucket(authInfo, bucketName, headers,
locationConstraint, usEastBehavior, log, cb) {
function createBucket(authInfo, bucketName, headers,
locationConstraint, log, cb) {
log.trace('Creating bucket');
assert.strictEqual(typeof bucketName, 'string');
const canonicalID = authInfo.getCanonicalID();
@ -172,8 +171,6 @@ export function createBucket(authInfo, bucketName, headers,
if (locationConstraint !== undefined) {
bucket.setLocationConstraint(locationConstraint);
} else if (usEastBehavior) {
bucket.setLocationConstraint('us-east-1');
}
const parseAclParams = {
headers,
@ -238,20 +235,21 @@ export function createBucket(authInfo, bucketName, headers,
log.trace('bucket has transient flag or deleted flag. cleaning up');
return cleanUpBucket(newBucketMD, canonicalID, log, cb);
}
// If bucket exists in non-transient and non-deleted
// state and owned by requester then return BucketAlreadyOwnedByYou
// If bucket already exists in non-transient and non-deleted
// state and owned by requester, then return BucketAlreadyOwnedByYou
// error unless old AWS behavior (us-east-1)
// For old behavior:
// 1) new locationConstraint should either be undefined or not us-east-1
// 2) the existing locationConstraint must be us-east-1 or undefined
// 3) the s3 being hit must be set up to have usEastBehavior
if ((!locationConstraint || locationConstraint === 'us-east-1') &&
(!existingBucketMD.getLocationConstraint() ||
existingBucketMD.getLocationConstraint() === 'us-east-1') &&
usEastBehavior) {
// Existing locationConstraint must have legacyAwsBehavior === true
// New locationConstraint should have legacyAwsBehavior === true
if (isLegacyAWSBehavior(locationConstraint) &&
isLegacyAWSBehavior(existingBucketMD.getLocationConstraint())) {
log.trace('returning 200 instead of 409 to mirror us-east-1');
return cb(null, existingBucketMD);
}
return cb(errors.BucketAlreadyOwnedByYou, existingBucketMD);
});
}
module.exports = {
cleanUpBucket,
createBucket,
};

View File

@ -1,13 +1,13 @@
import { errors } from 'arsenal';
import assert from 'assert';
import async from 'async';
const assert = require('assert');
const async = require('async');
const { errors } = require('arsenal');
import { logger } from '../../../utilities/logger';
const logger = require('../../../utilities/logger');
import constants from '../../../../constants';
import createKeyForUserBucket from './createKeyForUserBucket';
import metadata from '../../../metadata/wrapper';
import kms from '../../../kms/wrapper';
const constants = require('../../../../constants');
const createKeyForUserBucket = require('./createKeyForUserBucket');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket;
@ -18,7 +18,7 @@ function _deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
'_deleteUserBucketEntry' });
const keyForUserBucket = createKeyForUserBucket(canonicalID,
constants.splitter, bucketName);
metadata.deleteObjectMD(usersBucket, keyForUserBucket, log, error => {
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
// If the object representing the bucket is not in the
// users bucket just continue
if (error && error.NoSuchKey) {
@ -29,7 +29,7 @@ function _deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
constants.oldSplitter, bucketName);
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
log, error => {
{}, log, error => {
if (error && !error.NoSuchKey) {
log.error('from metadata while deleting user bucket',
{ error });
@ -71,7 +71,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
* @param {string} canonicalID - bucket owner's canonicalID
* @return {undefined}
*/
export function invisiblyDelete(bucketName, canonicalID) {
function invisiblyDelete(bucketName, canonicalID) {
const log = logger.newRequestLogger();
log.trace('deleting bucket with deleted flag invisibly', { bucketName });
return _deleteUserBucketEntry(bucketName, canonicalID, log, err => {
@ -104,20 +104,28 @@ export function invisiblyDelete(bucketName, canonicalID) {
* @param {function} cb - callback from async.waterfall in bucketDelete
* @return {undefined}
*/
export function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
log.trace('deleting bucket from metadata');
assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof canonicalID, 'string');
return async.waterfall([
function checkForObjectsStep(next) {
return metadata.listObject(bucketName, { maxKeys: 1 }, log,
(err, objectsListRes) => {
const params = { maxKeys: 1, listingType: 'DelimiterVersions' };
// We list all the versions as we want to return BucketNotEmpty
// error if there are any versions or delete markers in the bucket.
// Works for non-versioned buckets as well since listing versions
// includes null (non-versioned) objects in the result.
return metadata.listObject(bucketName, params, log,
(err, list) => {
if (err) {
log.error('error from metadata', { error: err });
return next(err);
}
if (objectsListRes.Contents.length) {
const length = (list.Versions ? list.Versions.length : 0) +
(list.DeleteMarkers ? list.DeleteMarkers.length : 0);
log.debug('listing result', { length });
if (length) {
log.debug('bucket delete failed',
{ error: errors.BucketNotEmpty });
return next(errors.BucketNotEmpty);
@ -188,3 +196,8 @@ export function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
});
});
}
module.exports = {
invisiblyDelete,
deleteBucket,
};
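The emptiness check above counts both versions and delete markers. A hedged sketch of the listing result shape expected back from metadata.listObject with listingType 'DelimiterVersions' (values invented for illustration):

    // Example listing result: one delete marker, no other versions.
    const list = { Versions: [], DeleteMarkers: [{ key: 'doc.txt' }] };
    const length = (list.Versions ? list.Versions.length : 0) +
        (list.DeleteMarkers ? list.DeleteMarkers.length : 0);
    // length === 1, so checkForObjectsStep fails with errors.BucketNotEmpty.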

View File

@ -1,4 +1,4 @@
import { invisiblyDelete } from './bucketDeletion';
const { invisiblyDelete } = require('./bucketDeletion');
/**
* Checks whether to proceed with a request based on the bucket flags
@ -7,7 +7,7 @@ import { invisiblyDelete } from './bucketDeletion';
* @param {string} requestType - type of api request
* @return {boolean} true if the bucket should be shielded, false otherwise
*/
export default function (bucket, requestType) {
function bucketShield(bucket, requestType) {
const invisiblyDeleteRequests = ['bucketGet', 'bucketHead',
'bucketGetACL', 'bucketOwnerAction', 'objectGet', 'objectGetACL',
'objectHead', 'objectPutACL', 'objectDelete'];
@ -27,3 +27,5 @@ export default function (bucket, requestType) {
}
return false;
}
module.exports = bucketShield;

View File

@ -1,10 +1,9 @@
import { errors } from 'arsenal';
import { parseString } from 'xml2js';
const { parseString } = require('xml2js');
const { errors } = require('arsenal');
import escapeForXml from '../../../utilities/escapeForXML';
import {
WebsiteConfiguration,
} from '../../../metadata/WebsiteConfiguration';
const escapeForXml = require('../../../utilities/escapeForXML');
const { WebsiteConfiguration }
= require('../../../metadata/WebsiteConfiguration');
/*
Format of xml request:
@ -55,7 +54,7 @@ function _isValidElem(elem) {
* @param {boolean} [validateParent] - validate format of parent element
* @return {boolean} true / false - if parsed xml element contains child
*/
export function xmlContainsElem(parent, requiredElem, options) {
function xmlContainsElem(parent, requiredElem, options) {
// Non-top level xml is parsed into object in the following manner.
// Example: <Parent><requiredElem>value</requiredElem>
@ -332,7 +331,7 @@ function _validateWebsiteConfigXml(parsingResult) {
return websiteConfig;
}
export function parseWebsiteConfigXml(xml, log, cb) {
function parseWebsiteConfigXml(xml, log, cb) {
parseString(xml, (err, result) => {
if (err) {
log.trace('xml parsing failed', {
@ -364,7 +363,7 @@ export function parseWebsiteConfigXml(xml, log, cb) {
});
}
export function convertToXml(config) {
function convertToXml(config) {
const xml = [];
const indexDocument = config.getIndexDocument();
const errorDocument = config.getErrorDocument();
@ -428,3 +427,9 @@ export function convertToXml(config) {
xml.push('</WebsiteConfiguration>');
return xml.join('');
}
module.exports = {
xmlContainsElem,
parseWebsiteConfigXml,
convertToXml,
};

View File

@ -1,5 +1,6 @@
export default function createKeyForUserBucket(canonicalID,
function createKeyForUserBucket(canonicalID,
splitter, bucketName) {
return `${canonicalID}${splitter}${bucketName}`;
}
module.exports = createKeyForUserBucket;

View File

@ -0,0 +1,16 @@
const parseXML = require('../../../utilities/parseXML');
const ReplicationConfiguration = require('./models/ReplicationConfiguration');
// Handle the steps for returning a valid replication configuration object.
function getReplicationConfiguration(xml, log, cb) {
return parseXML(xml, log, (err, result) => {
if (err) {
return cb(err);
}
const validator = new ReplicationConfiguration(result, log);
const configErr = validator.parseConfiguration();
return cb(configErr || null, validator.getReplicationConfiguration());
});
}
module.exports = getReplicationConfiguration;

View File

@ -0,0 +1,340 @@
const assert = require('assert');
const UUID = require('node-uuid');
const { errors } = require('arsenal');
const MAX_RULES = 1000;
const RULE_ID_LIMIT = 255;
const validStorageClasses = [
undefined,
'STANDARD',
'STANDARD_IA',
'REDUCED_REDUNDANCY',
];
/**
Example XML request:
<ReplicationConfiguration>
<Role>IAM-role-ARN</Role>
<Rule>
<ID>Rule-1</ID>
<Status>rule-status</Status>
<Prefix>key-prefix</Prefix>
<Destination>
<Bucket>arn:aws:s3:::bucket-name</Bucket>
<StorageClass>
optional-destination-storage-class-override
</StorageClass>
</Destination>
</Rule>
<Rule>
<ID>Rule-2</ID>
...
</Rule>
...
</ReplicationConfiguration>
*/
class ReplicationConfiguration {
/**
* Create a ReplicationConfiguration instance
* @param {string} xml - The parsed XML
* @param {object} log - Werelogs logger
* @return {object} - ReplicationConfiguration instance
*/
constructor(xml, log) {
this._parsedXML = xml;
this._log = log;
this._configPrefixes = [];
this._configIDs = [];
// The bucket metadata model of replication config. Note there is a
// single `destination` property because we can replicate to only one
// other bucket. Thus each rule is simplified to these properties.
this._role = null;
this._destination = null;
this._rules = null;
}
/**
* Get the role of the bucket replication configuration
* @return {string|null} - The role if defined, otherwise `null`
*/
getRole() {
return this._role;
}
/**
* The bucket to replicate data to
* @return {string|null} - The bucket if defined, otherwise `null`
*/
getDestination() {
return this._destination;
}
/**
* The rules for replication configuration
* @return {string|null} - The rules if defined, otherwise `null`
*/
getRules() {
return this._rules;
}
/**
* Get the replication configuration
* @return {object} - The replication configuration
*/
getReplicationConfiguration() {
return {
role: this.getRole(),
destination: this.getDestination(),
rules: this.getRules(),
};
}
/**
* Build the rule object from the parsed XML of the given rule
* @param {object} rule - The rule object from this._parsedXML
* @return {object} - The rule object to push into the `Rules` array
*/
_buildRuleObject(rule) {
const obj = {
prefix: rule.Prefix[0],
enabled: rule.Status[0] === 'Enabled',
};
// ID is an optional property, but create one if it is not provided or is ''.
// We generate a 48-character alphanumeric, unique ID for the rule.
obj.id = rule.ID && rule.ID[0] !== '' ? rule.ID[0] :
Buffer.from(UUID.v4()).toString('base64');
// StorageClass is an optional property.
if (rule.Destination[0].StorageClass) {
obj.storageClass = rule.Destination[0].StorageClass[0];
}
return obj;
}
/**
* Check that the `Role` property of the configuration is valid
* @return {undefined}
*/
_parseRole() {
const Role = this._parsedXML.ReplicationConfiguration.Role;
if (!Role) {
return errors.MalformedXML;
}
// TODO: Update to validate role privileges once implemented in Vault.
// Role should be an IAM user name.
const arr = Role[0].split(':');
const isValidARN = arr.length === 7 ||
(arr.length === 6 && arr[5].split('/').length === 2);
if (!isValidARN) {
return errors.InvalidArgument.customizeDescription(
'Invalid Role specified in replication configuration');
}
this._role = Role[0];
return undefined;
}
/**
* Check that the `Rules` property array is valid
* @return {undefined}
*/
_parseRules() {
// Note that the XML uses 'Rule' while the config object uses 'Rules'.
const { Rule } = this._parsedXML.ReplicationConfiguration;
if (!Rule || Rule.length < 1) {
return errors.MalformedXML;
}
if (Rule.length > MAX_RULES) {
return errors.InvalidRequest.customizeDescription(
'Number of defined replication rules cannot exceed 1000');
}
const err = this._parseEachRule(Rule);
if (err) {
return err;
}
return undefined;
}
/**
* Check that each rule in the `Rules` property array is valid
* @param {array} rules - The rule array from this._parsedXML
* @return {undefined}
*/
_parseEachRule(rules) {
const rulesArr = [];
for (let i = 0; i < rules.length; i++) {
const err =
this._parseStatus(rules[i]) || this._parsePrefix(rules[i]) ||
this._parseID(rules[i]) || this._parseDestination(rules[i]);
if (err) {
return err;
}
rulesArr.push(this._buildRuleObject(rules[i]));
}
this._rules = rulesArr;
return undefined;
}
/**
* Check that the `Status` property is valid
* @param {object} rule - The rule object from this._parsedXML
* @return {undefined}
*/
_parseStatus(rule) {
const status = rule.Status && rule.Status[0];
if (!status || !['Enabled', 'Disabled'].includes(status)) {
return errors.MalformedXML;
}
return undefined;
}
/**
* Check that the `Prefix` property is valid
* @param {object} rule - The rule object from this._parsedXML
* @return {undefined}
*/
_parsePrefix(rule) {
const prefix = rule.Prefix && rule.Prefix[0];
// An empty string prefix should be allowed.
if (!prefix && prefix !== '') {
return errors.MalformedXML;
}
if (prefix.length > 1024) {
return errors.InvalidArgument.customizeDescription('Rule prefix ' +
'cannot be longer than maximum allowed key length of 1024');
}
// Each Prefix in a list of rules must not overlap. For example, two
// prefixes 'TaxDocs' and 'TaxDocs/2015' are overlapping. An empty
// string prefix is expected to overlap with any other prefix.
for (let i = 0; i < this._configPrefixes.length; i++) {
const used = this._configPrefixes[i];
if (prefix.startsWith(used) || used.startsWith(prefix)) {
return errors.InvalidRequest.customizeDescription('Found ' +
`overlapping prefixes '${used}' and '${prefix}'`);
}
}
this._configPrefixes.push(prefix);
return undefined;
}
/**
* Check that the `ID` property is valid
* @param {object} rule - The rule object from this._parsedXML
* @return {undefined}
*/
_parseID(rule) {
const id = rule.ID && rule.ID[0];
if (id && id.length > RULE_ID_LIMIT) {
return errors.InvalidArgument
.customizeDescription('Rule Id cannot be greater than 255');
}
// Each ID in a list of rules must be unique.
if (this._configIDs.includes(id)) {
return errors.InvalidRequest.customizeDescription(
'Rule Id must be unique');
}
this._configIDs.push(id);
return undefined;
}
/**
* Check that the `StorageClass` is a valid class
* @param {string} storageClass - The storage class to validate
* @return {boolean} `true` if valid, otherwise `false`
*/
static _isValidStorageClass(storageClass) {
return validStorageClasses.includes(storageClass);
}
/**
* Check that the `StorageClass` property is valid
* @param {object} destination - The destination object from this._parsedXML
* @return {undefined}
*/
_parseStorageClass(destination) {
const storageClass = destination.StorageClass &&
destination.StorageClass[0];
if (!ReplicationConfiguration._isValidStorageClass(storageClass)) {
return errors.MalformedXML;
}
return undefined;
}
/**
* Check that the `Bucket` property is valid
* @param {object} destination - The destination object from this._parsedXML
* @return {undefined}
*/
_parseBucket(destination) {
const bucket = destination.Bucket && destination.Bucket[0];
if (!bucket) {
return errors.MalformedXML;
}
const isValidARN = bucket.split(':').length === 6;
if (!isValidARN) {
return errors.InvalidArgument
.customizeDescription('Invalid bucket ARN');
}
// We can replicate objects only to one destination bucket.
if (this._destination && this._destination !== bucket) {
return errors.InvalidRequest.customizeDescription(
'The destination bucket must be same for all rules');
}
this._destination = bucket;
return undefined;
}
/**
* Check that the `destination` property is valid
* @param {object} rule - The rule object from this._parsedXML
* @return {undefined}
*/
_parseDestination(rule) {
const dest = rule.Destination && rule.Destination[0];
if (!dest) {
return errors.MalformedXML;
}
const err = this._parseBucket(dest) || this._parseStorageClass(dest);
if (err) {
return err;
}
return undefined;
}
/**
* Check that the request configuration is valid
* @return {undefined}
*/
parseConfiguration() {
const err = this._parseRole() || this._parseRules();
if (err) {
return err;
}
return undefined;
}
/**
* Validate the bucket metadata replication configuration structure and
* value types
* @param {object} config - The replication configuration to validate
* @return {undefined}
*/
static validateConfig(config) {
assert.strictEqual(typeof config, 'object');
const { role, rules, destination } = config;
assert.strictEqual(typeof role, 'string');
assert.strictEqual(typeof destination, 'string');
assert.strictEqual(Array.isArray(rules), true);
rules.forEach(rule => {
assert.strictEqual(typeof rule, 'object');
const { prefix, enabled, id, storageClass } = rule;
assert.strictEqual(typeof prefix, 'string');
assert.strictEqual(typeof enabled, 'boolean');
assert(id === undefined || typeof id === 'string');
assert(this._isValidStorageClass(storageClass) === true);
});
}
}
module.exports = ReplicationConfiguration;
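A short sketch of the prefix-overlap rule enforced by _parsePrefix, reusing the 'TaxDocs' example from its comment; the check is symmetric, so either ordering of the two rules is rejected:

    // 'TaxDocs' and 'TaxDocs/2015' overlap, so a configuration containing both
    // prefixes fails with InvalidRequest ("Found overlapping prefixes ...").
    const used = 'TaxDocs';
    const prefix = 'TaxDocs/2015';
    const overlaps = prefix.startsWith(used) || used.startsWith(prefix);
    // overlaps === true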

View File

@ -0,0 +1,207 @@
const { config } = require('../../../Config');
const escapeForXML = require('../../../utilities/escapeForXML');
class BackendInfo {
/**
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @constructor
* @param {string | undefined} objectLocationConstraint - location constraint
* for object based on user meta header
* @param {string | undefined } bucketLocationConstraint - location
* constraint for bucket based on bucket metadata
* @param {string} requestEndpoint - endpoint to which request was made
*/
constructor(objectLocationConstraint, bucketLocationConstraint,
requestEndpoint) {
this._objectLocationConstraint = objectLocationConstraint;
this._bucketLocationConstraint = bucketLocationConstraint;
this._requestEndpoint = requestEndpoint;
return this;
}
/**
* validate proposed location constraint against config
* @param {string | undefined} locationConstraint - value of user
* metadata location constraint header or bucket location constraint
* @param {object} log - werelogs logger
* @return {boolean} - true if valid, false if not
*/
static isValidLocationConstraint(locationConstraint, log) {
if (Object.keys(config.locationConstraints).
indexOf(locationConstraint) < 0) {
log.trace('proposed locationConstraint is invalid',
{ locationConstraint });
return false;
}
return true;
}
/**
* validate that request endpoint is listed in the restEndpoint config
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if present, false if not
*/
static isRequestEndpointPresent(requestEndpoint, log) {
if (Object.keys(config.restEndpoints).indexOf(requestEndpoint) < 0) {
log.trace('requestEndpoint does not match config restEndpoints',
{ requestEndpoint });
return false;
}
return true;
}
/**
* validate that locationConstraint for request Endpoint matches
* one config locationConstraint
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if matches, false if not
*/
static isRequestEndpointValueValid(requestEndpoint, log) {
if (Object.keys(config.locationConstraints).indexOf(config
.restEndpoints[requestEndpoint]) < 0) {
log.trace('the default locationConstraint for request' +
'Endpoint does not match any config locationConstraint',
{ requestEndpoint });
return false;
}
return true;
}
/**
* validate that s3 server is running with a file or memory backend
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if running with file/mem backend, false if not
*/
static isMemOrFileBackend(requestEndpoint, log) {
if (config.backends.data === 'mem' ||
config.backends.data === 'file') {
log.trace('use data backend for the location', {
dataBackend: config.backends.data,
method: 'isMemOrFileBackend',
});
return true;
}
return false;
}
/**
* validate requestEndpoint against config or mem/file data backend
* - if there is no match for the request endpoint in the config
* restEndpoints and data backend is set to mem or file we will use this
* data backend for the location.
* - if locationConstraint for request Endpoint does not match
* any config locationConstraint, we will return an error
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if valid, false if not
*/
static isValidRequestEndpointOrBackend(requestEndpoint, log) {
if (!BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
return BackendInfo.isMemOrFileBackend(requestEndpoint, log);
}
return BackendInfo.isRequestEndpointValueValid(requestEndpoint, log);
}
/**
* validate controlling BackendInfo Parameter
* @param {string | undefined} objectLocationConstraint - value of user
* metadata location constraint header
* @param {string | null} bucketLocationConstraint - location
* constraint from bucket metadata
* @param {string} requestEndpoint - endpoint of request
* @param {object} log - werelogs logger
* @return {object} - location constraint validity
*/
static controllingBackendParam(objectLocationConstraint,
bucketLocationConstraint, requestEndpoint, log) {
if (objectLocationConstraint) {
if (BackendInfo.isValidLocationConstraint(objectLocationConstraint,
log)) {
log.trace('objectLocationConstraint is valid');
return { isValid: true };
}
log.trace('objectLocationConstraint is invalid');
return { isValid: false, description: 'Object Location Error - ' +
`Your object location "${escapeForXML(objectLocationConstraint)}"` +
' is not in your location config - Please update.' };
}
if (bucketLocationConstraint) {
if (BackendInfo.isValidLocationConstraint(bucketLocationConstraint,
log)) {
log.trace('bucketLocationConstraint is valid');
return { isValid: true };
}
log.trace('bucketLocationConstraint is invalid');
return { isValid: false, description: 'Bucket Location Error - ' +
`Your bucket location "${escapeForXML(bucketLocationConstraint)}"` +
' is not in your location config - Please update.' };
}
if (!BackendInfo.isValidRequestEndpointOrBackend(requestEndpoint,
log)) {
return { isValid: false, description: 'Endpoint Location Error - ' +
`Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
'in your config OR the default location constraint for request ' +
`endpoint "${escapeForXML(requestEndpoint)}" does not ` +
'match any config locationConstraint - Please update.' };
}
return { isValid: true };
}
/**
* Return objectLocationConstraint
* @return {string | undefined} objectLocationConstraint;
*/
getObjectLocationConstraint() {
return this._objectLocationConstraint;
}
/**
* Return bucketLocationConstraint
* @return {string | undefined} bucketLocationConstraint;
*/
getBucketLocationConstraint() {
return this._bucketLocationConstraint;
}
/**
* Return requestEndpoint
* @return {string} requestEndpoint;
*/
getRequestEndpoint() {
return this._requestEndpoint;
}
/**
* Return locationConstraint that should be used with put request
* Order of priority is:
* (1) objectLocationConstraint,
* (2) bucketLocationConstraint,
* (3) default locationConstraint for requestEndpoint if requestEndpoint
* is listed in restEndpoints in config.json
* (4) default data backend
* @return {string} locationConstraint;
*/
getControllingLocationConstraint() {
const objectLC = this.getObjectLocationConstraint();
const bucketLC = this.getBucketLocationConstraint();
const reqEndpoint = this.getRequestEndpoint();
if (objectLC) {
return objectLC;
}
if (bucketLC) {
return bucketLC;
}
if (config.restEndpoints[reqEndpoint]) {
return config.restEndpoints[reqEndpoint];
}
return config.backends.data;
}
}
module.exports = {
BackendInfo,
};
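A sketch of the priority order documented in getControllingLocationConstraint, using made-up location names and assuming 'aws-loc' and 'file-loc' both exist in config.locationConstraints:

    // The object-level constraint wins over the bucket-level one.
    const info = new BackendInfo('aws-loc', 'file-loc', 's3.example.com');
    // info.getControllingLocationConstraint() === 'aws-loc'
    const fallback = new BackendInfo(undefined, 'file-loc', 's3.example.com');
    // fallback.getControllingLocationConstraint() === 'file-loc'
    // With neither constraint set, the restEndpoints entry for
    // 's3.example.com' is used, then config.backends.data as a last resort.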

View File

@ -64,7 +64,7 @@ function _headersMatchRule(headers, allowedHeaders) {
* in a preflight CORS request
* @return {(null|object)} - matching rule if found; null if no match
*/
export function findCorsRule(rules, origin, method, headers) {
function findCorsRule(rules, origin, method, headers) {
return rules.find(rule => {
if (rule.allowedMethods.indexOf(method) === -1) {
return false;
@ -99,7 +99,7 @@ export function findCorsRule(rules, origin, method, headers) {
* for a CORS preflight request
* @return {object} resHeaders - headers to include in response
*/
export function generateCorsResHeaders(rule, origin, method, headers,
function generateCorsResHeaders(rule, origin, method, headers,
isPreflight) {
const resHeaders = {
'access-control-max-age': rule.maxAgeSeconds,
@ -130,3 +130,8 @@ isPreflight) {
}
return resHeaders;
}
module.exports = {
findCorsRule,
generateCorsResHeaders,
};

View File

@ -0,0 +1,185 @@
const async = require('async');
const { errors, s3validators } = require('arsenal');
const getMetaHeaders = s3validators.userMetadata.getMetaHeaders;
const constants = require('../../../../constants');
const data = require('../../../data/wrapper');
const services = require('../../../services');
const logger = require('../../../utilities/logger');
const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing } = require('./versioning');
const removeAWSChunked = require('./removeAWSChunked');
const { config } = require('../../../Config');
const validateWebsiteHeader = require('./websiteServing')
.validateWebsiteHeader;
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, result) => {
if (err) {
return callback(err);
}
if (dataToDelete) {
const newDataStoreName = Array.isArray(dataGetInfo) ?
dataGetInfo[0].dataStoreName : null;
data.batchDelete(dataToDelete, requestMethod,
newDataStoreName, deleteLog);
}
return callback(null, result);
});
}
/** createAndStoreObject - store data, store metadata, and delete old data
* and old metadata as necessary
* @param {string} bucketName - name of bucket
* @param {BucketInfo} bucketMD - BucketInfo instance
* @param {string} objectKey - name of object
* @param {object} objMD - object metadata
* @param {AuthInfo} authInfo - AuthInfo instance with requester's info
* @param {string} canonicalID - user's canonical ID
* @param {object} cipherBundle - cipher bundle that encrypts the data
* @param {Request} request - http request object
* @param {boolean} [isDeleteMarker] - whether creating a delete marker
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable)
* @param {RequestLogger} log - logger instance
* @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version
* result.versionId - unencrypted versionId returned by metadata
*/
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
log, callback) {
const size = isDeleteMarker ? 0 : request.parsedContentLength;
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (!validateWebsiteHeader(websiteRedirectHeader)) {
const err = errors.InvalidRedirectLocation;
log.debug('invalid x-amz-website-redirect-location ' +
`value ${websiteRedirectHeader}`, { error: err });
return callback(err);
}
const metaHeaders = isDeleteMarker ? [] :
getMetaHeaders(request.headers);
if (metaHeaders instanceof Error) {
log.debug('user metadata validation failed', {
error: metaHeaders,
method: 'createAndStoreObject',
});
return process.nextTick(() => callback(metaHeaders));
}
log.trace('meta headers', { metaHeaders, method: 'objectPut' });
const objectKeyContext = {
bucketName,
owner: canonicalID,
namespace: request.namespace,
objectKey,
metaHeaders,
};
// If the request was made with a pre-signed url, the x-amz-acl 'header'
// might be in the query string rather than the actual headers so include
// it here
const headers = request.headers;
if (request.query && request.query['x-amz-acl']) {
headers['x-amz-acl'] = request.query['x-amz-acl'];
}
const metadataStoreParams = {
objectKey,
authInfo,
metaHeaders,
size,
headers,
isDeleteMarker,
log,
};
if (!isDeleteMarker) {
metadataStoreParams.contentType = request.headers['content-type'];
metadataStoreParams.cacheControl = request.headers['cache-control'];
metadataStoreParams.contentDisposition =
request.headers['content-disposition'];
metadataStoreParams.contentEncoding =
removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
}
const backendInfoObj =
locationConstraintCheck(request, null, bucketMD, log);
if (backendInfoObj.err) {
return process.nextTick(() => {
callback(backendInfoObj.err);
});
}
const backendInfo = backendInfoObj.backendInfo;
const location = backendInfo.getControllingLocationConstraint();
const locationType = config.locationConstraints[location].type;
/* eslint-disable camelcase */
const dontSkipBackend = { aws_s3: true };
/* eslint-enable camelcase */
const requestLogger =
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
return async.waterfall([
function storeData(next) {
if (size === 0 && !dontSkipBackend[locationType]) {
metadataStoreParams.contentMD5 = constants.emptyFileMd5;
return next(null, null, null);
}
return dataStore(objectKeyContext, cipherBundle, request, size,
streamingV4Params, backendInfo, log, next);
},
function processDataResult(dataGetInfo, calculatedHash, next) {
if (dataGetInfo === null || dataGetInfo === undefined) {
return next(null, null);
}
// So that data retrieval information for MPUs and
// regular puts is stored in the same data structure,
// place the retrieval info here into a single element array
const { key, dataStoreName, dataStoreType, dataStoreETag } =
dataGetInfo;
const dataGetInfoArr = [{ key, size, start: 0, dataStoreName,
dataStoreType, dataStoreETag }];
if (cipherBundle) {
dataGetInfoArr[0].cryptoScheme = cipherBundle.cryptoScheme;
dataGetInfoArr[0].cipheredDataKey =
cipherBundle.cipheredDataKey;
}
metadataStoreParams.contentMD5 = calculatedHash;
return next(null, dataGetInfoArr);
},
function getVersioningInfo(infoArr, next) {
return versioningPreprocessing(bucketName, bucketMD,
metadataStoreParams.objectKey, objMD, log, (err, options) => {
if (err) {
// TODO: check AWS error when user requested a specific
// version before any versions have been put
const logLvl = err === errors.BadRequest ?
'debug' : 'error';
log[logLvl]('error getting versioning info', {
error: err,
method: 'versioningPreprocessing',
});
}
return next(err, options, infoArr);
});
},
function storeMDAndDeleteData(options, infoArr, next) {
metadataStoreParams.versionId = options.versionId;
metadataStoreParams.versioning = options.versioning;
metadataStoreParams.isNull = options.isNull;
metadataStoreParams.nullVersionId = options.nullVersionId;
return _storeInMDandDeleteData(bucketName, infoArr,
cipherBundle, metadataStoreParams,
options.dataToDelete, requestLogger, request.method, next);
},
], callback);
}
module.exports = createAndStoreObject;
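One behavioral note from the waterfall above, sketched under the assumption that the controlling location's type is 'aws_s3' (type names other than aws_s3 are illustrative):

    // For a 0-byte put, the data step is normally skipped and the empty-file
    // MD5 is used, but an 'aws_s3' location still sends the (empty) data so
    // the object exists on the AWS backend.
    const size = 0;
    const dontSkipBackend = { aws_s3: true };
    const skipData = size === 0 && !dontSkipBackend['aws_s3'];
    // skipData === false for aws_s3; true for 'mem', 'file', etc.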

View File

@ -0,0 +1,52 @@
const { errors } = require('arsenal');
const { BackendInfo } = require('./BackendInfo');
const constants = require('../../../../constants');
/**
* locationConstraintCheck - if new config, on object put, object copy,
* or initiate MPU request, gathers object location constraint,
* bucket locationConstraint, and request endpoint, and checks their validity
* @param {request} request - normalized request object
* @param {object} metaHeaders - headers of metadata storage params used in
* objectCopy api
* @param {BucketInfo} bucket - metadata BucketInfo instance
* @param {object} log - Werelogs instance
* @return {object} - consists of three keys: error, controllingLC, and
* backendInfo. backendInfo only has value if new config
*/
function locationConstraintCheck(request, metaHeaders, bucket, log) {
let backendInfoObj = {};
let objectLocationConstraint;
if (metaHeaders) {
objectLocationConstraint =
metaHeaders[constants.objectLocationConstraintHeader];
} else {
objectLocationConstraint = request
.headers[constants.objectLocationConstraintHeader];
}
const bucketLocationConstraint = bucket.getLocationConstraint();
const requestEndpoint = request.parsedHost;
const controllingBackend = BackendInfo.controllingBackendParam(
objectLocationConstraint, bucketLocationConstraint,
requestEndpoint, log);
if (!controllingBackend.isValid) {
backendInfoObj = {
err: errors.InvalidArgument.customizeDescription(controllingBackend.
description),
};
return backendInfoObj;
}
const backendInfo = new BackendInfo(objectLocationConstraint,
bucketLocationConstraint, requestEndpoint);
backendInfoObj = {
err: null,
controllingLC: backendInfo.getControllingLocationConstraint(),
backendInfo,
};
return backendInfoObj;
}
module.exports = locationConstraintCheck;

View File

@ -0,0 +1,39 @@
const url = require('url');
const querystring = require('querystring');
const { errors } = require('arsenal');
const { decodeVersionId } = require('./versioning');
/** parseCopySource - parse objectCopy or objectPutCopyPart copy source header
* @param {string} apiMethod - api method
* @param {string} copySourceHeader - 'x-amz-copy-source' request header
* @return {object} - sourceBucket, sourceObject, sourceVersionId, parsingError
*/
function parseCopySource(apiMethod, copySourceHeader) {
if (apiMethod !== 'objectCopy' && apiMethod !== 'objectPutCopyPart') {
return {};
}
const { pathname, query } = url.parse(copySourceHeader);
let source = querystring.unescape(pathname);
// If client sends the source bucket/object with a leading /, remove it
if (source[0] === '/') {
source = source.slice(1);
}
const slashSeparator = source.indexOf('/');
if (slashSeparator === -1) {
return { parsingError: errors.InvalidArgument };
}
// Pull the source bucket and source object separated by /
const sourceBucket = source.slice(0, slashSeparator);
const sourceObject = source.slice(slashSeparator + 1);
const sourceVersionId =
decodeVersionId(query ? querystring.parse(query) : undefined);
if (sourceVersionId instanceof Error) {
const err = sourceVersionId;
return { parsingError: err };
}
return { sourceBucket, sourceObject, sourceVersionId };
}
module.exports = parseCopySource;
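For reference, a hedged usage sketch of the parser above; the header value is invented, the require path mirrors the one used in api.js, and the exact sourceVersionId value depends on decodeVersionId from ./versioning:

    const parseCopySource = require('./apiUtils/object/parseCopySource');
    const res = parseCopySource('objectCopy',
        '/srcbucket/folder/key.txt?versionId=someEncodedId');
    // res.sourceBucket === 'srcbucket'
    // res.sourceObject === 'folder/key.txt'
    // res.sourceVersionId holds the decoded version id, or res.parsingError
    // is set if the header or version id cannot be parsed.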

View File

@ -1,60 +0,0 @@
import { errors } from 'arsenal';
/**
* parseRange - Validate and parse range request header
* @param {string} rangeHeader - range header from request
* which should be in form bytes=0-9
* @param {number} totalLength - totalLength of object
* @return {object} object containing range (array | undefined) and error if
* range is invalid
*/
export function parseRange(rangeHeader, totalLength) {
// If the range is invalid in any manner, AWS just returns the full object
// (end is inclusive so minus 1)
const maxEnd = totalLength - 1;
let range = undefined;
if (!rangeHeader.startsWith('bytes=')
|| rangeHeader.indexOf('-') < 0
// Multiple ranges not supported
|| rangeHeader.indexOf(',') > 0) {
return { range };
}
const rangePortion = rangeHeader.replace('bytes=', '').split('-');
if (rangePortion.length > 2) {
return { range };
}
let start;
let end;
// Handle incomplete specifier where just offset from end is given
if (rangePortion[0] === '') {
const offset = parseInt(rangePortion[1], 10);
if (Number.isNaN(offset)) {
return { range };
}
start = totalLength - offset;
end = maxEnd;
// Handle incomplete specifier where just starting place is given
// meaning range goes from start of range to end of object
} else if (rangePortion[1] === '') {
start = parseInt(rangePortion[0], 10);
end = maxEnd;
} else {
start = parseInt(rangePortion[0], 10);
end = parseInt(rangePortion[1], 10);
}
// InvalidRange when the resource being accessed does not cover
// the byte range
if (start >= totalLength && end >= totalLength) {
return { range, error: errors.InvalidRange };
}
end = Math.min(end, maxEnd);
if (Number.isNaN(start) || Number.isNaN(end) || start > end) {
return { range };
}
if (start < 0) {
start = 0;
}
range = [start, end];
return { range };
}

View File

@ -5,9 +5,11 @@
* @param {string} sourceHeader - Content-Encoding header from request headers
* @return {string} new value w. 'aws-chunked'/'aws-chunked,' substring removed
*/
export default function removeAWSChunked(sourceHeader) {
function removeAWSChunked(sourceHeader) {
if (sourceHeader === undefined) {
return undefined;
}
return sourceHeader.replace(/aws-chunked,?/, '');
}
module.exports = removeAWSChunked;

View File

@ -0,0 +1,90 @@
/**
* @param {array} dataLocations - all data locations
* @param {array} outerRange - range from request
* @return {array} parsedLocations - dataLocations filtered for
* what needed and ranges added for particular parts as needed
*/
function setPartRanges(dataLocations, outerRange) {
const parsedLocations = [];
if (!outerRange) {
return dataLocations.slice();
}
const begin = outerRange[0];
const end = outerRange[1];
// If there is a single location, there is no need to break up the range
// among parts, and the dataLocation might not have start and size
// properties (because it might be pre md-model-version 2),
// so just set the range as a property
if (dataLocations.length === 1) {
const soleLocation = dataLocations[0];
soleLocation.range = [begin, end];
// If size is missing, it does not impact the get range.
// We modify size here in case this function is used for
// object put part copy, where the size will be needed.
// If pre-md-model-version 2, object put part copy will not
// be allowed, so not an issue that size not modified here.
if (dataLocations[0].size) {
const partSize = parseInt(dataLocations[0].size, 10);
soleLocation.size =
Math.min(partSize, end - begin + 1).toString();
}
parsedLocations.push(soleLocation);
return parsedLocations;
}
// Range is inclusive of its endpoint, so add 1
const max = end - begin + 1;
let total = 0;
for (let i = 0; i < dataLocations.length; i++) {
if (total >= max) {
break;
}
const partStart = parseInt(dataLocations[i].start, 10);
const partSize = parseInt(dataLocations[i].size, 10);
if (partStart + partSize <= begin) {
continue;
}
if (partStart >= begin) {
// If the whole part is in the range, just include it
if (partSize + total <= max) {
const partWithoutRange = dataLocations[i];
partWithoutRange.size = partSize.toString();
parsedLocations.push(partWithoutRange);
total += partSize;
// Otherwise set a range limit on the part end
// and we're done
} else {
const partWithRange = dataLocations[i];
// Need to subtract one from endPart since range
// includes endPart in byte count
const endPart = Math.min(partSize - 1, max - total - 1);
partWithRange.range = [0, endPart];
// modify size to be stored for object put part copy
partWithRange.size = (endPart + 1).toString();
parsedLocations.push(dataLocations[i]);
break;
}
} else {
// Offset start (and end if necessary)
const partWithRange = dataLocations[i];
const startOffset = begin - partStart;
// Use full remaining part if remaining partSize is less
// than byte range we need to satisfy. Or use byte range
// we need to satisfy taking into account any startOffset
const endPart = Math.min(partSize - 1,
max - total + startOffset - 1);
partWithRange.range = [startOffset, endPart];
// modify size to be stored for object put part copy
partWithRange.size = (endPart - startOffset + 1).toString();
parsedLocations.push(partWithRange);
// Need to add byte back since with total we are counting
// number of bytes while the endPart and startOffset
// are in terms of range which include the endpoint
total += endPart - startOffset + 1;
}
}
return parsedLocations;
}
module.exports = setPartRanges;
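A worked example of the multi-part branch above, with two invented 5-byte parts and an overall range of bytes 3 through 7:

    const setPartRanges = require('./setPartRanges');
    const locations = [
        { key: 'part1', start: 0, size: 5 },
        { key: 'part2', start: 5, size: 5 },
    ];
    const parsed = setPartRanges(locations, [3, 7]);
    // parsed[0] covers part1 with range [3, 4] and size '2'
    // parsed[1] covers part2 with range [0, 2] and size '3'
    // i.e. 2 + 3 = 5 bytes, matching the inclusive range 3..7.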

View File

@ -1,8 +1,8 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
const { parseRange } = require('arsenal/lib/network/http/utils');
import constants from '../../../../constants';
import routesUtils from '../../../routes/routesUtils';
import { parseRange } from './parseRange';
const constants = require('../../../../constants');
const setPartRanges = require('./setPartRanges');
/**
* Uses the source object metadata and the requestHeaders
* to determine the location of the data to be copied and the
@ -13,7 +13,6 @@ import { parseRange } from './parseRange';
* @return {object} object containing error if any or a dataLocator (array)
* and objectSize (number) if no error
*/
export default
function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
let dataLocator;
// If 0 byte object just set dataLocator to empty array
@ -40,8 +39,7 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
parseInt(sourceObjMD['content-length'], 10);
let copyObjectSize = sourceSize;
if (rangeHeader) {
const { range, error } = parseRange(rangeHeader,
sourceSize);
const { range, error } = parseRange(rangeHeader, sourceSize);
if (error) {
return { error };
}
@ -60,10 +58,8 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
};
}
if (range) {
dataLocator =
routesUtils.setPartRanges(dataLocator, range);
copyObjectSize = Math.min(sourceSize - range[0],
range[1] - range[0] + 1);
dataLocator = setPartRanges(dataLocator, range);
copyObjectSize = range[1] - range[0] + 1;
}
}
if (copyObjectSize > constants.maximumAllowedPartSize) {
@ -73,3 +69,5 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
}
return { dataLocator, copyObjectSize };
}
module.exports = setUpCopyLocator;
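As a quick check of the simplified size computation (assuming, as the change implies, that parseRange has already clamped the range to the source object's size):

// A 1000-byte source copied with Range: bytes=200-699
const range = [200, 699];               // inclusive range returned by parseRange
const copyObjectSize = range[1] - range[0] + 1;
console.log(copyObjectSize);            // 500 bytes to copy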

View File

@ -1,6 +1,7 @@
import { errors } from 'arsenal';
import V4Transform from '../../../auth/streamingV4/V4Transform';
import data from '../../../data/wrapper';
const { errors } = require('arsenal');
const V4Transform = require('../../../auth/streamingV4/V4Transform');
const data = require('../../../data/wrapper');
/**
* Prepares the stream if the chunks are sent in a v4 Auth request
@ -45,7 +46,7 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) {
contentMD5,
});
log.trace('contentMD5 does not match, deleting data');
data.batchDelete(dataRetrievalInfo, log);
data.batchDelete(dataRetrievalInfo, null, null, log);
return cb(errors.BadDigest);
}
if (completedHash) {
@ -55,7 +56,7 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) {
}
/**
* Stores object and responds back with location and storage type
* Stores object and responds back with key and storage type
* @param {object} objectContext - object's keyContext for sproxyd Key
* computation (put API)
* @param {object} cipherBundle - cipher bundle that encrypt the data
@ -64,14 +65,16 @@ function checkHashMatchMD5(stream, hashedStream, dataRetrievalInfo, log, cb) {
* @param {object | null } streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable)
* @param {BackendInfo} backendInfo - info to determine which data
* backend to use
* @param {RequestLogger} log - the current stream logger
* @param {function} cb - callback containing result for the next task
* @return {undefined}
*/
export function dataStore(objectContext, cipherBundle, stream, size,
streamingV4Params, log, cb) {
function dataStore(objectContext, cipherBundle, stream, size,
streamingV4Params, backendInfo, log, cb) {
const dataStream = prepareStream(stream, streamingV4Params, log, cb);
data.put(cipherBundle, dataStream, size, objectContext, log,
data.put(cipherBundle, dataStream, size, objectContext, backendInfo, log,
(err, dataRetrievalInfo, hashedStream) => {
if (err) {
log.error('error in datastore', {
@ -100,3 +103,7 @@ export function dataStore(objectContext, cipherBundle, stream, size,
return undefined;
});
}
module.exports = {
dataStore,
};

View File

@ -0,0 +1,226 @@
const { errors } = require('arsenal');
const { parseString } = require('xml2js');
const escapeForXml = require('../../../utilities/escapeForXML');
const tagRegex = new RegExp(/[^a-zA-Z0-9 +-=._:/]/g);
const errorInvalidArgument = errors.InvalidArgument
.customizeDescription('The header \'x-amz-tagging\' shall be ' +
'encoded as UTF-8 then URLEncoded URL query parameters without ' +
'tag name duplicates.');
const errorBadRequestLimit10 = errors.BadRequest
.customizeDescription('Object tags cannot be greater than 10');
/*
Format of xml request:
<Tagging>
<TagSet>
<Tag>
<Key>Tag Name</Key>
<Value>Tag Value</Value>
</Tag>
</TagSet>
</Tagging>
*/
const _validator = {
validateTagStructure: tag => tag
&& Object.keys(tag).length === 2
&& tag.Key && tag.Value
&& tag.Key.length === 1 && tag.Value.length === 1
&& tag.Key[0] !== undefined && tag.Value[0] !== undefined
&& typeof tag.Key[0] === 'string' && typeof tag.Value[0] === 'string',
validateXMLStructure: result =>
result && Object.keys(result).length === 1 &&
result.Tagging &&
result.Tagging.TagSet &&
result.Tagging.TagSet.length === 1 &&
(
result.Tagging.TagSet[0] === '' ||
result.Tagging.TagSet[0] &&
Object.keys(result.Tagging.TagSet[0]).length === 1 &&
result.Tagging.TagSet[0].Tag &&
Array.isArray(result.Tagging.TagSet[0].Tag)
),
validateKeyValue: (key, value) => {
if (key.length > 128 || key.match(tagRegex)) {
return errors.InvalidTag.customizeDescription('The TagKey you ' +
'have provided is invalid');
}
if (value.length > 256 || value.match(tagRegex)) {
return errors.InvalidTag.customizeDescription('The TagValue you ' +
'have provided is invalid');
}
return true;
},
};
/** _validateTags - Validate tags, returning an error if tags are invalid
* @param {object[]} tags - tags parsed from xml to be validated
* @param {string[]} tags[].Key - Name of the tag
* @param {string[]} tags[].Value - Value of the tag
* @return {(Error|object)} tagsResult - return object tags on success
* { key: value}; error on failure
*/
function _validateTags(tags) {
let result;
const tagsResult = {};
if (tags.length === 0) {
return tagsResult;
}
// Maximum number of tags per resource: 10
if (tags.length > 10) {
return errorBadRequestLimit10;
}
for (let i = 0; i < tags.length; i++) {
const tag = tags[i];
if (!_validator.validateTagStructure(tag)) {
return errors.MalformedXML;
}
const key = tag.Key[0];
const value = tag.Value[0];
if (!key) {
return errors.InvalidTag.customizeDescription('The TagKey you ' +
'have provided is invalid');
}
// Allowed characters are letters, whitespace, and numbers, plus
// the following special characters: + - = . _ : /
// Maximum key length: 128 Unicode characters
// Maximum value length: 256 Unicode characters
result = _validator.validateKeyValue(key, value);
if (result instanceof Error) {
return result;
}
tagsResult[key] = value;
}
// not repeating keys
if (tags.length > Object.keys(tagsResult).length) {
return errors.InvalidTag.customizeDescription('Cannot provide ' +
'multiple Tags with the same key');
}
return tagsResult;
}
/** parseTagXml - Parse and validate xml body, returning callback with object
* tags : { key: value}
* @param {string} xml - xml body to parse and validate
* @param {object} log - Werelogs logger
* @param {function} cb - callback to server
* @return {(Error|object)} - calls callback with tags object on success, error
* on failure
*/
function parseTagXml(xml, log, cb) {
parseString(xml, (err, result) => {
if (err) {
log.trace('xml parsing failed', {
error: err,
method: 'parseTagXml',
});
log.debug('invalid xml', { xml });
return cb(errors.MalformedXML);
}
if (!_validator.validateXMLStructure(result)) {
log.debug('xml validation failed', {
error: errors.MalformedXML,
method: '_validator.validateXMLStructure',
xml,
});
return cb(errors.MalformedXML);
}
// AWS does not return an error if there are no tags
if (result.Tagging.TagSet[0] === '') {
return cb(null, []);
}
const validationRes = _validateTags(result.Tagging.TagSet[0].Tag);
if (validationRes instanceof Error) {
log.debug('tag validation failed', {
error: validationRes,
method: '_validateTags',
xml,
});
return cb(validationRes);
}
// if no error, validation returns tags object
return cb(null, validationRes);
});
}
function convertToXml(objectTags) {
const xml = [];
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
'<Tagging> <TagSet>');
if (objectTags && Object.keys(objectTags).length > 0) {
Object.keys(objectTags).forEach(key => {
xml.push(`<Tag><Key>${escapeForXml(key)}</Key>` +
`<Value>${escapeForXml(objectTags[key])}</Value></Tag>`);
});
}
xml.push('</TagSet> </Tagging>');
return xml.join('');
}
/** parseTagFromQuery - Parse and validate x-amz-tagging header (URL query
* parameter encoded), returning callback with object tags : { key: value}
* @param {string} tagQuery - tag(s) URL query parameter encoded
* @return {(Error|object)} - calls callback with tags object on success, error
* on failure
*/
function parseTagFromQuery(tagQuery) {
const tagsResult = {};
const pairs = tagQuery.split('&');
let key;
let value;
let emptyTag = 0;
if (pairs.length === 0) {
return tagsResult;
}
for (let i = 0; i < pairs.length; i++) {
const pair = pairs[i];
if (!pair) {
emptyTag ++;
continue;
}
const pairArray = pair.split('=');
if (pairArray.length !== 2) {
return errorInvalidArgument;
}
try {
key = decodeURIComponent(pairArray[0]);
value = decodeURIComponent(pairArray[1]);
} catch (err) {
return errorInvalidArgument;
}
if (!key) {
return errorInvalidArgument;
}
const errorResult = _validator.validateKeyValue(key, value);
if (errorResult instanceof Error) {
return errorResult;
}
tagsResult[key] = value;
}
// return InvalidArgument error if using the same key multiple times
if (pairs.length - emptyTag > Object.keys(tagsResult).length) {
return errorInvalidArgument;
}
if (Object.keys(tagsResult).length > 10) {
return errorBadRequestLimit10;
}
return tagsResult;
}
module.exports = {
_validator,
parseTagXml,
convertToXml,
parseTagFromQuery,
};
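A small usage sketch of the query-string path and the XML serializer above; the require path is hypothetical and the tag values are illustrative.

const { parseTagFromQuery, convertToXml } = require('./tagging');

// x-amz-tagging header value: URL-encoded query string of tag pairs.
const tags = parseTagFromQuery('project=s3&team=object%20storage');
console.log(tags);
// { project: 's3', team: 'object storage' }
console.log(convertToXml(tags));
// <?xml version="1.0" encoding="UTF-8" standalone="yes"?><Tagging> <TagSet>
// <Tag><Key>project</Key><Value>s3</Value></Tag>...</TagSet> </Tagging> (one line)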

View File

@ -0,0 +1,321 @@
const { errors, versioning } = require('arsenal');
const async = require('async');
const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config');
const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata
// for null versions that are created before bucket versioning is configured
const nonVersionedObjId =
versionIdUtils.getInfVid(config.replicationGroupId);
/** decodedVidResult - decode the version id from a query object
* @param {object} [reqQuery] - request query object
* @param {string} [reqQuery.versionId] - version ID sent in request query
* @return {(Error|string|undefined)} - return Invalid Argument if decoding
* fails due to improper format, otherwise undefined or the decoded version id
*/
function decodeVersionId(reqQuery) {
if (!reqQuery || !reqQuery.versionId) {
return undefined;
}
let versionId = reqQuery.versionId;
if (versionId === 'null') {
return versionId;
}
versionId = versionIdUtils.decode(versionId);
if (versionId instanceof Error) {
return errors.InvalidArgument
.customizeDescription('Invalid version id specified');
}
return versionId;
}
/** getVersionIdResHeader - return encoded version ID if appropriate
* @param {object} [verCfg] - bucket versioning configuration
* @param {object} objectMD - object metadata
* @return {(string|undefined)} - undefined or encoded version ID
* (if not 'null')
*/
function getVersionIdResHeader(verCfg, objectMD) {
if (verCfg) {
if (objectMD.isNull || (objectMD && !objectMD.versionId)) {
return 'null';
}
return versionIdUtils.encode(objectMD.versionId);
}
return undefined;
}
/**
* Checks for versionId in request query and returns error if it is there
* @param {object} query - request query
* @return {(Error|undefined)} - customized InvalidArgument error or undefined
*/
function checkQueryVersionId(query) {
if (query && query.versionId !== undefined) {
const customMsg = 'This operation does not accept a version-id.';
return errors.InvalidArgument.customizeDescription(customMsg);
}
return undefined;
}
function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
if (err) {
log.debug('error from metadata storing null version as new version',
{ error: err });
}
cb(err, options);
});
}
/** get location of data for deletion
* @param {string} bucketName - name of bucket
* @param {string} objKey - name of object key
* @param {object} options - metadata options for getting object MD
* @param {string} options.versionId - version to get from metadata
* @param {RequestLogger} log - logger instance
* @param {function} cb - callback
* @return {undefined} - and call callback with (err, dataToDelete)
*/
function _getDeleteLocations(bucketName, objKey, options, log, cb) {
return metadata.getObjectMD(bucketName, objKey, options, log,
(err, versionMD) => {
if (err) {
log.debug('err from metadata getting specified version', {
error: err,
method: '_getDeleteLocations',
});
return cb(err);
}
if (!versionMD.location) {
return cb();
}
const dataToDelete = Array.isArray(versionMD.location) ?
versionMD.location : [versionMD.location];
return cb(null, dataToDelete);
});
}
function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
// before deleting null version md, retrieve location of data to delete
return _getDeleteLocations(bucketName, objKey, options, log,
(err, nullDataToDelete) => {
if (err) {
log.warn('could not find null version metadata', {
error: err,
method: '_deleteNullVersionMD',
});
return cb(err);
}
return metadata.deleteObjectMD(bucketName, objKey, options, log,
err => {
if (err) {
log.warn('metadata error deleting null version',
{ error: err, method: '_deleteNullVersionMD' });
return cb(err);
}
return cb(null, nullDataToDelete);
});
});
}
function processVersioningState(mst, vstat, cb) {
const options = {};
const storeOptions = {};
const delOptions = {};
// object does not exist or is not versioned (before versioning)
if (mst.versionId === undefined || mst.isNull) {
// versioning is suspended, overwrite existing master version
if (vstat === 'Suspended') {
options.versionId = '';
options.isNull = true;
options.dataToDelete = mst.objLocation;
// if null version exists, clean it up prior to put
if (mst.isNull) {
delOptions.versionId = mst.versionId;
return cb(null, options, null, delOptions);
}
return cb(null, options);
}
// versioning is enabled, create a new version
options.versioning = true;
if (mst.exists) {
// store master version in a new key
const versionId = mst.isNull ? mst.versionId : nonVersionedObjId;
storeOptions.versionId = versionId;
storeOptions.isNull = true;
options.nullVersionId = versionId;
return cb(null, options, storeOptions);
}
return cb(null, options);
}
// master is versioned and is not a null version
const nullVersionId = mst.nullVersionId;
if (vstat === 'Suspended') {
// versioning is suspended, overwrite the existing master version
options.versionId = '';
options.isNull = true;
if (nullVersionId === undefined) {
return cb(null, options);
}
delOptions.versionId = nullVersionId;
return cb(null, options, null, delOptions);
}
// versioning is enabled, put the new version
options.versioning = true;
options.nullVersionId = nullVersionId;
return cb(null, options);
}
function getMasterState(objMD) {
if (!objMD) {
return {};
}
const mst = {
exists: true,
versionId: objMD.versionId,
isNull: objMD.isNull,
nullVersionId: objMD.nullVersionId,
};
if (objMD.location) {
mst.objLocation = Array.isArray(objMD.location) ?
objMD.location : [objMD.location];
}
return mst;
}
/** versioningPreprocessing - return versioning information for S3 to handle
* creation of new versions and manage deletion of old data and metadata
* @param {string} bucketName - name of bucket
* @param {object} bucketMD - bucket metadata
* @param {string} objectKey - name of object
* @param {object} objMD - obj metadata
* @param {RequestLogger} log - logger instance
* @param {function} callback - callback
* @return {undefined} and call callback with params (err, options):
* options.dataToDelete - (array/undefined) location of data to delete
* options.versionId - specific versionId to overwrite in metadata
* ('' overwrites the master version)
* options.versioning - (true/undefined) metadata instruction to create new ver
* options.isNull - (true/undefined) whether new version is null or not
* options.nullVersionId - if storing a null version in version history, the
* version id of the null version
* options.deleteNullVersionData - whether to delete the data of the null ver
*/
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
log, callback) {
const options = {};
const mst = getMasterState(objMD);
const vCfg = bucketMD.getVersioningConfiguration();
// bucket is not versioning configured
if (!vCfg) {
options.dataToDelete = mst.objLocation;
return process.nextTick(callback, null, options);
}
// bucket is versioning configured
return async.waterfall([
function processState(next) {
processVersioningState(mst, vCfg.Status,
(err, options, storeOptions, delOptions) => {
process.nextTick(next, err, options, storeOptions,
delOptions);
});
},
function storeVersion(options, storeOptions, delOptions, next) {
if (!storeOptions) {
return process.nextTick(next, null, options, delOptions);
}
const versionMD = Object.assign({}, objMD, storeOptions);
const params = { versionId: storeOptions.versionId };
return _storeNullVersionMD(bucketName, objectKey, versionMD,
params, log, err => next(err, options, delOptions));
},
function deleteNullVersion(options, delOptions, next) {
if (!delOptions) {
return process.nextTick(next, null, options);
}
return _deleteNullVersionMD(bucketName, objectKey, delOptions, log,
(err, nullDataToDelete) => {
if (err) {
log.warn('unexpected error deleting null version md', {
error: err,
method: 'versioningPreprocessing',
});
// it's possible there was a concurrent request to delete
// the null version, so proceed with putting a new version
if (err === errors.NoSuchKey) {
return next(null, options);
}
return next(errors.InternalError);
}
Object.assign(options, { dataToDelete: nullDataToDelete });
return next(null, options);
});
},
], (err, options) => callback(err, options));
}
/** preprocessingVersioningDelete - return versioning information for S3 to
* manage deletion of objects and versions, including creation of delete markers
* @param {string} bucketName - name of bucket
* @param {object} bucketMD - bucket metadata
* @param {object} objectMD - obj metadata
* @param {string} [reqVersionId] - specific version ID sent as part of request
* @param {RequestLogger} log - logger instance
* @param {function} callback - callback
* @return {undefined} and call callback with params (err, options):
* options.deleteData - (true/undefined) whether to delete data (if undefined
* means creating a delete marker instead)
* options.versionId - specific versionId to delete
*/
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
reqVersionId, log, callback) {
const options = {};
// bucket is not versioning enabled
if (!bucketMD.getVersioningConfiguration()) {
options.deleteData = true;
return callback(null, options);
}
// bucket is versioning enabled
if (reqVersionId && reqVersionId !== 'null') {
// deleting a specific version
options.deleteData = true;
options.versionId = reqVersionId;
return callback(null, options);
}
if (reqVersionId) {
// deleting the 'null' version if it exists
if (objectMD.versionId === undefined) {
// object is not versioned, deleting it
options.deleteData = true;
return callback(null, options);
}
if (objectMD.isNull) {
// master is the null version
options.deleteData = true;
options.versionId = objectMD.versionId;
return callback(null, options);
}
if (objectMD.nullVersionId) {
// null version exists, deleting it
options.deleteData = true;
options.versionId = objectMD.nullVersionId;
return callback(null, options);
}
// null version does not exist, no deletion
// TODO check AWS behaviour for no deletion (AWS appears to return no error)
return callback(errors.NoSuchKey);
}
// not deleting any specific version, making a delete marker instead
return callback(null, options);
}
module.exports = {
decodeVersionId,
getVersionIdResHeader,
checkQueryVersionId,
versioningPreprocessing,
preprocessingVersioningDelete,
};
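For reference, a sketch of how the exported query helpers above behave (the require path is an assumption; loading the module also pulls in the server config):

const { decodeVersionId, checkQueryVersionId } = require('./versioning');

// No versionId in the query: undefined, meaning "operate on the master version".
console.log(decodeVersionId({}));                     // undefined
// The literal string 'null' is passed through as-is.
console.log(decodeVersionId({ versionId: 'null' }));  // 'null'
// APIs that never accept a version id can reject the query early.
console.log(checkQueryVersionId({ versionId: 'xyz' })); // customized InvalidArgument error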

View File

@ -1,4 +1,3 @@
/**
* findRoutingRule - find applicable routing rule from bucket metadata
* @param {RoutingRule []} routingRules - array of routingRule objects
@ -8,7 +7,7 @@
* keys/values from routingRule.getRedirect() plus
* a key of prefixFromRule and a value of routingRule.condition.keyPrefixEquals
*/
export function findRoutingRule(routingRules, key, errCode) {
function findRoutingRule(routingRules, key, errCode) {
if (!routingRules || routingRules.length === 0) {
return undefined;
}
@ -70,7 +69,7 @@ export function findRoutingRule(routingRules, key, errCode) {
* @return {object} redirectInfo - select key/values stored in
* WebsiteConfiguration for a redirect -- protocol, replaceKeyWith and hostName
*/
export function extractRedirectInfo(location) {
function extractRedirectInfo(location) {
const redirectInfo = { redirectLocationHeader: true };
if (location.startsWith('/')) {
// redirect to another object in bucket
@ -89,3 +88,21 @@ export function extractRedirectInfo(location) {
}
return redirectInfo;
}
/**
* validateWebsiteHeader - check that the redirect location header is valid
* @param {string} header - value of
* x-amz-website-redirect-location header on a put
* object (or similar request -- initiate mpu, object copy)
* @return {boolean} true if valid, false if not
*/
function validateWebsiteHeader(header) {
return (!header || header.startsWith('/') ||
header.startsWith('http://') || header.startsWith('https://'));
}
module.exports = {
findRoutingRule,
extractRedirectInfo,
validateWebsiteHeader,
};
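A quick sketch of the header check added above (the require path is hypothetical):

const { validateWebsiteHeader } = require('./websiteServing');

console.log(validateWebsiteHeader(undefined));                  // true: header absent
console.log(validateWebsiteHeader('/other-key'));               // true: redirect within the bucket
console.log(validateWebsiteHeader('https://example.com/page')); // true: absolute URL
console.log(validateWebsiteHeader('ftp://example.com'));        // false: unsupported scheme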

View File

@ -1,9 +1,9 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { deleteBucket } from './apiUtils/bucket/bucketDeletion';
import services from '../services';
import { pushMetric } from '../utapi/utilities';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { deleteBucket } = require('./apiUtils/bucket/bucketDeletion');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
/**
* bucketDelete - DELETE bucket (currently supports only non-versioned buckets)
@ -15,7 +15,7 @@ import { pushMetric } from '../utapi/utilities';
* with the result and response headers
* @return {undefined}
*/
export default function bucketDelete(authInfo, request, log, cb) {
function bucketDelete(authInfo, request, log, cb) {
log.debug('processing request', { method: 'bucketDelete' });
if (authInfo.isRequesterPublicUser()) {
@ -28,20 +28,19 @@ export default function bucketDelete(authInfo, request, log, cb) {
authInfo,
bucketName,
requestType: 'bucketDelete',
log,
};
return services.metadataValidateAuthorization(metadataValParams,
return metadataValidateBucket(metadataValParams, log,
(err, bucketMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucketMD);
if (err) {
log.debug('error processing request',
{ method: 'metadataValidateAuthorization', error: err });
{ method: 'metadataValidateBucket', error: err });
return cb(err, corsHeaders);
}
log.trace('passed checks',
{ method: 'metadataValidateAuthorization' });
{ method: 'metadataValidateBucket' });
return deleteBucket(bucketMD, bucketName, authInfo.getCanonicalID(),
log, err => {
if (err) {
@ -55,3 +54,5 @@ export default function bucketDelete(authInfo, request, log, cb) {
});
});
}
module.exports = bucketDelete;

View File

@ -1,10 +1,10 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import bucketShield from './apiUtils/bucket/bucketShield';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
import metadata from '../metadata/wrapper';
import { pushMetric } from '../utapi/utilities';
const bucketShield = require('./apiUtils/bucket/bucketShield');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const requestType = 'bucketOwnerAction';
@ -16,7 +16,7 @@ const requestType = 'bucketOwnerAction';
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketDeleteCors(authInfo, request, log, callback) {
function bucketDeleteCors(authInfo, request, log, callback) {
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
@ -66,3 +66,5 @@ export default function bucketDeleteCors(authInfo, request, log, callback) {
});
});
}
module.exports = bucketDeleteCors;

View File

@ -1,14 +1,14 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import bucketShield from './apiUtils/bucket/bucketShield';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
import metadata from '../metadata/wrapper';
import { pushMetric } from '../utapi/utilities';
const bucketShield = require('./apiUtils/bucket/bucketShield');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const requestType = 'bucketOwnerAction';
export default function bucketDeleteWebsite(authInfo, request, log, callback) {
function bucketDeleteWebsite(authInfo, request, log, callback) {
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
@ -58,3 +58,5 @@ export default function bucketDeleteWebsite(authInfo, request, log, callback) {
});
});
}
module.exports = bucketDeleteWebsite;

View File

@ -1,13 +1,16 @@
import querystring from 'querystring';
import constants from '../../constants';
const querystring = require('querystring');
const { errors, versioning } = require('arsenal');
import services from '../services';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import escapeForXML from '../utilities/escapeForXML';
import { pushMetric } from '../utapi/utilities';
import { errors } from 'arsenal';
const constants = require('../../constants');
const services = require('../services');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const escapeForXML = require('../utilities/escapeForXML');
const { pushMetric } = require('../utapi/utilities');
// Sample XML response:
const versionIdUtils = versioning.VersionID;
// Sample XML response for GET bucket objects:
/* <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example-bucket</Name>
<Prefix></Prefix>
@ -31,80 +34,135 @@ import { errors } from 'arsenal';
</CommonPrefixes>
</ListBucketResult>*/
/**
* bucketGet - Return list of objects in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
export default function bucketGet(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGet' });
const params = request.query;
const bucketName = request.bucketName;
const encoding = params['encoding-type'];
if (encoding !== undefined && encoding !== 'url') {
return callback(errors.InvalidArgument.customizeDescription('Invalid ' +
'Encoding Method specified in Request'));
}
const escapeXmlFn = encoding === 'url' ? querystring.escape : escapeForXML;
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
return callback(errors.InvalidArgument);
}
// AWS only returns 1000 keys even if max keys are greater.
// Max keys stated in response xml can be greater than actual
// keys returned.
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
/* eslint-disable max-len */
// sample XML response for GET bucket object versions:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETVersion.html#RESTBucketGET_Examples
/*
<?xml version="1.0" encoding="UTF-8"?>
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGet',
log,
};
const listParams = {
maxKeys: actualMaxKeys,
delimiter: params.delimiter,
marker: params.marker,
prefix: params.prefix,
};
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>bucket</Name>
<Prefix>my</Prefix>
<KeyMarker/>
<VersionIdMarker/>
<MaxKeys>5</MaxKeys>
<Delimiter>/</Delimiter>
<NextKeyMarker>my-second-image.jpg</NextKeyMarker>
<NextVersionIdMarker>03jpff543dhffds434rfdsFDN943fdsFkdmqnh892</NextVersionIdMarker>
<IsTruncated>true</IsTruncated>
<Version>
<Key>my-image.jpg</Key>
<VersionId>3/L4kqtJl40Nr8X8gdRQBpUMLUo</VersionId>
<IsLatest>true</IsLatest>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>
<Size>434234</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
<DeleteMarker>
<Key>my-second-image.jpg</Key>
<VersionId>03jpff543dhffds434rfdsFDN943fdsFkdmqnh892</VersionId>
<IsLatest>true</IsLatest>
<LastModified>2009-11-12T17:50:30.000Z</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</DeleteMarker>
<CommonPrefixes>
<Prefix>photos/</Prefix>
</CommonPrefixes>
</ListVersionsResult>
*/
/* eslint-enable max-len */
services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', { error: err });
return callback(err, null, corsHeaders);
}
return services.getObjectListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { error: err });
return callback(err, null, corsHeaders);
}
function processVersions(bucketName, listParams, list) {
const xml = [];
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<ListBucketResult xmlns="http://s3.amazonaws.com/doc/' +
'2006-03-01/">',
`<Name>${bucketName}</Name>`
'<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
'<Name>', bucketName, '</Name>'
);
const isTruncated = list.IsTruncated ? 'true' : 'false';
const xmlParams = [
{ tag: 'Prefix', value: listParams.prefix },
{ tag: 'NextMarker', value: list.NextMarker },
{ tag: 'Marker', value: listParams.marker },
{ tag: 'MaxKeys', value: requestMaxKeys },
{ tag: 'KeyMarker', value: listParams.keyMarker },
{ tag: 'VersionIdMarker', value: listParams.versionIdMarker },
{ tag: 'NextKeyMarker', value: list.NextKeyMarker },
{ tag: 'NextVersionIdMarker', value: list.NextVersionIdMarker },
{ tag: 'MaxKeys', value: listParams.maxKeys },
{ tag: 'Delimiter', value: listParams.delimiter },
{ tag: 'EncodingType', value: encoding },
{ tag: 'EncodingType', value: listParams.encoding },
{ tag: 'IsTruncated', value: isTruncated },
];
const escapeXmlFn = listParams.encoding === 'url' ?
querystring.escape : escapeForXML;
xmlParams.forEach(p => {
if (p.value) {
const val = p.tag !== 'NextVersionIdMarker' || p.value === 'null' ?
p.value : versionIdUtils.encode(p.value);
xml.push(`<${p.tag}>${escapeXmlFn(val)}</${p.tag}>`);
}
});
let lastKey = listParams.keyMarker ?
escapeXmlFn(listParams.keyMarker) : undefined;
list.Versions.forEach(item => {
const v = item.value;
const objectKey = escapeXmlFn(item.key);
const isLatest = lastKey !== objectKey;
lastKey = objectKey;
xml.push(
v.IsDeleteMarker ? '<DeleteMarker>' : '<Version>',
`<Key>${objectKey}</Key>`,
'<VersionId>',
(v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId),
'</VersionId>',
`<IsLatest>${isLatest}</IsLatest>`,
`<LastModified>${v.LastModified}</LastModified>`,
`<ETag>&quot;${v.ETag}&quot;</ETag>`,
`<Size>${v.Size}</Size>`,
'<Owner>',
`<ID>${v.Owner.ID}</ID>`,
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
'</Owner>',
`<StorageClass>${v.StorageClass}</StorageClass>`,
v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>'
);
});
list.CommonPrefixes.forEach(item => {
const val = escapeXmlFn(item);
xml.push(`<CommonPrefixes><Prefix>${val}</Prefix></CommonPrefixes>`);
});
xml.push('</ListVersionsResult>');
return xml.join('');
}
function processMasterVersions(bucketName, listParams, list) {
const xml = [];
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
'<Name>', bucketName, '</Name>'
);
const isTruncated = list.IsTruncated ? 'true' : 'false';
const xmlParams = [
{ tag: 'Prefix', value: listParams.prefix || '' },
{ tag: 'Marker', value: listParams.marker || '' },
{ tag: 'NextMarker', value: list.NextMarker },
{ tag: 'MaxKeys', value: listParams.maxKeys },
{ tag: 'Delimiter', value: listParams.delimiter },
{ tag: 'EncodingType', value: listParams.encoding },
{ tag: 'IsTruncated', value: isTruncated },
];
const escapeXmlFn = listParams.encoding === 'url' ?
querystring.escape : escapeForXML;
xmlParams.forEach(p => {
if (p.value) {
xml.push(`<${p.tag}>${escapeXmlFn(p.value)}</${p.tag}>`);
@ -117,9 +175,11 @@ export default function bucketGet(authInfo, request, log, callback) {
list.Contents.forEach(item => {
const v = item.value;
if (v.isDeleteMarker) {
return null;
}
const objectKey = escapeXmlFn(item.key);
xml.push(
return xml.push(
'<Contents>',
`<Key>${objectKey}</Key>`,
`<LastModified>${v.LastModified}</LastModified>`,
@ -135,17 +195,87 @@ export default function bucketGet(authInfo, request, log, callback) {
});
list.CommonPrefixes.forEach(item => {
const val = escapeXmlFn(item);
xml.push(
`<CommonPrefixes><Prefix>${val}</Prefix></CommonPrefixes>`
);
xml.push(`<CommonPrefixes><Prefix>${val}</Prefix></CommonPrefixes>`);
});
xml.push('</ListBucketResult>');
pushMetric('listBucket', log, {
return xml.join('');
}
/**
* bucketGet - Return list of objects in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function bucketGet(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGet' });
const params = request.query;
const bucketName = request.bucketName;
const encoding = params['encoding-type'];
if (encoding !== undefined && encoding !== 'url') {
return callback(errors.InvalidArgument.customizeDescription('Invalid ' +
'Encoding Method specified in Request'));
}
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
return callback(errors.InvalidArgument);
}
// AWS only returns 1000 keys even if max keys are greater.
// Max keys stated in response xml can be greater than actual
// keys returned.
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
const metadataValParams = {
authInfo,
bucket: bucketName,
});
return callback(null, xml.join(''), corsHeaders);
bucketName,
requestType: 'bucketGet',
};
const listParams = {
listingType: 'DelimiterMaster',
maxKeys: actualMaxKeys,
delimiter: params.delimiter,
marker: params.marker,
prefix: params.prefix,
};
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', { error: err });
return callback(err, null, corsHeaders);
}
if (params.versions !== undefined) {
listParams.listingType = 'DelimiterVersions';
delete listParams.marker;
listParams.keyMarker = params['key-marker'];
listParams.versionIdMarker = params['version-id-marker'] ?
versionIdUtils.decode(params['version-id-marker']) : undefined;
}
return services.getObjectListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { error: err });
return callback(err, null, corsHeaders);
}
listParams.maxKeys = requestMaxKeys;
listParams.encoding = encoding;
let res = undefined;
if (listParams.listingType === 'DelimiterVersions') {
res = processVersions(bucketName, listParams, list);
} else {
res = processMasterVersions(bucketName, listParams, list);
}
pushMetric('listBucket', log, { authInfo, bucket: bucketName });
return callback(null, res, corsHeaders);
});
});
return undefined;
}
module.exports = bucketGet;
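To illustrate the branch added above: when the request carries the versions sub-resource, the listing type switches and the markers change shape. A sketch of the mapping, with illustrative values:

// GET /bucket?versions&prefix=photos/&key-marker=photos/cat.jpg&max-keys=50
// request.query would look roughly like:
const params = {
    versions: '',
    prefix: 'photos/',
    'key-marker': 'photos/cat.jpg',
    'max-keys': '50',
};
// and bucketGet builds listing parameters along these lines:
const listParams = {
    listingType: 'DelimiterVersions',  // instead of 'DelimiterMaster'
    maxKeys: 50,                       // capped by constants.listingHardLimit
    prefix: 'photos/',
    keyMarker: 'photos/cat.jpg',
    versionIdMarker: undefined,        // decoded from 'version-id-marker' when sent
};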

View File

@ -1,9 +1,9 @@
import aclUtils from '../utilities/aclUtils';
import constants from '../../constants';
import services from '../services';
import vault from '../auth/vault';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { pushMetric } from '../utapi/utilities';
const aclUtils = require('../utilities/aclUtils');
const constants = require('../../constants');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
// Sample XML response:
/*
@ -36,7 +36,7 @@ import { pushMetric } from '../utapi/utilities';
* with either error code or xml response body
* @return {undefined}
*/
export default function bucketGetACL(authInfo, request, log, callback) {
function bucketGetACL(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGetACL' });
const bucketName = request.bucketName;
@ -45,7 +45,6 @@ export default function bucketGetACL(authInfo, request, log, callback) {
authInfo,
bucketName,
requestType: 'bucketGetACL',
log,
};
const grantInfo = {
grants: [],
@ -60,7 +59,7 @@ export default function bucketGetACL(authInfo, request, log, callback) {
constants.logId,
];
services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
@ -171,3 +170,5 @@ export default function bucketGetACL(authInfo, request, log, callback) {
});
});
}
module.exports = bucketGetACL;

View File

@ -1,11 +1,11 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import bucketShield from './apiUtils/bucket/bucketShield';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { convertToXml } from './apiUtils/bucket/bucketCors';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
import metadata from '../metadata/wrapper';
import { pushMetric } from '../utapi/utilities';
const bucketShield = require('./apiUtils/bucket/bucketShield');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { convertToXml } = require('./apiUtils/bucket/bucketCors');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const requestType = 'bucketOwnerAction';
@ -17,7 +17,7 @@ const requestType = 'bucketOwnerAction';
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketGetCors(authInfo, request, log, callback) {
function bucketGetCors(authInfo, request, log, callback) {
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
@ -58,3 +58,5 @@ export default function bucketGetCors(authInfo, request, log, callback) {
return callback(null, xml, corsHeaders);
});
}
module.exports = bucketGetCors;

View File

@ -0,0 +1,66 @@
const { errors } = require('arsenal');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const escapeForXML = require('../utilities/escapeForXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const requestType = 'bucketOwnerAction';
/**
* Bucket Get Location - Get bucket locationConstraint configuration
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketGetLocation(authInfo, request, log, callback) {
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
return metadata.getBucket(bucketName, log, (err, bucket) => {
if (err) {
log.debug('metadata getbucket failed', { error: err });
return callback(err);
}
if (bucketShield(bucket, requestType)) {
return callback(errors.NoSuchBucket);
}
log.trace('found bucket in metadata');
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (!isBucketAuthorized(bucket, requestType, canonicalID)) {
log.debug('access denied for account on bucket', {
requestType,
method: 'bucketGetLocation',
});
return callback(errors.AccessDenied, null, corsHeaders);
}
let locationConstraint = bucket.getLocationConstraint();
if (!locationConstraint || locationConstraint === 'us-east-1') {
// AWS returns empty string if no region has been
// provided or for us-east-1
// Note: AWS JS SDK sends a request with locationConstraint us-east-1
// if no locationConstraint provided.
locationConstraint = '';
}
const xml = `<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">` +
`${escapeForXML(locationConstraint)}</LocationConstraint>`;
pushMetric('getBucketLocation', log, {
authInfo,
bucket: bucketName,
});
return callback(null, xml, corsHeaders);
});
}
module.exports = bucketGetLocation;

View File

@ -1,5 +1,6 @@
import services from '../services';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
// Sample XML response:
/*
@ -44,7 +45,7 @@ function convertToXml(versioningConfiguration) {
* with either error code or xml response body
* @return {undefined}
*/
export default function bucketGetVersioning(authInfo, request, log, callback) {
function bucketGetVersioning(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGetVersioning' });
const bucketName = request.bucketName;
@ -53,10 +54,9 @@ export default function bucketGetVersioning(authInfo, request, log, callback) {
authInfo,
bucketName,
requestType: 'bucketOwnerAction',
log,
};
services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
@ -66,11 +66,12 @@ export default function bucketGetVersioning(authInfo, request, log, callback) {
}
const versioningConfiguration = bucket.getVersioningConfiguration();
const xml = convertToXml(versioningConfiguration);
// TODO push metric for bucketGetVersioning
// pushMetric('bucketGetVersioning', log, {
// authInfo,
// bucket: bucketName,
// });
pushMetric('getBucketVersioning', log, {
authInfo,
bucket: bucketName,
});
return callback(null, xml, corsHeaders);
});
}
module.exports = bucketGetVersioning;

View File

@ -1,11 +1,11 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import bucketShield from './apiUtils/bucket/bucketShield';
import { convertToXml } from './apiUtils/bucket/bucketWebsite';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
import metadata from '../metadata/wrapper';
import { pushMetric } from '../utapi/utilities';
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { convertToXml } = require('./apiUtils/bucket/bucketWebsite');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const requestType = 'bucketOwnerAction';
@ -17,7 +17,7 @@ const requestType = 'bucketOwnerAction';
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketGetWebsite(authInfo, request, log, callback) {
function bucketGetWebsite(authInfo, request, log, callback) {
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
@ -59,3 +59,5 @@ export default function bucketGetWebsite(authInfo, request, log, callback) {
return callback(null, xml, corsHeaders);
});
}
module.exports = bucketGetWebsite;

View File

@ -1,7 +1,7 @@
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import services from '../services';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
import { pushMetric } from '../utapi/utilities';
const { pushMetric } = require('../utapi/utilities');
/**
* Determine if bucket exists and if user has permission to access it
@ -12,16 +12,15 @@ import { pushMetric } from '../utapi/utilities';
* with either error code or success
* @return {undefined}
*/
export default function bucketHead(authInfo, request, log, callback) {
function bucketHead(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketHead' });
const bucketName = request.bucketName;
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketHead',
log,
};
services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
@ -34,3 +33,5 @@ export default function bucketHead(authInfo, request, log, callback) {
return callback(null, corsHeaders);
});
}
module.exports = bucketHead;

View File

@ -1,10 +1,54 @@
import { errors } from 'arsenal';
const { waterfall } = require('async');
const { parseString } = require('xml2js');
const { auth, errors } = require('arsenal');
import { createBucket } from './apiUtils/bucket/bucketCreation';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import config from '../Config';
import aclUtils from '../utilities/aclUtils';
import { pushMetric } from '../utapi/utilities';
const vault = require('../auth/vault');
const { createBucket } = require('./apiUtils/bucket/bucketCreation');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { config } = require('../Config');
const aclUtils = require('../utilities/aclUtils');
const { pushMetric } = require('../utapi/utilities');
const { locationConstraints, restEndpoints } = config;
/**
* checkLocationConstraint - check that a location constraint is explicitly
* set on the bucket and the value of the location is listed in the
* locationConstraint config.
* Note: if data backend equals "multiple", you must set a location constraint
* @param {object} request - http request object
* @param {string} locationConstraint - the location constraint sent with
* the xml of the request
* @param {object} log - Werelogs logger
* @return {undefined}
*/
function checkLocationConstraint(request, locationConstraint, log) {
// AWS JS SDK sends a request with locationConstraint us-east-1 if
// no locationConstraint provided.
const { parsedHost } = request;
let locationConstraintChecked;
if (locationConstraint) {
locationConstraintChecked = locationConstraint;
} else if (parsedHost && restEndpoints[parsedHost]) {
locationConstraintChecked = restEndpoints[parsedHost];
} else {
log.trace('no location constraint provided on bucket put; ' +
'setting us-east-1');
locationConstraintChecked = 'us-east-1';
}
if (!locationConstraints[locationConstraintChecked]) {
const errMsg = 'value of the location you are attempting to set - ' +
`${locationConstraintChecked} - is not listed in the ` +
'locationConstraint config';
log.trace(`locationConstraint is invalid - ${errMsg}`,
{ locationConstraint: locationConstraintChecked });
return { error: errors.InvalidLocationConstraint.
customizeDescription(errMsg) };
}
return { error: null, locationConstraint: locationConstraintChecked };
}
/*
Format of xml request:
@ -15,18 +59,46 @@ import { pushMetric } from '../utapi/utilities';
</CreateBucketConfiguration>
*/
function _parseXML(request, log, cb) {
if (request.post) {
return parseString(request.post, (err, result) => {
if (err || !result.CreateBucketConfiguration
|| !result.CreateBucketConfiguration.LocationConstraint
|| !result.CreateBucketConfiguration.LocationConstraint[0]) {
log.debug('request xml is malformed');
return cb(errors.MalformedXML);
}
const locationConstraint = result.CreateBucketConfiguration
.LocationConstraint[0];
log.trace('location constraint',
{ locationConstraint });
const locationCheck = checkLocationConstraint(request,
locationConstraint, log);
if (locationCheck.error) {
return cb(locationCheck.error);
}
return cb(null, locationCheck.locationConstraint);
});
}
return process.nextTick(() => {
const locationCheck = checkLocationConstraint(request,
undefined, log);
if (locationCheck.error) {
return cb(locationCheck.error);
}
return cb(null, locationCheck.locationConstraint);
});
}
/**
* PUT Service - Create bucket for the user
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {string | undefined} locationConstraint - locationConstraint for
* bucket (if any)
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketPut(authInfo, request, locationConstraint, log,
callback) {
function bucketPut(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPut' });
if (authInfo.isRequesterPublicUser()) {
@ -37,21 +109,68 @@ export default function bucketPut(authInfo, request, locationConstraint, log,
log.trace('invalid acl header');
return callback(errors.InvalidArgument);
}
const bucketName = request.bucketName;
const { bucketName } = request;
return createBucket(authInfo, bucketName, request.headers,
locationConstraint, config.usEastBehavior, log,
(err, previousBucket) => {
// if bucket already existed, gather any relevant cors headers
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, previousBucket);
return waterfall([
next => _parseXML(request, log, next),
// Check policies in Vault for a user.
(locationConstraint, next) => {
if (authInfo.isRequesterAnIAMUser()) {
const authParams = auth.server.extractParams(request, log, 's3',
request.query);
const requestContextParams = {
constantParams: {
headers: request.headers,
query: request.query,
generalResource: bucketName,
specificResource: {
key: '',
},
requesterIp: request.socket.remoteAddress,
sslEnabled: request.connection.encrypted,
apiMethod: 'bucketPut',
awsService: 's3',
locationConstraint,
requesterInfo: authInfo,
signatureVersion: authParams.params.data.authType,
authType: authParams.params.data.signatureVersion,
signatureAge: authParams.params.data.signatureAge,
},
};
return vault.checkPolicies(requestContextParams,
authInfo.getArn(), log, (err, authorizationResults) => {
if (err) {
return callback(err, corsHeaders);
return next(err);
}
if (authorizationResults[0].isAllowed !== true) {
log.trace('authorization check failed for user',
{ locationConstraint });
return next(errors.AccessDenied);
}
return next(null, locationConstraint);
});
}
return next(null, locationConstraint);
},
(locationConstraint, next) => createBucket(authInfo, bucketName,
request.headers, locationConstraint, log, (err, previousBucket) => {
// if bucket already existed, gather any relevant cors
// headers
const corsHeaders = collectCorsHeaders(
request.headers.origin, request.method, previousBucket);
if (err) {
return next(err, corsHeaders);
}
pushMetric('createBucket', log, {
authInfo,
bucket: bucketName,
});
return callback(null, corsHeaders);
});
return next(null, corsHeaders);
}),
], callback);
}
module.exports = {
checkLocationConstraint,
bucketPut,
};
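The resolution order in checkLocationConstraint can be summarized with a small self-contained sketch (names are illustrative; the real function additionally rejects values that are not listed in the locationConstraints config):

function resolveLocation(xmlConstraint, parsedHost, restEndpoints) {
    if (xmlConstraint) {
        return xmlConstraint;              // 1. explicit <LocationConstraint> from the body
    }
    if (parsedHost && restEndpoints[parsedHost]) {
        return restEndpoints[parsedHost];  // 2. rest endpoint mapped to the request host
    }
    return 'us-east-1';                    // 3. AWS-compatible default
}

console.log(resolveLocation(undefined, 's3.example.com',
    { 's3.example.com': 'scality-us-east-1' }));  // 'scality-us-east-1'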

View File

@ -1,14 +1,14 @@
import { errors } from 'arsenal';
import async from 'async';
const async = require('async');
const { errors } = require('arsenal');
import acl from '../metadata/acl';
import aclUtils from '../utilities/aclUtils';
import { cleanUpBucket } from './apiUtils/bucket/bucketCreation';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import services from '../services';
import vault from '../auth/vault';
import { pushMetric } from '../utapi/utilities';
const acl = require('../metadata/acl');
const aclUtils = require('../utilities/aclUtils');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault');
const { pushMetric } = require('../utapi/utilities');
/*
Format of xml request:
@ -40,7 +40,7 @@ import { pushMetric } from '../utapi/utilities';
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketPutACL(authInfo, request, log, callback) {
function bucketPutACL(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutACL' });
const bucketName = request.bucketName;
@ -72,7 +72,6 @@ export default function bucketPutACL(authInfo, request, log, callback) {
authInfo,
bucketName,
requestType: 'bucketPutACL',
log,
};
const possibleGrants = ['FULL_CONTROL', 'WRITE',
'WRITE_ACP', 'READ', 'READ_ACP'];
@ -103,12 +102,12 @@ export default function bucketPutACL(authInfo, request, log, callback) {
return async.waterfall([
function waterfall1(next) {
services.metadataValidateAuthorization(metadataValParams,
metadataValidateBucket(metadataValParams, log,
(err, bucket) => {
if (err) {
log.trace('request authorization failed', {
error: err,
method: 'services.metadataValidateAuthorization',
method: 'metadataValidateBucket',
});
return next(err, bucket);
}
@ -295,3 +294,5 @@ export default function bucketPutACL(authInfo, request, log, callback) {
return callback(err, corsHeaders);
});
}
module.exports = bucketPutACL;

View File

@ -1,14 +1,13 @@
import crypto from 'crypto';
const crypto = require('crypto');
const async = require('async');
const { errors } = require('arsenal');
import async from 'async';
import { errors } from 'arsenal';
import bucketShield from './apiUtils/bucket/bucketShield';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
import metadata from '../metadata/wrapper';
import { parseCorsXml } from './apiUtils/bucket/bucketCors';
import { pushMetric } from '../utapi/utilities';
const bucketShield = require('./apiUtils/bucket/bucketShield');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { parseCorsXml } = require('./apiUtils/bucket/bucketCors');
const { pushMetric } = require('../utapi/utilities');
const requestType = 'bucketOwnerAction';
@ -20,7 +19,7 @@ const requestType = 'bucketOwnerAction';
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketPutCors(authInfo, request, log, callback) {
function bucketPutCors(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutCors' });
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
@ -92,3 +91,5 @@ export default function bucketPutCors(authInfo, request, log, callback) {
return callback(err, corsHeaders);
});
}
module.exports = bucketPutCors;

View File

@ -0,0 +1,71 @@
const { waterfall } = require('async');
const { errors } = require('arsenal');
const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const getReplicationConfiguration =
require('./apiUtils/bucket/getReplicationConfiguration');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
// The error response when a bucket does not have versioning 'Enabled'.
const versioningNotEnabledError = errors.InvalidRequest.customizeDescription(
'Versioning must be \'Enabled\' on the bucket to apply a replication ' +
'configuration');
/**
* bucketPutReplication - Create or update bucket replication configuration
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketPutReplication(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutReplication' });
const { bucketName, post, headers, method } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketOwnerAction',
};
return waterfall([
// Validate the request XML and return the replication configuration.
next => getReplicationConfiguration(post, log, next),
// Check bucket user privileges and ensure versioning is 'Enabled'.
(config, next) =>
// TODO: Validate that destination bucket exists and has versioning.
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
if (err) {
return next(err);
}
// Replication requires that versioning is 'Enabled'.
if (!bucket.isVersioningEnabled()) {
return next(versioningNotEnabledError);
}
return next(null, config, bucket);
}),
// Set the replication configuration and update the bucket metadata.
(config, bucket, next) => {
bucket.setReplicationConfiguration(config);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.trace('error processing request', {
error: err,
method: 'bucketPutReplication',
});
return callback(err, corsHeaders);
}
pushMetric('putBucketReplication', log, {
authInfo,
bucket: bucketName,
});
return callback(null, corsHeaders);
});
}
module.exports = bucketPutReplication;
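
The handler above only accepts a replication configuration once the bucket reports versioning as 'Enabled'. A minimal client-side sketch of that ordering, assuming the aws-sdk v2 client, a local S3 endpoint, and placeholder credentials, role and bucket ARNs (none of which come from this changeset):

// Sketch only: aws-sdk v2 client against an assumed local endpoint;
// the role ARN, bucket names and credentials are placeholders.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    s3ForcePathStyle: true,
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
});

// Enable versioning first; otherwise bucketPutReplication returns the
// InvalidRequest error defined at the top of the file.
s3.putBucketVersioning({
    Bucket: 'source-bucket',
    VersioningConfiguration: { Status: 'Enabled' },
}, err => {
    if (err) {
        return console.error('putBucketVersioning failed', err);
    }
    return s3.putBucketReplication({
        Bucket: 'source-bucket',
        ReplicationConfiguration: {
            Role: 'arn:aws:iam::123456789012:role/replication-role',
            Rules: [{
                Prefix: '',
                Status: 'Enabled',
                Destination: { Bucket: 'arn:aws:s3:::destination-bucket' },
            }],
        },
    }, err => {
        if (err) {
            return console.error('putBucketReplication failed', err);
        }
        return console.log('replication configuration stored');
    });
});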

View File

@ -1,9 +1,11 @@
import { waterfall } from 'async';
import { parseString } from 'xml2js';
const { waterfall } = require('async');
const { parseString } = require('xml2js');
const { errors } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import metadata from '../metadata/wrapper';
import services from '../services';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
/**
* Format of xml request:
@ -17,6 +19,37 @@ import services from '../services';
x-amz-mfa: [SerialNumber] [TokenCode]
*/
function _parseXML(request, log, cb) {
if (request.post === '') {
log.debug('request xml is missing');
return cb(errors.MalformedXML);
}
return parseString(request.post, (err, result) => {
if (err) {
log.debug('request xml is malformed');
return cb(errors.MalformedXML);
}
const versioningConf = result.VersioningConfiguration;
const status = versioningConf.Status ?
versioningConf.Status[0] : undefined;
const mfaDelete = versioningConf.MfaDelete ?
versioningConf.MfaDelete[0] : undefined;
const validStatuses = ['Enabled', 'Suspended'];
const validMfaDeletes = [undefined, 'Enabled', 'Disabled'];
if (validStatuses.indexOf(status) < 0 ||
validMfaDeletes.indexOf(mfaDelete) < 0) {
log.debug('illegal versioning configuration');
return cb(errors.IllegalVersioningConfigurationException);
}
if (versioningConf && mfaDelete === 'Enabled') {
log.debug('mfa deletion is not implemented');
return cb(errors.NotImplemented
.customizeDescription('MFA Deletion is not supported yet.'));
}
return process.nextTick(() => cb(null));
});
}
/**
* Bucket Put Versioning - Create or update bucket Versioning
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@ -25,7 +58,7 @@ import services from '../services';
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketPutVersioning(authInfo, request, log, callback) {
function bucketPutVersioning(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutVersioning' });
const bucketName = request.bucketName;
@ -33,11 +66,11 @@ export default function bucketPutVersioning(authInfo, request, log, callback) {
authInfo,
bucketName,
requestType: 'bucketOwnerAction',
log,
};
return waterfall([
next => services.metadataValidateAuthorization(metadataValParams,
next => _parseXML(request, log, next),
next => metadataValidateBucket(metadataValParams, log,
(err, bucket) => next(err, bucket)), // ignore extra null object,
(bucket, next) => parseString(request.post, (err, result) => {
// just for linting; there should not be any parsing error here
@ -68,14 +101,14 @@ export default function bucketPutVersioning(authInfo, request, log, callback) {
if (err) {
log.trace('error processing request', { error: err,
method: 'bucketPutVersioning' });
} else {
pushMetric('putBucketVersioning', log, {
authInfo,
bucket: bucketName,
});
}
// TODO push metrics for bucketPutVersioning
// else {
// pushMetric('bucketPutVersioning', log, {
// authInfo,
// bucket: bucketName,
// }
// }
return callback(err, corsHeaders);
});
}
module.exports = bucketPutVersioning;
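
For reference, _parseXML above accepts only Status values of 'Enabled' or 'Suspended' and rejects MfaDelete 'Enabled' as NotImplemented. A small sketch of the same validation applied directly with xml2js; the request body below is an illustrative example, not taken from this changeset:

// Sketch only: feeds an example VersioningConfiguration body through
// xml2js and applies the same checks as _parseXML.
const { parseString } = require('xml2js');

const body =
    '<VersioningConfiguration ' +
    'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
    '<Status>Suspended</Status>' +
    '</VersioningConfiguration>';

parseString(body, (err, result) => {
    if (err) {
        return console.error('MalformedXML');
    }
    const conf = result.VersioningConfiguration;
    const status = conf.Status ? conf.Status[0] : undefined;
    const mfaDelete = conf.MfaDelete ? conf.MfaDelete[0] : undefined;
    const validStatuses = ['Enabled', 'Suspended'];
    const validMfaDeletes = [undefined, 'Enabled', 'Disabled'];
    if (validStatuses.indexOf(status) < 0 ||
        validMfaDeletes.indexOf(mfaDelete) < 0) {
        return console.error('IllegalVersioningConfigurationException');
    }
    if (mfaDelete === 'Enabled') {
        return console.error('NotImplemented: MFA Delete');
    }
    return console.log('valid configuration:', status);
});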

View File

@ -1,12 +1,12 @@
import { errors } from 'arsenal';
import async from 'async';
const async = require('async');
const { errors } = require('arsenal');
import bucketShield from './apiUtils/bucket/bucketShield';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
import metadata from '../metadata/wrapper';
import { parseWebsiteConfigXml } from './apiUtils/bucket/bucketWebsite';
import { pushMetric } from '../utapi/utilities';
const bucketShield = require('./apiUtils/bucket/bucketShield');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const metadata = require('../metadata/wrapper');
const { parseWebsiteConfigXml } = require('./apiUtils/bucket/bucketWebsite');
const { pushMetric } = require('../utapi/utilities');
const requestType = 'bucketOwnerAction';
@ -18,7 +18,7 @@ const requestType = 'bucketOwnerAction';
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function bucketPutWebsite(authInfo, request, log, callback) {
function bucketPutWebsite(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketPutWebsite' });
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
@ -76,3 +76,5 @@ export default function bucketPutWebsite(authInfo, request, log, callback) {
return callback(err, corsHeaders);
});
}
module.exports = bucketPutWebsite;

View File

@ -1,17 +1,23 @@
import { errors } from 'arsenal';
import async from 'async';
import crypto from 'crypto';
import { parseString } from 'xml2js';
import escapeForXML from '../utilities/escapeForXML';
import { pushMetric } from '../utapi/utilities';
const crypto = require('crypto');
const async = require('async');
const { parseString } = require('xml2js');
const { errors, versioning } = require('arsenal');
import data from '../data/wrapper';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import metadata from '../metadata/wrapper';
import services from '../services';
const escapeForXML = require('../utilities/escapeForXML');
const { pushMetric } = require('../utapi/utilities');
import { logger } from '../utilities/logger';
const data = require('../data/wrapper');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const { versioningPreprocessing, checkQueryVersionId }
= require('./apiUtils/object/versioning');
const metadata = require('../metadata/wrapper');
const services = require('../services');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const logger = require('../utilities/logger');
const versionIdUtils = versioning.VersionID;
/*
Format of xml request:
@ -79,7 +85,6 @@ const _convertToXml = xmlParams => {
* @param {function} callback - callback to server
* @return {undefined}
*/
export default
function completeMultipartUpload(authInfo, request, log, callback) {
log.debug('processing request', { method: 'completeMultipartUpload' });
const bucketName = request.bucketName;
@ -102,6 +107,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
hostname,
};
const queryContainsVersionId = checkQueryVersionId(request.query);
if (queryContainsVersionId instanceof Error) {
return callback(queryContainsVersionId);
}
function parseXml(xmlToParse, next) {
return parseString(xmlToParse, (err, result) => {
if (err || !result || !result.CompleteMultipartUpload
@ -113,8 +123,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
});
}
async.waterfall([
function waterfall1(next) {
return async.waterfall([
function validateDestBucket(next) {
const metadataValParams = {
objectKey,
authInfo,
@ -122,11 +132,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
// Required permissions for this action
// at the destinationBucket level are same as objectPut
requestType: 'objectPut',
log,
};
services.metadataValidateAuthorization(metadataValParams, next);
metadataValidateBucketAndObj(metadataValParams, log, next);
},
function waterfall2(destBucket, objMD, next) {
function validateMultipart(destBucket, objMD, next) {
services.metadataValidateMultipart(metadataValParams,
(err, mpuBucket) => {
if (err) {
@ -135,7 +144,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
return next(null, destBucket, objMD, mpuBucket);
});
},
function waterfall3(destBucket, objMD, mpuBucket, next) {
function parsePartsList(destBucket, objMD, mpuBucket, next) {
if (request.post) {
return parseXml(request.post, (err, jsonList) => {
if (err) {
@ -146,7 +155,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
}
return next(errors.MalformedXML, destBucket);
},
function waterfall4(destBucket, objMD, mpuBucket, jsonList, next) {
function retrieveParts(destBucket, objMD, mpuBucket, jsonList, next) {
services.getMPUparts(mpuBucket.getName(), uploadId, log,
(err, result) => {
if (err) {
@ -157,8 +166,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
storedParts, jsonList);
});
},
function waterfall5(destBucket, objMD, mpuBucket, storedParts, jsonList,
next) {
function processParts(destBucket, objMD, mpuBucket, storedParts,
jsonList, next) {
const storedPartsAsObjects = storedParts.map(item => ({
// In order to delete the part listing in the shadow
// bucket, need the full key
@ -211,7 +220,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
if (!location || typeof location !== 'object') {
return;
}
extraPartLocations.push({ key: location.key });
extraPartLocations.push(location);
});
});
}
@ -323,7 +332,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
`overview${splitter}${objectKey}${splitter}${uploadId}`;
return metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey,
log, (err, storedMetadata) => {
{}, log, (err, storedMetadata) => {
if (err) {
return next(err, destBucket);
}
@ -333,7 +342,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
storedPartsAsObjects, extraPartLocations);
});
},
function waterfall6(destBucket, objMD, mpuBucket, storedMetadata,
function prepForStoring(destBucket, objMD, mpuBucket, storedMetadata,
aggregateETag, calculatedSize, dataLocations, mpuOverviewKey,
storedPartsAsObjects, extraPartLocations, next) {
const metaHeaders = {};
@ -363,59 +372,87 @@ function completeMultipartUpload(authInfo, request, log, callback) {
multipart: true,
log,
};
next(null, destBucket, dataLocations, metaStoreParams,
mpuBucket, mpuOverviewKey, aggregateETag,
storedPartsAsObjects, objMD, extraPartLocations);
},
function waterfall7(destinationBucket, dataLocations,
metaStoreParams, mpuBucket, mpuOverviewKey,
aggregateETag, storedPartsAsObjects, objMD,
extraPartLocations, next) {
const serverSideEncryption =
destinationBucket.getServerSideEncryption();
destBucket.getServerSideEncryption();
let pseudoCipherBundle = null;
if (serverSideEncryption) {
pseudoCipherBundle = {
algorithm: destinationBucket.getSseAlgorithm(),
masterKeyId: destinationBucket.getSseMasterKeyId(),
algorithm: destBucket.getSseAlgorithm(),
masterKeyId: destBucket.getSseMasterKeyId(),
};
}
services.metadataStoreObject(destinationBucket.getName(),
dataLocations, pseudoCipherBundle, metaStoreParams, err => {
return versioningPreprocessing(bucketName,
destBucket, objectKey, objMD, log, (err, options) => {
if (err) {
// TODO: check AWS error when user requested a specific
// version before any versions have been put
const logLvl = err === errors.BadRequest ?
'debug' : 'error';
log[logLvl]('error getting versioning info', {
error: err,
method: 'versioningPreprocessing',
});
return next(err, destBucket);
}
const dataToDelete = options.dataToDelete;
metaStoreParams.versionId = options.versionId;
metaStoreParams.versioning = options.versioning;
metaStoreParams.isNull = options.isNull;
metaStoreParams.nullVersionId = options.nullVersionId;
return next(null, destBucket, dataLocations,
metaStoreParams, mpuBucket, mpuOverviewKey,
aggregateETag, storedPartsAsObjects, objMD,
extraPartLocations, pseudoCipherBundle, dataToDelete);
});
},
function storeAsNewObj(destinationBucket, dataLocations,
metaStoreParams, mpuBucket, mpuOverviewKey, aggregateETag,
storedPartsAsObjects, objMD, extraPartLocations, pseudoCipherBundle,
dataToDelete, next) {
return services.metadataStoreObject(destinationBucket.getName(),
dataLocations, pseudoCipherBundle, metaStoreParams,
(err, res) => {
if (err) {
return next(err, destinationBucket);
}
if (objMD && objMD.location) {
const dataToDelete = Array.isArray(objMD.location) ?
objMD.location : [objMD.location];
data.batchDelete(dataToDelete, logger
.newRequestLoggerFromSerializedUids(log
const generatedVersionId = res ? res.versionId : undefined;
// in cases where completing mpu overwrites a previous
// null version when versioning is suspended or versioning
// is not enabled, need to delete pre-existing data
if (dataToDelete) {
data.batchDelete(dataToDelete, request.method, null,
logger.newRequestLoggerFromSerializedUids(log
.getSerializedUids()));
}
return next(null, mpuBucket, mpuOverviewKey,
aggregateETag, storedPartsAsObjects,
extraPartLocations, destinationBucket);
return next(null, mpuBucket, mpuOverviewKey, aggregateETag,
storedPartsAsObjects, extraPartLocations,
destinationBucket, generatedVersionId);
});
},
function waterfall8(mpuBucket, mpuOverviewKey, aggregateETag,
storedPartsAsObjects, extraPartLocations, destinationBucket, next) {
function deletePartsMetadata(mpuBucket, mpuOverviewKey, aggregateETag,
storedPartsAsObjects, extraPartLocations, destinationBucket,
generatedVersionId, next) {
const keysToDelete = storedPartsAsObjects.map(item => item.key);
keysToDelete.push(mpuOverviewKey);
services.batchDeleteObjectMetadata(mpuBucket.getName(),
keysToDelete, log, err => next(err, destinationBucket,
aggregateETag));
aggregateETag, generatedVersionId));
if (extraPartLocations.length > 0) {
data.batchDelete(extraPartLocations, logger
.newRequestLoggerFromSerializedUids(log
data.batchDelete(extraPartLocations, request.method, null,
logger.newRequestLoggerFromSerializedUids(log
.getSerializedUids()));
}
},
], (err, destinationBucket, aggregateETag) => {
const corsHeaders =
], (err, destinationBucket, aggregateETag, generatedVersionId) => {
const resHeaders =
collectCorsHeaders(request.headers.origin, request.method,
destinationBucket);
if (err) {
return callback(err, null, corsHeaders);
return callback(err, null, resHeaders);
}
if (generatedVersionId) {
resHeaders['x-amz-version-id'] =
versionIdUtils.encode(generatedVersionId);
}
xmlParams.ETag = `"${aggregateETag}"`;
const xml = _convertToXml(xmlParams);
@ -423,6 +460,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
authInfo,
bucket: bucketName,
});
return callback(null, xml, corsHeaders);
return callback(null, xml, resHeaders);
});
}
module.exports = completeMultipartUpload;
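
With the versioning preprocessing above, completing an upload on a versioned bucket now returns the generated version id in the x-amz-version-id header. A sketch of reading it with the aws-sdk v2 client; the endpoint, bucket, key, upload id and part ETag are placeholders:

// Sketch only: assumes an upload already initiated and one part uploaded;
// all identifiers below are placeholders.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    s3ForcePathStyle: true,
});

s3.completeMultipartUpload({
    Bucket: 'versioned-bucket',
    Key: 'big-object',
    UploadId: 'example-upload-id',
    MultipartUpload: {
        Parts: [{ ETag: '"d41d8cd98f00b204e9800998ecf8427e"', PartNumber: 1 }],
    },
}, (err, data) => {
    if (err) {
        return console.error(err);
    }
    // On a versioning-enabled bucket, data.VersionId is the encoded
    // generatedVersionId set on the response headers above.
    return console.log(data.ETag, data.VersionId);
});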

View File

@ -1,10 +1,10 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import metadata from '../metadata/wrapper';
import bucketShield from './apiUtils/bucket/bucketShield';
import { findCorsRule,
generateCorsResHeaders } from './apiUtils/object/corsResponse';
// import { pushMetric } from '../utapi/utilities';
const metadata = require('../metadata/wrapper');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { findCorsRule, generateCorsResHeaders }
= require('./apiUtils/object/corsResponse');
// const { pushMetric } = require('../utapi/utilities');
const requestType = 'objectGet';
@ -23,7 +23,7 @@ const customizedErrs = {
* with either error code or 200 response
* @return {undefined}
*/
export default function corsPreflight(request, log, callback) {
function corsPreflight(request, log, callback) {
log.debug('processing request', { method: 'corsPreflight' });
const bucketName = request.bucketName;
@ -81,3 +81,5 @@ export default function corsPreflight(request, log, callback) {
return callback(null, resHeaders);
});
}
module.exports = corsPreflight;

View File

@ -1,12 +1,19 @@
import UUID from 'node-uuid';
import escapeForXML from '../utilities/escapeForXML';
import { pushMetric } from '../utapi/utilities';
import { errors } from 'arsenal';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { cleanUpBucket } from './apiUtils/bucket/bucketCreation';
import constants from '../../constants';
import services from '../services';
import utils from '../utils';
const UUID = require('node-uuid');
const { errors, s3validators } = require('arsenal');
const getMetaHeaders = s3validators.userMetadata.getMetaHeaders;
const escapeForXML = require('../utilities/escapeForXML');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants');
const services = require('../services');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
.validateWebsiteHeader;
/*
Sample xml response:
@ -52,7 +59,6 @@ const _convertToXml = xmlParams => {
* @return {undefined} calls callback from router
* with err and result as arguments
*/
export default
function initiateMultipartUpload(authInfo, request, log, callback) {
log.debug('processing request', { method: 'initiateMultipartUpload' });
const bucketName = request.bucketName;
@ -65,13 +71,20 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
// multipart upload object with a key containing the splitter.
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (!utils.validateWebsiteHeader(websiteRedirectHeader)) {
if (!validateWebsiteHeader(websiteRedirectHeader)) {
const err = errors.InvalidRedirectLocation;
log.debug('invalid x-amz-website-redirect-location ' +
`value ${websiteRedirectHeader}`, { error: err });
return callback(err);
}
const metaHeaders = utils.getMetaHeaders(request.headers);
const metaHeaders = getMetaHeaders(request.headers);
if (metaHeaders instanceof Error) {
log.debug('user metadata validation failed', {
error: metaHeaders,
method: 'createAndStoreObject',
});
return process.nextTick(() => callback(metaHeaders));
}
// Generate uniqueID without dashes so that routing not messed up
const uploadId = UUID.v4().replace(/-/g, '');
// TODO: Add this as a utility function for all object put requests
@ -91,7 +104,6 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
bucketName,
// Required permissions for this action are same as objectPut
requestType: 'objectPut',
log,
};
const accountCanonicalID = authInfo.getCanonicalID();
let initiatorID = accountCanonicalID;
@ -100,6 +112,14 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
initiatorID = authInfo.getArn();
initiatorDisplayName = authInfo.getIAMdisplayName();
}
const xmlParams = {
bucketName,
objectKey,
uploadId,
};
const xml = _convertToXml(xmlParams);
function _storetheMPObject(destinationBucket, corsHeaders) {
const metadataStoreParams = {
objectKey,
uploadId,
@ -111,7 +131,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
ownerID: accountCanonicalID,
ownerDisplayName: authInfo.getAccountDisplayName(),
// If initiator is an IAM user, the initiatorID is the ARN.
// Otherwise, it is the same as the ownerID (the account canonicalID)
// Otherwise, it is the same as the ownerID
// (the account canonicalID)
initiatorID,
// If initiator is an IAM user, the initiatorDisplayName is the
// IAM user's displayname.
@ -119,14 +140,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
initiatorDisplayName,
splitter: constants.splitter,
};
const xmlParams = {
bucketName,
objectKey,
uploadId,
};
const xml = _convertToXml(xmlParams);
function _storetheMPObject(destinationBucket, corsHeaders) {
const serverSideEncryption =
destinationBucket.getServerSideEncryption();
let cipherBundle = null;
@ -136,7 +150,17 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
masterKeyId: serverSideEncryption.masterKeyId,
};
}
services.getMPUBucket(destinationBucket, bucketName, log,
const backendInfoObj = locationConstraintCheck(request, null,
destinationBucket, log);
if (backendInfoObj.err) {
return process.nextTick(() => {
callback(backendInfoObj.err);
});
}
metadataStoreParams.controllingLocationConstraint =
backendInfoObj.controllingLC;
return services.getMPUBucket(destinationBucket, bucketName, log,
(err, MPUbucket) => {
if (err) {
log.trace('error getting MPUbucket', {
@ -168,14 +192,14 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
});
}
services.metadataValidateAuthorization(metadataValParams,
metadataValidateBucketAndObj(metadataValParams, log,
(err, destinationBucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, destinationBucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'services.metadataValidateAuthorization',
method: 'metadataValidateBucketAndObj',
});
return callback(err, null, corsHeaders);
}
@ -187,11 +211,13 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
}
if (destinationBucket.hasTransientFlag() ||
destinationBucket.hasDeletedFlag()) {
log.trace('transient or deleted flag so cleaning up bucket');
log.trace('transient or deleted flag so cleaning ' +
'up bucket');
return cleanUpBucket(destinationBucket,
accountCanonicalID, log, err => {
if (err) {
log.debug('error cleaning up bucket with flag',
log.debug('error cleaning up bucket ' +
'with flag',
{ error: err,
transientFlag:
destinationBucket.hasTransientFlag(),
@ -212,3 +238,5 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
});
return undefined;
}
module.exports = initiateMultipartUpload;
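
The changes above validate user metadata with getMetaHeaders and resolve the controlling location constraint before the overview key is stored. A client-side sketch of an initiation that exercises both paths, assuming the aws-sdk v2 client and placeholder endpoint and names:

// Sketch only: Metadata entries become x-amz-meta-* headers validated by
// getMetaHeaders; the bucket's location constraint is resolved by
// locationConstraintCheck before the MPU bucket is fetched.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    s3ForcePathStyle: true,
});

s3.createMultipartUpload({
    Bucket: 'demo-bucket',
    Key: 'big-object',
    Metadata: { purpose: 'demo' },
    WebsiteRedirectLocation: '/other-page.html',
}, (err, data) => {
    if (err) {
        return console.error(err);
    }
    // UploadId is the dash-less uuid generated above.
    return console.log(data.UploadId);
});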

View File

@ -1,12 +1,14 @@
import async from 'async';
import escapeForXML from '../utilities/escapeForXML';
import querystring from 'querystring';
const querystring = require('querystring');
const async = require('async');
const escapeForXML = require('../utilities/escapeForXML');
const constants = require('../../constants');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const services = require('../services');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const { errors } = require('arsenal');
import constants from '../../constants';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import services from '../services';
import { pushMetric } from '../utapi/utilities';
import { errors } from 'arsenal';
// Sample XML response:
/*
@ -159,8 +161,7 @@ const _convertToXml = xmlParams => {
* with either error code or xml response body
* @return {undefined}
*/
export default function listMultipartUploads(authInfo,
request, log, callback) {
function listMultipartUploads(authInfo, request, log, callback) {
log.debug('processing request', { method: 'listMultipartUploads' });
const query = request.query;
const bucketName = request.bucketName;
@ -178,14 +179,13 @@ export default function listMultipartUploads(authInfo,
// the authorization to list multipart uploads is the same
// as listing objects in a bucket.
requestType: 'bucketGet',
log,
};
async.waterfall([
function waterfall1(next) {
// Check final destination bucket for authorization rather
// than multipart upload bucket
services.metadataValidateAuthorization(metadataValParams,
metadataValidateBucket(metadataValParams, log,
(err, bucket) => next(err, bucket));
},
function getMPUBucket(bucket, next) {
@ -213,7 +213,7 @@ export default function listMultipartUploads(authInfo,
maxKeys: maxUploads,
prefix: `overview${splitter}${prefix}`,
queryPrefixLength: prefix.length,
listingType: 'multipartuploads',
listingType: 'MPU',
splitter,
};
services.getMultipartUploadListing(mpuBucketName, listingParams,
@ -242,3 +242,5 @@ export default function listMultipartUploads(authInfo,
return callback(null, xml, corsHeaders);
});
}
module.exports = listMultipartUploads;

View File

@ -1,12 +1,14 @@
import async from 'async';
import querystring from 'querystring';
const querystring = require('querystring');
const async = require('async');
const constants = require('../../constants');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const services = require('../services');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const escapeForXML = require('../utilities/escapeForXML');
const { pushMetric } = require('../utapi/utilities');
const { errors } = require('arsenal');
import constants from '../../constants';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import services from '../services';
import escapeForXML from '../utilities/escapeForXML';
import { pushMetric } from '../utapi/utilities';
import { errors } from 'arsenal';
/*
Format of xml response:
@ -67,7 +69,7 @@ function buildXML(xmlParams, xml, encodingFn) {
* @param {function} callback - callback to server
* @return {undefined}
*/
export default function listParts(authInfo, request, log, callback) {
function listParts(authInfo, request, log, callback) {
log.debug('processing request', { method: 'listParts' });
const bucketName = request.bucketName;
@ -91,7 +93,6 @@ export default function listParts(authInfo, request, log, callback) {
objectKey,
uploadId,
requestType: 'listParts',
log,
};
// For validating the request at the destinationBucket level
// params are the same as validating at the MPU level
@ -106,7 +107,7 @@ export default function listParts(authInfo, request, log, callback) {
async.waterfall([
function checkDestBucketVal(next) {
services.metadataValidateAuthorization(metadataValParams,
metadataValidateBucketAndObj(metadataValParams, log,
(err, destinationBucket) => {
if (err) {
return next(err, destinationBucket, null);
@ -125,6 +126,7 @@ export default function listParts(authInfo, request, log, callback) {
});
},
function waterfall2(destBucket, next) {
metadataValMPUparams.log = log;
services.metadataValidateMultipart(metadataValMPUparams,
(err, mpuBucket, mpuOverview) => {
if (err) {
@ -233,3 +235,5 @@ export default function listParts(authInfo, request, log, callback) {
});
return undefined;
}
module.exports = listParts;

View File

@ -1,17 +1,23 @@
import crypto from 'crypto';
const crypto = require('crypto');
import async from 'async';
import { auth, errors } from 'arsenal';
import { parseString } from 'xml2js';
const async = require('async');
const { parseString } = require('xml2js');
const { auth, errors, versioning } = require('arsenal');
import escapeForXML from '../utilities/escapeForXML';
import { pushMetric } from '../utapi/utilities';
import bucketShield from './apiUtils/bucket/bucketShield';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import metadata from '../metadata/wrapper';
import services from '../services';
import vault from '../auth/vault';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
const escapeForXML = require('../utilities/escapeForXML');
const { pushMetric } = require('../utapi/utilities');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const services = require('../services');
const vault = require('../auth/vault');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const { preprocessingVersioningDelete }
= require('./apiUtils/object/versioning');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { metadataGetObject } = require('../metadata/metadataUtils');
const versionIdUtils = versioning.VersionID;
/*
@ -50,8 +56,12 @@ import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
* @param {boolean} quietSetting - true if xml should just include error list
* and false if should include deleted list and error list
* @param {object []} errorResults - list of error result objects with each
* object containing -- key: objectName, error: arsenal error
* @param {string []} deleted - list of object keys deleted
* object containing -- entry: { key, versionId }, error: arsenal error
* @param {object []} deleted - list of objects deleted, where each object
* has the format { entry, isDeleteMarker, deleteMarkerVersionId }
* object.entry: the { key, versionId } entry as above
* object.isDeleteMarker: true if a delete marker was created or deleted
* object.deleteMarkerVersionId: version id of that delete marker
* @return {string} xml string
*/
function _formatXML(quietSetting, errorResults, deleted) {
@ -59,9 +69,17 @@ function _formatXML(quietSetting, errorResults, deleted) {
errorResults.forEach(errorObj => {
errorXML.push(
'<Error>',
'<Key>', escapeForXML(errorObj.key), '</Key>',
'<Code>', errorObj.error.message, '</Code>',
'<Message>', errorObj.error.description, '</Message>',
'<Key>', escapeForXML(errorObj.entry.key), '</Key>',
'<Code>', escapeForXML(errorObj.error.message), '</Code>');
if (errorObj.entry.versionId) {
const version = errorObj.entry.versionId === 'null' ?
'null' : escapeForXML(errorObj.entry.versionId);
errorXML.push('<VersionId>', version, '</VersionId>');
}
errorXML.push(
'<Message>',
escapeForXML(errorObj.error.description),
'</Message>',
'</Error>'
);
});
@ -79,12 +97,34 @@ function _formatXML(quietSetting, errorResults, deleted) {
return xml.join('');
}
const deletedXML = [];
deleted.forEach(objKey => {
deleted.forEach(version => {
const isDeleteMarker = version.isDeleteMarker;
const deleteMarkerVersionId = version.deleteMarkerVersionId;
// if deletion resulted in new delete marker or deleting a delete marker
deletedXML.push(
'<Deleted>',
'<Key>', escapeForXML(objKey), '</Key>',
'</Deleted>'
'<Key>',
escapeForXML(version.entry.key),
'</Key>'
);
if (version.entry.versionId) {
deletedXML.push(
'<VersionId>',
escapeForXML(version.entry.versionId),
'</VersionId>'
);
}
if (isDeleteMarker) {
deletedXML.push(
'<DeleteMarker>',
isDeleteMarker,
'</DeleteMarker>',
'<DeleteMarkerVersionId>',
deleteMarkerVersionId,
'</DeleteMarkerVersionId>'
);
}
deletedXML.push('</Deleted>');
});
xml[2] = deletedXML.join('');
return xml.join('');
@ -98,15 +138,34 @@ function _parseXml(xmlToParse, next) {
const json = result.Delete;
// not quiet is the default if nothing specified
const quietSetting = json.Quiet && json.Quiet[0] === 'true';
// format of json is {"Object":[{"Key":["test1"]},{"Key":["test2"]}]}
const objects = json.Object.map(item => item.Key[0]);
// format of json is
// {"Object":[
// {"Key":["test1"],"VersionId":["vid"]},
// {"Key":["test2"]}
// ]}
const objects = [];
for (let i = 0; i < json.Object.length; i++) {
const item = json.Object[i];
if (!item.Key) {
return next(errors.MalformedXML);
}
const object = { key: item.Key[0] };
if (item.VersionId) {
object.versionId = item.VersionId[0];
}
objects.push(object);
}
return next(null, quietSetting, objects);
});
}
/**
* gets object metadata and deletes object
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {string} canonicalID - canonicalId of requester
* @param {object} request - http request
* @param {string} bucketName - bucketName
* @param {BucketInfo} bucket - bucket
* @param {boolean} quietSetting - true if xml should just include error list
* and false if should include deleted list and error list
* @param {object []} errorResults - list of error result objects with each
@ -118,58 +177,114 @@ function _parseXml(xmlToParse, next) {
* @callback called with (err, quietSetting, errorResults, numOfObjects,
* successfullyDeleted, totalContentLengthDeleted)
*/
export function getObjMetadataAndDelete(bucketName, quietSetting,
errorResults, inPlay, log, next) {
function getObjMetadataAndDelete(authInfo, canonicalID, request,
bucketName, bucket, quietSetting, errorResults, inPlay, log, next) {
const successfullyDeleted = [];
let totalContentLengthDeleted = 0;
let numOfObjects = 0;
// for obj deletes, no need to check acl's at object level
// (authority is at the bucket level for obj deletes)
let numOfObjectsRemoved = 0;
const skipError = new Error('skip');
// doing 5 requests at a time. note that the data wrapper
// will do 5 parallel requests to data backend to delete parts
return async.forEachLimit(inPlay, 5, (key, moveOn) => {
metadata.getObjectMD(bucketName, key, log, (err, objMD) => {
return async.forEachLimit(inPlay, 5, (entry, moveOn) => {
async.waterfall([
callback => {
let decodedVersionId;
if (entry.versionId) {
decodedVersionId = entry.versionId === 'null' ?
'null' : versionIdUtils.decode(entry.versionId);
}
if (decodedVersionId instanceof Error) {
return callback(errors.NoSuchVersion);
}
return callback(null, decodedVersionId);
},
// for obj deletes, no need to check acl's at object level
// (authority is at the bucket level for obj deletes)
(versionId, callback) => metadataGetObject(bucketName, entry.key,
versionId, log, (err, objMD) => {
// if general error from metadata return error
if (err && !err.NoSuchKey) {
log.error('error getting object MD', { error: err, key });
errorResults.push({
key,
error: err,
});
return moveOn();
return callback(err);
}
// if particular key does not exist, AWS returns success
// for key so add to successfullyDeleted list and move on
if (err && err.NoSuchKey) {
successfullyDeleted.push(key);
const verCfg = bucket.getVersioningConfiguration();
// To adhere to AWS behavior, create a delete marker
// if trying to delete an object that does not exist
// when versioning has been configured
if (verCfg && !entry.versionId) {
log.debug('key does not exist; a delete marker ' +
'will be created to adhere to AWS behavior');
return callback(null, objMD, versionId);
}
// otherwise if particular key does not exist, AWS
// returns success for key so add to successfullyDeleted
// list and move on
successfullyDeleted.push({ entry });
return callback(skipError);
}
return callback(null, objMD, versionId);
}),
(objMD, versionId, callback) =>
preprocessingVersioningDelete(bucketName, bucket, objMD,
versionId, log, (err, options) => callback(err, options,
objMD)),
(options, objMD, callback) => {
const deleteInfo = {};
if (options && options.deleteData) {
deleteInfo.deleted = true;
return services.deleteObject(bucketName, objMD,
entry.key, options, log, err =>
callback(err, objMD, deleteInfo));
}
deleteInfo.newDeleteMarker = true;
// This call will create a delete-marker
return createAndStoreObject(bucketName, bucket, entry.key,
objMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, log, (err, result) =>
callback(err, objMD, deleteInfo, result.versionId));
},
], (err, objMD, deleteInfo, versionId) => {
if (err === skipError) {
return moveOn();
} else if (err) {
log.error('error deleting object', { error: err, entry });
errorResults.push({ entry, error: err });
return moveOn();
}
return services.deleteObject(bucketName, objMD, key, log,
err => {
if (err) {
log.error('error deleting object', { error: err, key });
errorResults.push({
key,
error: err,
});
return moveOn();
if (deleteInfo.deleted && objMD['content-length']) {
numOfObjectsRemoved++;
totalContentLengthDeleted += objMD['content-length'];
}
if (objMD['content-length']) {
totalContentLengthDeleted +=
objMD['content-length'];
let isDeleteMarker;
let deleteMarkerVersionId;
// - If trying to delete an object that does not exist (if a new
// delete marker was created)
// - Or if an object exists but no version was specified
// return DeleteMarkerVersionId equals the versionID of the marker
// you just generated and DeleteMarker tag equals true
if (deleteInfo.newDeleteMarker) {
isDeleteMarker = true;
deleteMarkerVersionId = versionIdUtils.encode(versionId);
// In this case we are putting a new object (i.e., the delete
// marker), so we decrement the numOfObjectsRemoved value.
numOfObjectsRemoved--;
// If trying to delete a delete marker, DeleteMarkerVersionId equals
// deleteMarker's versionID and DeleteMarker equals true
} else if (objMD && objMD.isDeleteMarker) {
isDeleteMarker = true;
deleteMarkerVersionId = entry.versionId;
}
numOfObjects++;
successfullyDeleted.push(key);
successfullyDeleted.push({ entry, isDeleteMarker,
deleteMarkerVersionId });
return moveOn();
});
});
},
// end of forEach func
err => {
log.trace('finished deleting objects', { numOfObjects });
return next(err, quietSetting, errorResults, numOfObjects,
successfullyDeleted, totalContentLengthDeleted);
log.trace('finished deleting objects', { numOfObjectsRemoved });
return next(err, quietSetting, errorResults, numOfObjectsRemoved,
successfullyDeleted, totalContentLengthDeleted, bucket);
});
}
@ -177,7 +292,7 @@ export function getObjMetadataAndDelete(bucketName, quietSetting,
* multiObjectDelete - Delete multiple objects
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http.IncomingMessage as modified by
* lib/utils.normalizeRequest and routes/routePOST.js
* lib/utils and routes/routePOST.js
* @param {object} request.headers - request headers
* @param {object} request.query - query from request
* @param {string} request.post - concatenation of request body
@ -188,7 +303,6 @@ export function getObjMetadataAndDelete(bucketName, quietSetting,
* @param {function} callback - callback to server
* @return {undefined}
*/
export default
function multiObjectDelete(authInfo, request, log, callback) {
log.debug('processing request', { method: 'multiObjectDelete' });
if (!request.post) {
@ -205,7 +319,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
return async.waterfall([
function parseXML(next) {
return _parseXml(request.post, (err, quietSetting, objects) => {
return _parseXml(request.post,
(err, quietSetting, objects) => {
if (err || objects.length < 1 || objects.length > 1000) {
return next(errors.MalformedXML);
}
@ -213,11 +328,9 @@ function multiObjectDelete(authInfo, request, log, callback) {
});
},
function checkPolicies(quietSetting, objects, next) {
// track the error results for any keys with
// an error response
const errorResults = [];
// track keys that are still on track to be deleted
const inPlay = [];
const errorResults = [];
// if request from account, no need to check policies
// all objects are inPlay so send array of object keys
// as inPlay argument
@ -246,16 +359,22 @@ function multiObjectDelete(authInfo, request, log, callback) {
signatureAge: authParams.params.data.signatureAge,
},
parameterize: {
specificResource: objects,
// eslint-disable-next-line
specificResource: objects.map(entry => {
return {
key: entry.key,
versionId: entry.versionId,
};
}),
},
};
return vault.checkPolicies(requestContextParams, authInfo.getArn(),
log, (err, authorizationResults) => {
// there were no policies so received a blanket AccessDenied
if (err && err.AccessDenied) {
objects.forEach(key => {
objects.forEach(entry => {
errorResults.push({
key,
entry,
error: errors.AccessDenied });
});
// send empty array for inPlay
@ -280,19 +399,23 @@ function multiObjectDelete(authInfo, request, log, callback) {
for (let i = 0; i < authorizationResults.length; i++) {
const result = authorizationResults[i];
// result is { isAllowed: true,
// arn: arn:aws:s3:::bucket/object} unless not allowed
// arn: arn:aws:s3:::bucket/object,
// versionId: sampleversionId } unless not allowed
// in which case no isAllowed key will be present
const slashIndex = result.arn.indexOf('/');
if (slashIndex === -1) {
log.error('wrong arn format from vault');
return next(errors.InternalError);
}
const key = result.arn.slice(slashIndex + 1);
const entry = {
key: result.arn.slice(slashIndex + 1),
versionId: result.versionId,
};
if (result.isAllowed) {
inPlay.push(key);
inPlay.push(entry);
} else {
errorResults.push({
key,
entry,
error: errors.AccessDenied,
});
}
@ -327,9 +450,9 @@ function multiObjectDelete(authInfo, request, log, callback) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results
inPlay.forEach(key => {
inPlay.forEach(entry => {
errorResults.push({
key,
entry,
error: errors.AccessDenied,
});
});
@ -344,16 +467,11 @@ function multiObjectDelete(authInfo, request, log, callback) {
},
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
bucket, next) {
return getObjMetadataAndDelete(bucketName, quietSetting,
errorResults, inPlay, log, (err, quietSetting, errorResults,
numOfObjects, successfullyDeleted,
totalContentLengthDeleted) => {
next(err, quietSetting, errorResults,
numOfObjects, successfullyDeleted,
totalContentLengthDeleted, bucket);
});
return getObjMetadataAndDelete(authInfo, canonicalID, request,
bucketName, bucket, quietSetting, errorResults, inPlay,
log, next);
},
], (err, quietSetting, errorResults, numOfObjects,
], (err, quietSetting, errorResults, numOfObjectsRemoved,
successfullyDeleted, totalContentLengthDeleted, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
@ -366,8 +484,13 @@ function multiObjectDelete(authInfo, request, log, callback) {
authInfo,
bucket: bucketName,
byteLength: totalContentLengthDeleted,
numberOfObjects: numOfObjects,
numberOfObjects: numOfObjectsRemoved,
});
return callback(null, xml, corsHeaders);
});
}
module.exports = {
getObjMetadataAndDelete,
multiObjectDelete,
};
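
The reworked multiObjectDelete accepts an optional VersionId per object and reports DeleteMarker / DeleteMarkerVersionId in the response, as the XML builder above shows. A sketch of a versioned batch delete with the aws-sdk v2 client; bucket, keys and version id are placeholders:

// Sketch only: one entry without a version id (creates a delete marker on
// a versioned bucket) and one targeting a specific version.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    s3ForcePathStyle: true,
});

s3.deleteObjects({
    Bucket: 'versioned-bucket',
    Delete: {
        Quiet: false,
        Objects: [
            { Key: 'doc1' },
            { Key: 'doc2', VersionId: 'exampleVersionId' },
        ],
    },
}, (err, data) => {
    if (err) {
        return console.error(err);
    }
    // Each data.Deleted entry mirrors a <Deleted> element: Key, VersionId,
    // and DeleteMarker/DeleteMarkerVersionId when a marker is involved.
    // data.Errors entries mirror <Error> elements with Key and VersionId.
    return console.log(data.Deleted, data.Errors);
});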

View File

@ -1,12 +1,13 @@
import async from 'async';
import { errors } from 'arsenal';
import config from '../Config';
const async = require('async');
const { errors } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import data from '../data/wrapper';
import services from '../services';
import { pushMetric } from '../utapi/utilities';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const data = require('../data/wrapper');
const services = require('../services');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');
/**
* multipartDelete - DELETE an open multipart upload from a bucket
@ -19,7 +20,6 @@ import { pushMetric } from '../utapi/utilities';
* @return {undefined} calls callback from router
* with err, result and responseMetaHeaders as arguments
*/
export default
function multipartDelete(authInfo, request, log, callback) {
log.debug('processing request', { method: 'multipartDelete' });
@ -32,7 +32,6 @@ function multipartDelete(authInfo, request, log, callback) {
objectKey,
uploadId,
requestType: 'deleteMPU',
log,
};
// For validating the request at the destinationBucket level
// params are the same as validating at the MPU level
@ -42,7 +41,7 @@ function multipartDelete(authInfo, request, log, callback) {
async.waterfall([
function checkDestBucketVal(next) {
services.metadataValidateAuthorization(metadataValParams,
metadataValidateBucketAndObj(metadataValParams, log,
(err, destinationBucket) => {
if (err) {
return next(err, destinationBucket);
@ -61,6 +60,7 @@ function multipartDelete(authInfo, request, log, callback) {
});
},
function checkMPUval(destBucket, next) {
metadataValParams.log = log;
services.metadataValidateMultipart(metadataValParams,
(err, mpuBucket, mpuOverviewArray) => {
if (err) {
@ -136,8 +136,7 @@ function multipartDelete(authInfo, request, log, callback) {
// if legacy behavior is enabled for 'us-east-1' and
// request is from 'us-east-1', return 404 instead of
// 204
if (config.usEastBehavior &&
locationConstraint === 'us-east-1') {
if (isLegacyAWSBehavior(locationConstraint)) {
return callback(err, corsHeaders);
}
// otherwise ignore error and return 204 status code
@ -146,3 +145,5 @@ function multipartDelete(authInfo, request, log, callback) {
return callback(err, corsHeaders);
});
}
module.exports = multipartDelete;
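
After this change, aborting an upload that no longer exists returns 204 unless isLegacyAWSBehavior applies to the bucket's location constraint, in which case the NoSuchUpload error is forwarded. A brief sketch with the aws-sdk v2 client and placeholder names:

// Sketch only: the upload id below is a placeholder for one that has
// already been aborted or completed.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    s3ForcePathStyle: true,
});

s3.abortMultipartUpload({
    Bucket: 'demo-bucket',
    Key: 'big-object',
    UploadId: 'no-longer-existing-upload-id',
}, err => {
    // err is null (204) by default; NoSuchUpload (404) only when
    // isLegacyAWSBehavior(locationConstraint) is true.
    console.log(err);
});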

View File

@ -1,15 +1,25 @@
import async from 'async';
import { errors } from 'arsenal';
const async = require('async');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import data from '../data/wrapper';
import kms from '../kms/wrapper';
import { logger } from '../utilities/logger';
import services from '../services';
import utils from '../utils';
import validateHeaders from '../utilities/validateHeaders';
import { pushMetric } from '../utapi/utilities';
import removeAWSChunked from './apiUtils/object/removeAWSChunked';
const { errors, versioning, s3validators } = require('arsenal');
const getMetaHeaders = s3validators.userMetadata.getMetaHeaders;
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
const { checkQueryVersionId, versioningPreprocessing }
= require('./apiUtils/object/versioning');
const data = require('../data/wrapper');
const kms = require('../kms/wrapper');
const logger = require('../utilities/logger');
const services = require('../services');
const validateHeaders = require('../utilities/validateHeaders');
const { pushMetric } = require('../utapi/utilities');
const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
.validateWebsiteHeader;
const versionIdUtils = versioning.VersionID;
/**
* Preps metadata to be saved (based on copy or replace request header)
@ -30,6 +40,13 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
if (whichMetadata !== 'COPY' && whichMetadata !== 'REPLACE') {
return { error: errors.InvalidArgument };
}
let whichTagging = headers['x-amz-tagging-directive'];
// Default is COPY
whichTagging = whichTagging === undefined ? 'COPY' : whichTagging;
if (whichTagging !== 'COPY' && whichTagging !== 'REPLACE') {
return { error: errors.InvalidArgument
.customizeDescription('Unknown tagging directive') };
}
const overrideMetadata = {};
if (headers['x-amz-server-side-encryption']) {
overrideMetadata['x-amz-server-side-encryption'] =
@ -56,8 +73,28 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
// If COPY, pull all x-amz-meta keys/values from source object
// Otherwise, pull all x-amz-meta keys/values from request headers
const userMetadata = whichMetadata === 'COPY' ?
utils.getMetaHeaders(sourceObjMD) :
utils.getMetaHeaders(headers);
getMetaHeaders(sourceObjMD) :
getMetaHeaders(headers);
if (userMetadata instanceof Error) {
log.debug('user metadata validation failed', {
error: userMetadata,
method: 'objectCopy',
});
return { error: userMetadata };
}
// If tagging directive is REPLACE but you don't specify any
// tags in the request, the destination object will
// not have any tags.
// If tagging directive is COPY but the source object does not have tags,
// the destination object will not have any tags.
let tagging;
let taggingCopy;
if (whichTagging === 'COPY') {
taggingCopy = sourceObjMD.tags || {};
} else {
tagging = headers['x-amz-tagging'] || '';
}
// If COPY, pull the necessary headers from source object
// Otherwise, pull them from request headers
@ -80,6 +117,8 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
expires: headersToStoreSource.expires,
overrideMetadata,
lastModifiedDate: new Date().toJSON(),
tagging,
taggingCopy,
};
// In case whichMetadata === 'REPLACE' but contentType is undefined in copy
@ -99,13 +138,13 @@ function _prepMetadata(sourceObjMD, headers, sourceIsDestination, authInfo,
* includes normalized headers
* @param {string} sourceBucket - name of source bucket for object copy
* @param {string} sourceObject - name of source object for object copy
* @param {string} sourceVersionId - versionId of source object for copy
* @param {object} log - the log request
* @param {function} callback - final callback to call with the result
* @return {undefined}
*/
export default
function objectCopy(authInfo, request, sourceBucket,
sourceObject, log, callback) {
sourceObject, sourceVersionId, log, callback) {
log.debug('processing request', { method: 'objectCopy' });
const destBucketName = request.bucketName;
const destObjectKey = request.objectKey;
@ -115,34 +154,38 @@ function objectCopy(authInfo, request, sourceBucket,
authInfo,
bucketName: sourceBucket,
objectKey: sourceObject,
versionId: sourceVersionId,
requestType: 'objectGet',
log,
};
const valPutParams = {
authInfo,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPut',
log,
};
const dataStoreContext = {
bucketName: destBucketName,
owner: authInfo.getCanonicalID(),
namespace: request.namespace,
objectKey: destObjectKey,
};
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (!utils.validateWebsiteHeader(websiteRedirectHeader)) {
if (!validateWebsiteHeader(websiteRedirectHeader)) {
const err = errors.InvalidRedirectLocation;
log.debug('invalid x-amz-website-redirect-location ' +
`value ${websiteRedirectHeader}`, { error: err });
return callback(err);
}
const queryContainsVersionId = checkQueryVersionId(request.query);
if (queryContainsVersionId instanceof Error) {
return callback(queryContainsVersionId);
}
return async.waterfall([
function checkDestAuth(next) {
return services.metadataValidateAuthorization(valPutParams,
return metadataValidateBucketAndObj(valPutParams, log,
(err, destBucketMD, destObjMD) => {
if (err) {
log.debug('error validating put part of request',
@ -160,15 +203,32 @@ function objectCopy(authInfo, request, sourceBucket,
});
},
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
return services.metadataValidateAuthorization(valGetParams,
return metadataValidateBucketAndObj(valGetParams, log,
(err, sourceBucketMD, sourceObjMD) => {
if (err) {
log.debug('error validating get part of request',
{ error: err });
return next(err, destBucketMD);
return next(err, null, destBucketMD);
}
if (!sourceObjMD) {
const err = sourceVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.debug('no source object', { sourceObject });
return next(err, null, destBucketMD);
}
if (sourceObjMD.isDeleteMarker) {
log.debug('delete marker on source object',
{ sourceObject });
if (sourceVersionId) {
const err = errors.InvalidRequest
.customizeDescription('The source of a copy ' +
'request may not specifically refer to a delete ' +
'marker by version id.');
return next(err, destBucketMD);
}
// if user specifies a key in a versioned source bucket
// without specifying a version, and the object has
// a delete marker, return NoSuchKey
return next(errors.NoSuchKey, destBucketMD);
}
const headerValResult =
@ -202,8 +262,8 @@ function objectCopy(authInfo, request, sourceBucket,
sourceObjMD['x-amz-server-side-encryption'];
}
}
return next(null, storeMetadataParams,
dataLocator, destBucketMD, destObjMD);
return next(null, storeMetadataParams, dataLocator,
destBucketMD, destObjMD);
});
},
function goGetData(storeMetadataParams, dataLocator, destBucketMD,
@ -217,6 +277,14 @@ function objectCopy(authInfo, request, sourceBucket,
return next(null, storeMetadataParams, dataLocator, destObjMD,
serverSideEncryption, destBucketMD);
}
const backendInfoObj = locationConstraintCheck(request,
storeMetadataParams.metaHeaders, destBucketMD, log);
if (backendInfoObj.err) {
return next(backendInfoObj.err);
}
const backendInfo = backendInfoObj.backendInfo;
// dataLocator is an array. need to get and put all parts
// For now, copy 1 part at a time. Could increase the second
// argument here to increase the number of parts
@ -237,7 +305,8 @@ function objectCopy(authInfo, request, sourceBucket,
return cb(errors.InternalError);
}
return data.put(cipherBundle, stream,
part.size, dataStoreContext, log,
part.size, dataStoreContext,
backendInfo, log,
(error, partRetrievalInfo) => {
if (error) {
return cb(error);
@ -259,14 +328,17 @@ function objectCopy(authInfo, request, sourceBucket,
}
// Copied object is not encrypted so just put it
// without a cipherBundle
return data.put(null, stream, part.size,
dataStoreContext, log, (error, partRetrievalInfo) => {
dataStoreContext, backendInfo,
log, (error, partRetrievalInfo) => {
if (error) {
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo.dataStoreName,
dataStoreName: partRetrievalInfo.
dataStoreName,
start: part.start,
size: part.size,
};
@ -283,37 +355,58 @@ function objectCopy(authInfo, request, sourceBucket,
destObjMD, serverSideEncryption, destBucketMD);
});
},
function storeNewMetadata(storeMetadataParams, destDataGetInfoArr,
function getVersioningInfo(storeMetadataParams, destDataGetInfoArr,
destObjMD, serverSideEncryption, destBucketMD, next) {
return versioningPreprocessing(destBucketName,
destBucketMD, destObjectKey, destObjMD, log,
(err, options) => {
if (err) {
log.debug('error processing versioning info',
{ error: err });
return next(err, null, destBucketMD);
}
// eslint-disable-next-line
storeMetadataParams.versionId = options.versionId;
// eslint-disable-next-line
storeMetadataParams.versioning = options.versioning;
// eslint-disable-next-line
storeMetadataParams.isNull = options.isNull;
// eslint-disable-next-line
storeMetadataParams.nullVersionId = options.nullVersionId;
const dataToDelete = options.dataToDelete;
return next(null, storeMetadataParams, destDataGetInfoArr,
destObjMD, serverSideEncryption, destBucketMD,
dataToDelete);
});
},
function storeNewMetadata(storeMetadataParams, destDataGetInfoArr,
destObjMD, serverSideEncryption, destBucketMD, dataToDelete, next) {
return services.metadataStoreObject(destBucketName,
destDataGetInfoArr,
serverSideEncryption, storeMetadataParams, err => {
destDataGetInfoArr, serverSideEncryption,
storeMetadataParams, (err, result) => {
if (err) {
log.debug('error storing new metadata', { error: err });
return next(err, destBucketMD);
return next(err, null, destBucketMD);
}
// Clean up any potential orphans in data if object
// put is an overwrite of already existing
// object with same name
// so long as the source is not the same as the destination
let dataToDelete;
if (destObjMD && destObjMD.location &&
!sourceIsDestination) {
dataToDelete = Array.isArray(destObjMD.location) ?
destObjMD.location : [destObjMD.location];
data.batchDelete(dataToDelete,
// object with same name, so long as the source is not
// the same as the destination
if (!sourceIsDestination && dataToDelete) {
data.batchDelete(dataToDelete, request.method, null,
logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids()));
}
const sourceObjSize = storeMetadataParams.size;
const destObjPrevSize = destObjMD ?
const destObjPrevSize = (destObjMD &&
destObjMD['content-length'] !== undefined) ?
destObjMD['content-length'] : null;
return next(null, destBucketMD, storeMetadataParams,
return next(null, result, destBucketMD, storeMetadataParams,
serverSideEncryption, sourceObjSize, destObjPrevSize);
});
},
], (err, destBucketMD, storeMetadataParams, serverSideEncryption,
sourceObjSize, destObjPrevSize) => {
], (err, storingNewMdResult, destBucketMD, storeMetadataParams,
serverSideEncryption, sourceObjSize, destObjPrevSize) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, destBucketMD);
@ -338,15 +431,24 @@ function objectCopy(authInfo, request, sourceBucket,
serverSideEncryption.masterKeyId;
}
}
if (sourceVersionId) {
additionalHeaders['x-amz-copy-source-version-id'] =
versionIdUtils.encode(sourceVersionId);
}
const isVersioned = storingNewMdResult && storingNewMdResult.versionId;
if (isVersioned) {
additionalHeaders['x-amz-version-id'] =
versionIdUtils.encode(storingNewMdResult.versionId);
}
pushMetric('copyObject', log, {
authInfo,
bucket: destBucketName,
newByteLength: sourceObjSize,
oldByteLength: destObjPrevSize,
oldByteLength: isVersioned ? null : destObjPrevSize,
});
// TODO: Add version headers for response
// (if source or destination is version).
// Add expiration header if lifecycle enabled
return callback(null, xml, additionalHeaders);
});
}
module.exports = objectCopy;
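
objectCopy now honors x-amz-tagging-directive and a version-qualified copy source, and echoes x-amz-copy-source-version-id / x-amz-version-id back to the client. A sketch of a version-aware copy with replaced tags, assuming the aws-sdk v2 client and placeholder buckets, key and version id:

// Sketch only: copies a specific source version and replaces its tags;
// omitting ?versionId= copies the latest version instead (and a delete
// marker there yields NoSuchKey, per the checks above).
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    s3ForcePathStyle: true,
});

s3.copyObject({
    Bucket: 'dest-bucket',
    Key: 'copied-key',
    CopySource: 'source-bucket/source-key?versionId=exampleVersionId',
    MetadataDirective: 'COPY',
    TaggingDirective: 'REPLACE',
    Tagging: 'project=demo&team=storage',
}, (err, data) => {
    if (err) {
        return console.error(err);
    }
    // Maps to x-amz-copy-source-version-id and x-amz-version-id.
    return console.log(data.CopySourceVersionId, data.VersionId);
});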

View File

@ -1,13 +1,18 @@
import { errors } from 'arsenal';
const async = require('async');
const { errors, versioning } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import services from '../services';
import { pushMetric } from '../utapi/utilities';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const services = require('../services');
const { pushMetric } = require('../utapi/utilities');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { decodeVersionId, preprocessingVersioningDelete }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const versionIdUtils = versioning.VersionID;
/**
* objectDelete - DELETE an object from a bucket
* (currently supports only non-versioned buckets)
* @param {AuthInfo} authInfo - requester's info
* @param {object} request - request object given by router,
* includes normalized headers
@ -15,52 +20,134 @@ import { pushMetric } from '../utapi/utilities';
* @param {function} cb - final cb to call with the result and response headers
* @return {undefined}
*/
function objectDelete(authInfo, request, log, cb) {
log.debug('processing request', { method: 'objectDelete' });
if (authInfo.isRequesterPublicUser()) {
log.debug('operation not available for public user');
return cb(errors.AccessDenied);
}
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return cb(decodedVidResult);
}
const reqVersionId = decodedVidResult;
const valParams = {
authInfo,
bucketName,
objectKey,
versionId: reqVersionId,
requestType: 'objectDelete',
log,
};
const canonicalID = authInfo.getCanonicalID();
return async.waterfall([
function validateBucketAndObj(next) {
return metadataValidateBucketAndObj(valParams, log,
(err, bucketMD, objMD) => {
if (err) {
log.debug('error processing request', {
error: err,
method: 'metadataValidateAuthorization',
});
return next(err, bucketMD);
}
const versioningCfg = bucketMD.getVersioningConfiguration();
if (!objMD) {
if (!versioningCfg) {
return next(errors.NoSuchKey, bucketMD);
}
// AWS does not return an error when trying to delete a
// specific version that does not exist. We skip to the end
// of the waterfall here.
if (reqVersionId) {
log.debug('trying to delete specific version ' +
'that does not exist');
return next(errors.NoSuchVersion, bucketMD);
}
// To adhere to AWS behavior, create a delete marker even
// if trying to delete an object that does not exist when
// versioning has been configured
return next(null, bucketMD, objMD);
}
if (objMD['content-length'] !== undefined) {
log.end().addDefaultFields({
bytesDeleted: objMD['content-length'],
});
}
return next(null, bucketMD, objMD);
});
},
function getVersioningInfo(bucketMD, objectMD, next) {
return preprocessingVersioningDelete(bucketName,
bucketMD, objectMD, reqVersionId, log,
(err, options) => {
if (err) {
log.error('err processing versioning info',
{ error: err });
return next(err, bucketMD);
}
return next(null, bucketMD, objectMD, options);
});
},
function deleteOperation(bucketMD, objectMD, delOptions, next) {
const deleteInfo = {
removeDeleteMarker: false,
newDeleteMarker: false,
};
if (delOptions && delOptions.deleteData) {
if (objectMD.isDeleteMarker) {
// record that we deleted a delete marker to set
// response headers accordingly
deleteInfo.removeDeleteMarker = true;
}
return services.deleteObject(bucketName, objectMD, objectKey,
delOptions, log, (err, delResult) => next(err, bucketMD,
objectMD, delResult, deleteInfo));
}
// putting a new delete marker
deleteInfo.newDeleteMarker = true;
return createAndStoreObject(bucketName, bucketMD,
objectKey, objectMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) =>
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo));
},
], (err, bucketMD, objectMD, result, deleteInfo) => {
const resHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucketMD);
// if deleting a specific version or delete marker, return version id
// in the response headers, even in case of NoSuchVersion
if (reqVersionId) {
resHeaders['x-amz-version-id'] = reqVersionId === 'null' ?
reqVersionId : versionIdUtils.encode(reqVersionId);
if (deleteInfo && deleteInfo.removeDeleteMarker) {
resHeaders['x-amz-delete-marker'] = true;
}
}
if (err) {
log.debug('error processing request', { error: err,
method: 'objectDelete' });
return cb(err, resHeaders);
}
if (deleteInfo.newDeleteMarker) {
// if we created a new delete marker, return true for
// x-amz-delete-marker and the version ID of the new delete marker
if (result.versionId) {
resHeaders['x-amz-delete-marker'] = true;
resHeaders['x-amz-version-id'] = result.versionId === 'null' ?
result.versionId : versionIdUtils.encode(result.versionId);
}
pushMetric('putDeleteMarkerObject', log, { authInfo,
bucket: bucketName });
} else {
pushMetric('deleteObject', log, { authInfo, bucket: bucketName,
byteLength: objectMD['content-length'], numberOfObjects: 1 });
}
return cb(err, resHeaders);
});
}
module.exports = objectDelete;
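A rough sketch of the decision tree the waterfall above implements, assuming (as the calls suggest) that preprocessingVersioningDelete requests a data delete when a specific version is targeted or the bucket is unversioned, and a delete-marker put otherwise. Names are illustrative, not the project's helpers.

    // Sketch: what happens for a DELETE depending on versioning state,
    // existing metadata, and whether a versionId was supplied.
    function decideDelete(versioningCfg, objMD, reqVersionId) {
        if (!objMD) {
            if (!versioningCfg) {
                return { error: 'NoSuchKey' };
            }
            if (reqVersionId) {
                // deleting a specific version that does not exist
                return { error: 'NoSuchVersion' };
            }
            // versioning configured: deleting a missing key still
            // creates a delete marker, matching AWS behavior
            return { action: 'putDeleteMarker' };
        }
        if (reqVersionId) {
            // delete exactly that version (possibly a delete marker,
            // in which case x-amz-delete-marker is set on the response)
            return {
                action: 'deleteVersion',
                removesDeleteMarker: Boolean(objMD.isDeleteMarker),
            };
        }
        // no version specified: versioned buckets get a new delete marker,
        // unversioned buckets delete the object outright
        return versioningCfg ? { action: 'putDeleteMarker' }
            : { action: 'deleteObject' };
    }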

View File

@ -0,0 +1,94 @@
const async = require('async');
const { errors } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
/**
* Object Delete Tagging - Delete tag set from an object
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function objectDeleteTagging(authInfo, request, log, callback) {
log.debug('processing request', { method: 'objectDeleteTagging' });
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return process.nextTick(() => callback(decodedVidResult));
}
const reqVersionId = decodedVidResult;
const metadataValParams = {
authInfo,
bucketName,
objectKey,
requestType: 'bucketOwnerAction',
versionId: reqVersionId,
};
return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectDeleteTagging', error: err });
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error no object metadata found',
{ method: 'objectDeleteTagging', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
log.trace('version is a delete marker',
{ method: 'objectDeleteTagging' });
return next(errors.MethodNotAllowed, bucket);
}
return next(null, bucket, objectMD);
}),
(bucket, objectMD, next) => {
// eslint-disable-next-line no-param-reassign
objectMD.tags = {};
const params = objectMD.versionId ? { versionId:
objectMD.versionId } : {};
metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
log, err =>
next(err, bucket, objectMD));
},
], (err, bucket, objectMD) => {
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.trace('error processing request', { error: err,
method: 'objectDeleteTagging' });
} else {
pushMetric('deleteObjectTagging', log, {
authInfo,
bucket: bucketName,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objectMD);
}
return callback(err, additionalResHeaders);
});
}
module.exports = objectDeleteTagging;
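An illustrative sketch of the tag-removal step above: the tag set is cleared and the metadata is written back pinned to the same version, so a concurrent newer version is not rewritten by mistake. Helper names are hypothetical; the metadata wrapper is passed in to keep the sketch self-contained.

    // Build the putObjectMD params so a versioned entry is updated in place.
    function buildPutMDParams(objectMD) {
        return objectMD.versionId ? { versionId: objectMD.versionId } : {};
    }

    // Clear the tag set and persist the updated metadata.
    function clearTags(metadata, bucketName, objectKey, objectMD, log, cb) {
        objectMD.tags = {};
        return metadata.putObjectMD(bucketName, objectKey, objectMD,
            buildPutMDParams(objectMD), log, cb);
    }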

View File

@ -1,91 +1,113 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import { parseRange } from './apiUtils/object/parseRange';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import collectResponseHeaders from '../utilities/collectResponseHeaders';
import services from '../services';
import validateHeaders from '../utilities/validateHeaders';
import { pushMetric } from '../utapi/utilities';
const { parseRange } = require('arsenal/lib/network/http/utils');
const { decodeVersionId } = require('./apiUtils/object/versioning');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const validateHeaders = require('../utilities/validateHeaders');
const { pushMetric } = require('../utapi/utilities');
const { getVersionIdResHeader } = require('./apiUtils/object/versioning');
const setPartRanges = require('./apiUtils/object/setPartRanges');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
/**
* GET Object - Get an object
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - normalized request object
* @param {boolean} returnTagCount - returns the x-amz-tagging-count header
* @param {object} log - Werelogs instance
* @param {function} callback - callback to function in route
* @return {undefined}
*/
function objectGet(authInfo, request, returnTagCount, log, callback) {
log.debug('processing request', { method: 'objectGet' });
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return callback(decodedVidResult);
}
const versionId = decodedVidResult;
const mdValParams = {
authInfo,
bucketName,
objectKey,
versionId,
requestType: 'objectGet',
log,
};
return metadataValidateBucketAndObj(mdValParams, log,
(err, bucket, objMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'metadataValidateBucketAndObj',
});
return callback(err, null, corsHeaders);
}
if (!objMD) {
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
return callback(err, null, corsHeaders);
}
const verCfg = bucket.getVersioningConfiguration();
if (objMD.isDeleteMarker) {
const responseMetaHeaders = Object.assign({},
{ 'x-amz-delete-marker': true }, corsHeaders);
if (!versionId) {
return callback(errors.NoSuchKey, null, responseMetaHeaders);
}
// return MethodNotAllowed if requesting a specific
// version that has a delete marker
responseMetaHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objMD);
return callback(errors.MethodNotAllowed, null,
responseMetaHeaders);
}
const headerValResult = validateHeaders(objMD, request.headers);
if (headerValResult.error) {
return callback(headerValResult.error, null, corsHeaders);
}
const responseMetaHeaders = collectResponseHeaders(objMD,
corsHeaders, verCfg, returnTagCount);
const objLength = (objMD.location === null ?
0 : parseInt(objMD['content-length'], 10));
let byteRange;
if (request.headers.range) {
const { range, error } = parseRange(request.headers.range,
objLength);
if (error) {
return callback(error, null, corsHeaders);
}
responseMetaHeaders['Accept-Ranges'] = 'bytes';
if (range) {
byteRange = range;
// End of range should be included so + 1
responseMetaHeaders['Content-Length'] =
range[1] - range[0] + 1;
responseMetaHeaders['Content-Range'] =
`bytes ${range[0]}-${range[1]}/${objLength}`;
}
}
let dataLocator = null;
if (objMD.location !== null) {
// To provide for backwards compatibility before
// md-model-version 2, need to handle cases where
// objMD.location is just a string
dataLocator = Array.isArray(objMD.location) ?
objMD.location : [{ key: objMD.location }];
// If have a data model before version 2, cannot support
// get range for objects with multiple parts
if (byteRange && dataLocator.length > 1 &&
dataLocator[0].start === undefined) {
return callback(errors.NotImplemented, null, corsHeaders);
}
@ -97,11 +119,15 @@ function objectGet(authInfo, request, log, callback) {
objMD['x-amz-server-side-encryption'];
}
}
dataLocator = setPartRanges(dataLocator, byteRange);
}
pushMetric('getObject', log, {
authInfo,
bucket: bucketName,
newByteLength: responseMetaHeaders['Content-Length'],
});
return callback(null, dataLocator, responseMetaHeaders, byteRange);
});
}
module.exports = objectGet;
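A self-contained sketch of the range header math used above: given an already-parsed inclusive [start, end] range and the object length, derive the response headers. It assumes the range has already been validated and clamped, as arsenal's parseRange is used for above.

    // Both ends of an HTTP byte range are inclusive, hence the + 1.
    function rangeResponseHeaders(range, objLength) {
        const [start, end] = range;
        return {
            'Accept-Ranges': 'bytes',
            'Content-Length': end - start + 1,
            'Content-Range': `bytes ${start}-${end}/${objLength}`,
        };
    }

    // rangeResponseHeaders([0, 499], 1000)
    // -> { 'Accept-Ranges': 'bytes', 'Content-Length': 500,
    //      'Content-Range': 'bytes 0-499/1000' }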

View File

@ -1,11 +1,14 @@
import { errors } from 'arsenal';
const async = require('async');
const { errors } = require('arsenal');
import aclUtils from '../utilities/aclUtils';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import { pushMetric } from '../utapi/utilities';
import services from '../services';
import vault from '../auth/vault';
const aclUtils = require('../utilities/aclUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const { pushMetric } = require('../utapi/utilities');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const vault = require('../auth/vault');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
// Sample XML response:
/*
@ -28,7 +31,6 @@ import vault from '../auth/vault';
</AccessControlPolicy>
*/
/**
* objectGetACL - Return ACL for object
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@ -37,16 +39,27 @@ import vault from '../auth/vault';
* @param {function} callback - callback to respond to http request
* @return {undefined}
*/
function objectGetACL(authInfo, request, log, callback) {
log.debug('processing request', { method: 'objectGetACL' });
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return callback(decodedVidResult);
}
const versionId = decodedVidResult;
const metadataValParams = {
authInfo,
bucketName,
objectKey,
versionId,
requestType: 'objectGetACL',
log,
};
const grantInfo = {
grants: [],
@ -60,20 +73,38 @@ export default function objectGetACL(authInfo, request, log, callback) {
constants.logId,
];
return async.waterfall([
function validateBucketAndObj(next) {
return metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectGetACL', error: err });
return next(err);
}
if (!objectMD) {
const err = versionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error processing request',
{ method: 'objectGetACL', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
if (versionId) {
log.trace('requested version is delete marker',
{ method: 'objectGetACL' });
return next(errors.MethodNotAllowed);
}
log.trace('most recent version is delete marker',
{ method: 'objectGetACL' });
return next(errors.NoSuchKey);
}
return next(null, bucket, objectMD);
});
},
function gatherACLs(bucket, objectMD, next) {
const verCfg = bucket.getVersioningConfiguration();
const resVersionId = getVersionIdResHeader(verCfg, objectMD);
const objectACL = objectMD.acl;
const allSpecificGrants = [].concat(
objectACL.FULL_CONTROL,
@ -105,11 +136,7 @@ export default function objectGetACL(authInfo, request, log, callback) {
}
grantInfo.grants = grantInfo.grants.concat(cannedGrants);
const xml = aclUtils.convertToXml(grantInfo);
return next(null, bucket, xml, resVersionId);
}
/**
* Build array of all canonicalIDs used in ACLs so duplicates
@ -139,11 +166,7 @@ export default function objectGetACL(authInfo, request, log, callback) {
*/
grantInfo.grants = grantInfo.grants.concat(uriGrantInfo);
const xml = aclUtils.convertToXml(grantInfo);
return next(null, bucket, xml, resVersionId);
}
/**
* If acl's set by account canonicalID,
@ -154,7 +177,7 @@ export default function objectGetACL(authInfo, request, log, callback) {
if (err) {
log.trace('error processing request',
{ method: 'objectGetACL', error: err });
return next(err, bucket);
}
const individualGrants = canonicalIDs.map(canonicalID => {
/**
@ -180,11 +203,22 @@ export default function objectGetACL(authInfo, request, log, callback) {
.concat(individualGrants).concat(uriGrantInfo);
// parse info about accounts and owner info to convert to xml
const xml = aclUtils.convertToXml(grantInfo);
return next(null, bucket, xml, resVersionId);
});
},
], (err, bucket, xml, resVersionId) => {
const resHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
return callback(err, null, resHeaders);
}
pushMetric('getObjectAcl', log, {
authInfo,
bucket: bucketName,
});
resHeaders['x-amz-version-id'] = resVersionId;
return callback(null, xml, resHeaders);
});
}
module.exports = objectGetACL;
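A hedged sketch of what getVersionIdResHeader appears to return in these handlers, inferred from how the handlers use it (this is an assumption based on usage, not arsenal's source): no header when the bucket has no versioning configuration, the literal string 'null' for a null version, and the encoded id otherwise.

    // encodeVersionId stands in for versioning.VersionID.encode.
    function versionIdResHeader(verCfg, objectMD, encodeVersionId) {
        if (!verCfg) {
            // bucket never had versioning configured: omit the header
            return undefined;
        }
        if (objectMD.isNull || !objectMD.versionId) {
            // the null version is reported as the string 'null'
            return 'null';
        }
        return encodeVersionId(objectMD.versionId);
    }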

View File

@ -0,0 +1,95 @@
const async = require('async');
const { errors } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { convertToXml } = require('./apiUtils/object/tagging');
/**
* Object Get Tagging - Return tag for object
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function objectGetTagging(authInfo, request, log, callback) {
log.debug('processing request', { method: 'objectGetTagging' });
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return process.nextTick(() => callback(decodedVidResult));
}
const reqVersionId = decodedVidResult;
const metadataValParams = {
authInfo,
bucketName,
objectKey,
requestType: 'bucketOwnerAction',
versionId: reqVersionId,
};
return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectGetTagging', error: err });
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error no object metadata found',
{ method: 'objectGetTagging', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
if (reqVersionId) {
log.trace('requested version is delete marker',
{ method: 'objectGetTagging' });
return next(errors.MethodNotAllowed);
}
log.trace('most recent version is delete marker',
{ method: 'objectGetTagging' });
return next(errors.NoSuchKey);
}
return next(null, bucket, objectMD);
}),
(bucket, objectMD, next) => {
const tags = objectMD.tags;
const xml = convertToXml(tags);
next(null, bucket, xml, objectMD);
},
], (err, bucket, xml, objectMD) => {
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.trace('error processing request', { error: err,
method: 'objectGetTagging' });
} else {
pushMetric('getObjectTagging', log, {
authInfo,
bucket: bucketName,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objectMD);
}
return callback(err, xml, additionalResHeaders);
});
}
module.exports = objectGetTagging;
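An illustrative stand-in for the convertToXml call above, serializing a flat { key: value } tag map into the S3 GetObjectTagging response shape. It is a sketch only; real code should also XML-escape keys and values.

    function tagsToXml(tags) {
        const tagElems = Object.keys(tags || {}).map(key =>
            `<Tag><Key>${key}</Key><Value>${tags[key]}</Value></Tag>`)
            .join('');
        return '<?xml version="1.0" encoding="UTF-8"?>' +
            `<Tagging><TagSet>${tagElems}</TagSet></Tagging>`;
    }

    // tagsToXml({ env: 'prod' }) produces a single TagSet with one Tag
    // whose Key is 'env' and Value is 'prod'.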

View File

@ -1,10 +1,13 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import collectResponseHeaders from '../utilities/collectResponseHeaders';
import services from '../services';
import validateHeaders from '../utilities/validateHeaders';
import { pushMetric } from '../utapi/utilities';
const { decodeVersionId } = require('./apiUtils/object/versioning');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const validateHeaders = require('../utilities/validateHeaders');
const { pushMetric } = require('../utapi/utilities');
const { getVersionIdResHeader } = require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
/**
* HEAD Object - Same as Get Object but only respond with headers
@ -16,42 +19,66 @@ import { pushMetric } from '../utapi/utilities';
* @return {undefined}
*
*/
function objectHead(authInfo, request, log, callback) {
log.debug('processing request', { method: 'objectHead' });
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return callback(decodedVidResult);
}
const versionId = decodedVidResult;
const mdValParams = {
authInfo,
bucketName,
objectKey,
versionId,
requestType: 'objectHead',
log,
};
return metadataValidateBucketAndObj(mdValParams, log,
(err, bucket, objMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error validating request', {
error: err,
method: 'objectHead',
});
return callback(err, corsHeaders);
}
if (!objMD) {
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
return callback(err, corsHeaders);
}
const verCfg = bucket.getVersioningConfiguration();
if (objMD.isDeleteMarker) {
const responseHeaders = Object.assign({},
{ 'x-amz-delete-marker': true }, corsHeaders);
if (!versionId) {
return callback(errors.NoSuchKey, responseHeaders);
}
// return MethodNotAllowed if requesting a specific
// version that has a delete marker
responseHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objMD);
return callback(errors.MethodNotAllowed, responseHeaders);
}
const headerValResult = validateHeaders(objMD, request.headers);
if (headerValResult.error) {
return callback(headerValResult.error, corsHeaders);
}
const responseHeaders =
collectResponseHeaders(objMD, corsHeaders, verCfg);
pushMetric('headObject', log, { authInfo, bucket: bucketName });
return callback(null, responseHeaders);
});
}
module.exports = objectHead;
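A sketch of the delete-marker branch shared by HEAD and GET above: the error depends on whether a specific version was requested. Names are illustrative; error values are shown as strings rather than arsenal error objects.

    function deleteMarkerResponse(versionIdRequested, versionIdHeader) {
        const headers = { 'x-amz-delete-marker': true };
        if (!versionIdRequested) {
            // latest version is a delete marker: behave like a missing key
            return { error: 'NoSuchKey', headers };
        }
        // explicitly addressing a delete marker by version id is refused
        headers['x-amz-version-id'] = versionIdHeader;
        return { error: 'MethodNotAllowed', headers };
    }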

View File

@ -1,151 +1,16 @@
import { errors } from 'arsenal';
const async = require('async');
const { errors, versioning } = require('arsenal');
import data from '../data/wrapper';
import services from '../services';
import aclUtils from '../utilities/aclUtils';
import utils from '../utils';
import { cleanUpBucket } from './apiUtils/bucket/bucketCreation';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import { dataStore } from './apiUtils/object/storeObject';
import constants from '../../constants';
import { logger } from '../utilities/logger';
import { pushMetric } from '../utapi/utilities';
import kms from '../kms/wrapper';
import removeAWSChunked from './apiUtils/object/removeAWSChunked';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, deleteLog, callback) {
services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, contentMD5) => {
if (err) {
return callback(err);
}
if (dataToDelete) {
data.batchDelete(dataToDelete, deleteLog);
}
return callback(null, contentMD5);
});
}
function _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
cipherBundle, request, streamingV4Params, log, callback) {
const size = request.parsedContentLength;
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (!utils.validateWebsiteHeader(websiteRedirectHeader)) {
const err = errors.InvalidRedirectLocation;
log.debug('invalid x-amz-website-redirect-location' +
`value ${websiteRedirectHeader}`, { error: err });
return callback(err);
}
const metaHeaders = utils.getMetaHeaders(request.headers);
log.trace('meta headers', { metaHeaders, method: 'objectPut' });
const objectKeyContext = {
bucketName,
owner: canonicalID,
namespace: request.namespace,
};
// If the request was made with a pre-signed url, the x-amz-acl 'header'
// might be in the query string rather than the actual headers so include
// it here
const headers = request.headers;
if (request.query && request.query['x-amz-acl']) {
headers['x-amz-acl'] = request.query['x-amz-acl'];
}
const metadataStoreParams = {
objectKey,
authInfo,
metaHeaders,
size,
contentType: request.headers['content-type'],
cacheControl: request.headers['cache-control'],
contentDisposition: request.headers['content-disposition'],
contentEncoding:
removeAWSChunked(request.headers['content-encoding']),
expires: request.headers.expires,
headers,
log,
};
let dataToDelete;
if (objMD && objMD.location) {
dataToDelete = Array.isArray(objMD.location) ?
objMD.location : [objMD.location];
}
// null - new object
// 0 or > 0 - existing object with content-length 0 or greater than 0
const prevContentLen = objMD && objMD['content-length'] !== undefined ?
objMD['content-length'] : null;
if (size !== 0) {
log.trace('storing object in data', {
method: 'services.metadataValidateAuthorization',
});
return dataStore(objectKeyContext, cipherBundle, request, size,
streamingV4Params, log, (err, dataGetInfo, calculatedHash) => {
if (err) {
log.trace('error from data', {
error: err,
method: 'dataStore',
});
return callback(err);
}
// So that data retrieval information for MPU's and
// regular puts are stored in the same data structure,
// place the retrieval info here into a single element array
const dataGetInfoArr = [{
key: dataGetInfo.key,
size,
start: 0,
dataStoreName: dataGetInfo.dataStoreName,
}];
if (cipherBundle) {
dataGetInfoArr[0].cryptoScheme = cipherBundle.cryptoScheme;
dataGetInfoArr[0].cipheredDataKey =
cipherBundle.cipheredDataKey;
}
metadataStoreParams.contentMD5 = calculatedHash;
return _storeInMDandDeleteData(
bucketName, dataGetInfoArr, cipherBundle,
metadataStoreParams, dataToDelete,
logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids()), (err, contentMD5) => {
if (err) {
return callback(err);
}
pushMetric('putObject', log, {
authInfo,
bucket: bucketName,
newByteLength: size,
oldByteLength: prevContentLen,
});
return callback(null, contentMD5);
});
});
}
log.trace('content-length is 0 so only storing metadata', {
method: 'services.metadataValidateAuthorization',
});
metadataStoreParams.contentMD5 = constants.emptyFileMd5;
const dataGetInfo = null;
return _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete,
logger.newRequestLoggerFromSerializedUids(log
.getSerializedUids()), (err, contentMD5) => {
if (err) {
return callback(err);
}
pushMetric('putObject', log, {
authInfo,
bucket: bucketName,
newByteLength: size,
oldByteLength: prevContentLen,
});
return callback(null, contentMD5);
});
}
const aclUtils = require('../utilities/aclUtils');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { checkQueryVersionId } = require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const kms = require('../kms/wrapper');
const versionIdUtils = versioning.VersionID;
/**
* PUT Object in the requested bucket. Steps include:
@ -166,104 +31,95 @@ function _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
* @param {Function} callback - final callback to call with the result
* @return {undefined}
*/
function objectPut(authInfo, request, streamingV4Params, log, callback) {
log.debug('processing request', { method: 'objectPut' });
if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
log.trace('invalid acl header');
return callback(errors.InvalidArgument);
}
const queryContainsVersionId = checkQueryVersionId(request.query);
if (queryContainsVersionId instanceof Error) {
return callback(queryContainsVersionId);
}
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const requestType = 'objectPut';
const valParams = { authInfo, bucketName, objectKey, requestType };
const canonicalID = authInfo.getCanonicalID();
log.trace('owner canonicalID to send to data', { canonicalID });
return metadataValidateBucketAndObj(valParams, log,
(err, bucket, objMD) => {
const responseHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.trace('error processing request', {
error: err,
method: 'metadataValidateBucketAndObj',
});
return callback(err, responseHeaders);
}
if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) {
log.trace('deleted flag on bucket and request ' +
'from non-owner account');
return callback(errors.NoSuchBucket);
}
return async.waterfall([
function handleTransientOrDeleteBuckets(next) {
if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) {
return cleanUpBucket(bucket, canonicalID, log, next);
}
return next();
},
function createCipherBundle(next) {
const serverSideEncryption = bucket.getServerSideEncryption();
if (serverSideEncryption) {
return kms.createCipherBundle(
serverSideEncryption, log, next);
}
return next(null, null);
},
function objectCreateAndStore(cipherBundle, next) {
return createAndStoreObject(bucketName,
bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
request, false, streamingV4Params, log, next);
},
], (err, storingResult) => {
if (err) {
return callback(err, responseHeaders);
}
const newByteLength = request.parsedContentLength;
// Utapi expects null or a number for oldByteLength:
// * null - new object
// * 0 or > 0 - existing object with content-length 0 or > 0
// objMD here is the master version that we would
// have overwritten if there was an existing version or object
//
// TODO: Handle utapi metrics for null version overwrites.
const oldByteLength = objMD && objMD['content-length']
!== undefined ? objMD['content-length'] : null;
if (storingResult) {
// ETag's hex should always be enclosed in quotes
responseHeaders.ETag = `"${storingResult.contentMD5}"`;
}
const vcfg = bucket.getVersioningConfiguration();
const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
if (isVersionedObj) {
if (storingResult && storingResult.versionId) {
responseHeaders['x-amz-version-id'] =
versionIdUtils.encode(storingResult.versionId);
}
}
pushMetric('putObject', log, {
authInfo,
bucket: bucketName,
newByteLength,
oldByteLength: isVersionedObj ? null : oldByteLength,
});
return callback(null, responseHeaders);
});
});
}
module.exports = objectPut;
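A sketch of the post-store bookkeeping above: quote the ETag, expose x-amz-version-id only on versioning-enabled buckets, and report oldByteLength as null when nothing was overwritten. It assumes, as the code above suggests, that storingResult carries contentMD5 and (on versioned buckets) a versionId; names are illustrative.

    function putObjectResponse(storingResult, vcfg, objMD, newByteLength,
        encodeVersionId) {
        const headers = {};
        if (storingResult) {
            // the ETag hex digest must be enclosed in quotes
            headers.ETag = `"${storingResult.contentMD5}"`;
        }
        const versioned = Boolean(vcfg && vcfg.Status === 'Enabled');
        if (versioned && storingResult && storingResult.versionId) {
            headers['x-amz-version-id'] =
                encodeVersionId(storingResult.versionId);
        }
        // Utapi: null means "new object"; on versioned buckets no data is
        // overwritten, so the old length is reported as null as well.
        const oldByteLength = !versioned && objMD &&
            objMD['content-length'] !== undefined ?
            objMD['content-length'] : null;
        return { headers, metric: { newByteLength, oldByteLength } };
    }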

View File

@ -1,13 +1,15 @@
import { errors } from 'arsenal';
import async from 'async';
const async = require('async');
const { errors } = require('arsenal');
import acl from '../metadata/acl';
import aclUtils from '../utilities/aclUtils';
import { pushMetric } from '../utapi/utilities';
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import services from '../services';
import vault from '../auth/vault';
const acl = require('../metadata/acl');
const aclUtils = require('../utilities/aclUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const vault = require('../auth/vault');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
/*
Format of xml request:
@ -39,7 +41,7 @@ import vault from '../auth/vault';
* @param {function} cb - cb to server
* @return {undefined}
*/
function objectPutACL(authInfo, request, log, cb) {
log.debug('processing request', { method: 'objectPutACL' });
const bucketName = request.bucketName;
const objectKey = request.objectKey;
@ -61,13 +63,24 @@ export default function objectPutACL(authInfo, request, log, cb) {
constants.logId,
];
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return cb(decodedVidResult);
}
const reqVersionId = decodedVidResult;
const metadataValParams = {
authInfo,
bucketName,
objectKey,
requestType: 'objectPutACL',
log,
versionId: reqVersionId,
};
const possibleGrants = ['FULL_CONTROL', 'WRITE_ACP', 'READ', 'READ_ACP'];
const addACLParams = {
Canned: '',
@ -88,11 +101,27 @@ export default function objectPutACL(authInfo, request, log, cb) {
request.headers['x-amz-grant-full-control'], 'FULL_CONTROL');
return async.waterfall([
function validateBucketAndObj(next) {
return metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => {
if (err) {
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
log.trace('delete marker detected',
{ method: 'objectPutACL' });
return next(errors.MethodNotAllowed, bucket);
}
return next(null, bucket, objectMD);
});
},
function parseAclFromXml(bucket, objectMD, next) {
metadataValParams.versionId = objectMD.versionId;
// If not setting acl through headers, parse body
let jsonGrants;
let aclOwnerID;
@ -117,7 +146,7 @@ export default function objectPutACL(authInfo, request, log, cb) {
log.debug('using acls from request headers');
return next(null, bucket, objectMD, jsonGrants, aclOwnerID);
},
function processAcls(bucket, objectMD, jsonGrants, aclOwnerID, next) {
if (newCannedACL) {
log.debug('canned acl', { cannedAcl: newCannedACL });
addACLParams.Canned = newCannedACL;
@ -239,25 +268,35 @@ export default function objectPutACL(authInfo, request, log, cb) {
aclUtils.sortHeaderGrants(allUsers, addACLParams);
return next(null, bucket, objectMD, revisedAddACLParams);
},
function addAclsToObjMD(bucket, objectMD, ACLParams, next) {
// Add acl's to object metadata
const params = metadataValParams.versionId ?
{ versionId: metadataValParams.versionId } : {};
acl.addObjectACL(bucket, objectKey, objectMD,
ACLParams, params, log, err => next(err, bucket, objectMD));
},
], (err, bucket, objectMD) => {
const resHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.trace('error processing request', {
error: err,
method: 'objectPutACL',
});
return cb(err, resHeaders);
}
const verCfg = bucket.getVersioningConfiguration();
resHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objectMD);
log.trace('processed request successfully in object put acl api');
pushMetric('putObjectAcl', log, {
authInfo,
bucket: bucketName,
});
return cb(null, resHeaders);
});
}
module.exports = objectPutACL;
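A rough sketch of the precedence the handler above applies when deciding where the new ACL comes from. This is a simplification for illustration: the real handler also validates owners, grantee ids, and the XML body, and accepts x-amz-acl from the query string on pre-signed URLs.

    function pickAclSource(headers, hasXmlBody) {
        if (headers['x-amz-acl']) {
            // a canned ACL header wins outright
            return 'canned';
        }
        const grantHeaders = ['x-amz-grant-read', 'x-amz-grant-read-acp',
            'x-amz-grant-write-acp', 'x-amz-grant-full-control'];
        if (grantHeaders.some(h => headers[h])) {
            // explicit grant headers are used next
            return 'headers';
        }
        // otherwise the ACL is parsed from the request body, if any
        return hasXmlBody ? 'xml' : 'none';
    }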

View File

@ -1,16 +1,20 @@
import async from 'async';
import { errors } from 'arsenal';
const async = require('async');
const { errors, versioning } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import data from '../data/wrapper';
import kms from '../kms/wrapper';
import metadata from '../metadata/wrapper';
import RelayMD5Sum from '../utilities/RelayMD5Sum';
import { logger } from '../utilities/logger';
import services from '../services';
import setUpCopyLocator from './apiUtils/object/setUpCopyLocator';
import validateHeaders from '../utilities/validateHeaders';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { BackendInfo } = require('./apiUtils/object/BackendInfo');
const constants = require('../../constants');
const data = require('../data/wrapper');
const kms = require('../kms/wrapper');
const metadata = require('../metadata/wrapper');
const RelayMD5Sum = require('../utilities/RelayMD5Sum');
const logger = require('../utilities/logger');
const services = require('../services');
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
const validateHeaders = require('../utilities/validateHeaders');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const versionIdUtils = versioning.VersionID;
/**
@ -21,22 +25,23 @@ import validateHeaders from '../utilities/validateHeaders';
* includes normalized headers
* @param {string} sourceBucket - name of source bucket for object copy
* @param {string} sourceObject - name of source object for object copy
* @param {string} reqVersionId - versionId of the source object for copy
* @param {object} log - the request logger
* @param {function} callback - final callback to call with the result
* @return {undefined}
*/
function objectPutCopyPart(authInfo, request, sourceBucket,
sourceObject, reqVersionId, log, callback) {
log.debug('processing request', { method: 'objectPutCopyPart' });
const destBucketName = request.bucketName;
const destObjectKey = request.objectKey;
const mpuBucketName = `${constants.mpuBucketPrefix}${destBucketName}`;
const valGetParams = {
authInfo,
bucketName: sourceBucket,
objectKey: sourceObject,
versionId: reqVersionId,
requestType: 'objectGet',
log,
};
const partNumber = Number.parseInt(request.query.partNumber, 10);
@ -57,7 +62,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPut',
log,
};
// For validating the request at the MPU, the params are the same
@ -74,11 +78,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
bucketName: destBucketName,
owner: authInfo.getCanonicalID(),
namespace: request.namespace,
objectKey: destObjectKey,
partNumber: paddedPartNumber,
uploadId,
};
return async.waterfall([
function checkDestAuth(next) {
return metadataValidateBucketAndObj(valPutParams, log,
(err, destBucketMD) => {
if (err) {
log.debug('error validating authorization for ' +
@ -97,7 +104,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
});
},
function checkSourceAuthorization(destBucketMD, next) {
return metadataValidateBucketAndObj(valGetParams, log,
(err, sourceBucketMD, sourceObjMD) => {
if (err) {
log.debug('error validating get part of request',
@ -106,6 +113,23 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
if (!sourceObjMD) {
log.debug('no source object', { sourceObject });
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
return next(err, destBucketMD);
}
if (sourceObjMD.isDeleteMarker) {
log.debug('delete marker on source object',
{ sourceObject });
if (reqVersionId) {
const err = errors.InvalidRequest
.customizeDescription('The source of a copy ' +
'request may not specifically refer to a delete ' +
'marker by version id.');
return next(err, destBucketMD);
}
// if user specifies a key in a versioned source bucket
// without specifying a version, and the object has a
// delete marker, return NoSuchKey
return next(errors.NoSuchKey, destBucketMD);
}
const headerValResult =
@ -118,33 +142,92 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
if (copyLocator.error) {
return next(copyLocator.error, destBucketMD);
}
let sourceVerId = undefined;
// If specific version requested, include copy source
// version id in response. Include in request by default
// if versioning is enabled or suspended.
if (sourceBucketMD.getVersioningConfiguration() ||
reqVersionId) {
if (sourceObjMD.isNull || !sourceObjMD.versionId) {
sourceVerId = 'null';
} else {
sourceVerId =
versionIdUtils.encode(sourceObjMD.versionId);
}
}
return next(null, copyLocator.dataLocator, destBucketMD,
copyLocator.copyObjectSize);
copyLocator.copyObjectSize, sourceVerId);
});
},
// get MPU shadow bucket to get splitter based on MD version
function getMpuShadowBucket(dataLocator, destBucketMD,
copyObjectSize, sourceVerId, next) {
return metadata.getBucket(mpuBucketName, log,
(err, mpuBucket) => {
if (err && err.NoSuchBucket) {
return next(errors.NoSuchUpload);
}
if (err) {
log.error('error getting the shadow mpu bucket', {
error: err,
method: 'objectPutCopyPart::metadata.getBucket',
});
return next(err);
}
let splitter = constants.splitter;
if (mpuBucket.getMdBucketModelVersion() < 2) {
splitter = constants.oldSplitter;
}
return next(null, dataLocator, destBucketMD,
copyObjectSize, sourceVerId, splitter);
});
},
// Get MPU overview object to check authorization to put a part
// and to get any object location constraint info
function getMpuOverviewObject(dataLocator, destBucketMD,
copyObjectSize, sourceVerId, splitter, next) {
const mpuOverviewKey =
`overview${splitter}${destObjectKey}${splitter}${uploadId}`;
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey,
null, log, (err, res) => {
if (err) {
if (err.NoSuchKey) {
return next(errors.NoSuchUpload);
}
log.error('error getting overview object from ' +
'mpu bucket', {
error: err,
method: 'objectPutCopyPart::' +
'metadata.getObjectMD',
});
return next(err);
}
const initiatorID = res.initiator.ID;
const requesterID = authInfo.isRequesterAnIAMUser() ?
authInfo.getArn() : authInfo.getCanonicalID();
if (initiatorID !== requesterID) {
return next(errors.AccessDenied);
}
const objectLocationConstraint =
res.controllingLocationConstraint;
return next(null, dataLocator, destBucketMD,
objectLocationConstraint, copyObjectSize,
sourceVerId);
});
},
function goGetData(dataLocator, destBucketMD,
mpuBucket, copyObjectSize, next) {
objectLocationConstraint, copyObjectSize, sourceVerId, next) {
const serverSideEncryption = destBucketMD.getServerSideEncryption();
// skip if 0 byte object
if (dataLocator.length === 0) {
return process.nextTick(() => {
next(null, destBucketMD, [], constants.emptyFileMd5,
copyObjectSize, sourceVerId, serverSideEncryption);
});
}
const backendInfo = new BackendInfo(objectLocationConstraint);
// totalHash will be sent through the RelayMD5Sum transform streams
// to collect the md5 from multiple streams
let totalHash;
@ -177,7 +260,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
return cb(errors.InternalError);
}
return data.put(cipherBundle, hashedStream,
numberPartSize, dataStoreContext, log,
numberPartSize, dataStoreContext,
backendInfo, log,
(error, partRetrievalInfo) => {
if (error) {
log.debug('error putting ' +
@ -209,7 +293,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
// Copied object is not encrypted so just put it
// without a cipherBundle
return data.put(null, hashedStream, numberPartSize,
dataStoreContext, log, (error, partRetrievalInfo) => {
dataStoreContext, backendInfo,
log, (error, partRetrievalInfo) => {
if (error) {
log.debug('error putting object part',
{ error });
@ -232,23 +317,21 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
// Digest the final combination of all of the part streams
totalHash = totalHash.digest('hex');
return next(null, destBucketMD, locations, totalHash,
copyObjectSize, sourceVerId, serverSideEncryption);
});
},
function getExistingPartInfo(destBucketMD, locations, totalHash,
copyObjectSize, sourceVerId, serverSideEncryption, next) {
const partKey =
`${uploadId}${constants.splitter}${paddedPartNumber}`;
metadata.getObjectMD(mpuBucketName, partKey, {}, log,
(err, result) => {
// If there is nothing being overwritten just move on
if (err && !err.NoSuchKey) {
log.debug('error getting current part (if any)',
{ error: err });
return next(err);
}
let oldLocations;
if (result) {
@ -259,14 +342,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
oldLocations = Array.isArray(oldLocations) ?
oldLocations : [oldLocations];
}
return next(null, destBucketMD, locations, totalHash,
copyObjectSize, sourceVerId, serverSideEncryption,
oldLocations);
});
},
function storeNewPartMetadata(destBucketMD, locations, totalHash,
copyObjectSize, sourceVerId, serverSideEncryption, oldLocations,
next) {
const lastModified = new Date().toJSON();
const metaStoreParams = {
partNumber: paddedPartNumber,
@ -276,25 +359,26 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
splitter: constants.splitter,
lastModified,
};
return services.metadataStorePart(mpuBucketName,
locations, metaStoreParams, log, err => {
if (err) {
log.debug('error storing new metadata',
{ error: err, method: 'storeNewPartMetadata' });
return next(err);
}
// Clean up the old data now that new metadata (with new
// data locations) has been stored
if (oldLocations) {
data.batchDelete(oldLocations, request.method, null,
logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids()));
}
return next(null, destBucketMD, totalHash, lastModified,
sourceVerId, serverSideEncryption);
});
},
], (err, destBucketMD, totalHash, lastModified, sourceVerId,
serverSideEncryption) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, destBucketMD);
if (err) {
@ -310,8 +394,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
'<ETag>&quot;', totalHash, '&quot;</ETag>',
'</CopyPartResult>',
].join('');
const additionalHeaders = corsHeaders || {};
if (serverSideEncryption) {
additionalHeaders['x-amz-server-side-encryption'] =
@ -322,6 +405,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
serverSideEncryption.masterKeyId;
}
}
additionalHeaders['x-amz-copy-source-version-id'] = sourceVerId;
// TODO push metric for objectPutCopyPart
// pushMetric('putObjectCopyPart', log, {
// bucket: destBucketName,
@ -329,3 +413,5 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
return callback(null, xml, additionalHeaders);
});
}
module.exports = objectPutCopyPart;
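A sketch of the MPU shadow-bucket key layout used above. The splitter value and the zero-padding width shown here are assumptions for illustration; the handler itself picks the splitter based on the bucket's metadata model version and pads part numbers so lexicographic order matches numeric order.

    function mpuKeys(objectKey, uploadId, partNumber, splitter) {
        // zero-pad so '2' sorts before '10' when listing parts
        const paddedPartNumber = `000000${partNumber}`.substr(-5);
        return {
            overviewKey:
                `overview${splitter}${objectKey}${splitter}${uploadId}`,
            partKey: `${uploadId}${splitter}${paddedPartNumber}`,
        };
    }

    // mpuKeys('photo.jpg', 'uid123', 2, '..|..')
    // -> { overviewKey: 'overview..|..photo.jpg..|..uid123',
    //      partKey: 'uid123..|..00002' }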

View File

@ -1,16 +1,17 @@
import assert from 'assert';
import async from 'async';
import { errors } from 'arsenal';
const assert = require('assert');
const async = require('async');
const { errors } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import data from '../data/wrapper';
import { dataStore } from './apiUtils/object/storeObject';
import { isBucketAuthorized } from './apiUtils/authorization/aclChecks';
import kms from '../kms/wrapper';
import metadata from '../metadata/wrapper';
import { pushMetric } from '../utapi/utilities';
import { logger } from '../utilities/logger';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { BackendInfo } = require('./apiUtils/object/BackendInfo');
const constants = require('../../constants');
const data = require('../data/wrapper');
const { dataStore } = require('./apiUtils/object/storeObject');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
const kms = require('../kms/wrapper');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const logger = require('../utilities/logger');
// We pad the partNumbers so that the parts will be sorted in numerical order.
@ -44,7 +45,7 @@ function _getPartKey(uploadId, splitter, paddedPartNumber) {
* @param {function} cb - final callback to call with the result
* @return {undefined}
*/
function objectPutPart(authInfo, request, streamingV4Params, log,
cb) {
log.debug('processing request', { method: 'objectPutPart' });
const size = request.parsedContentLength;
@ -83,33 +84,35 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
return async.waterfall([
// Get the destination bucket.
next => metadata.getBucket(bucketName, log,
(err, destinationBucket) => {
if (err && err.NoSuchBucket) {
return next(errors.NoSuchBucket, destinationBucket);
}
if (err) {
log.error('error getting the destination bucket', {
error: err,
method: 'objectPutPart::metadata.getBucket',
});
return next(err, destinationBucket);
}
return next(null, destinationBucket);
}),
// Check the bucket authorization.
(destinationBucket, next) => {
// For validating the request at the destinationBucket level the
// `requestType` is the general 'objectPut'.
const requestType = 'objectPut';
if (!isBucketAuthorized(destinationBucket, requestType,
canonicalID)) {
log.debug('access denied for user on bucket', { requestType });
return next(errors.AccessDenied, destinationBucket);
}
return next(null, destinationBucket);
},
// Get bucket server-side encryption, if it exists.
(destinationBucket, next) => {
const encryption = destinationBucket.getServerSideEncryption();
// If bucket has server-side encryption, pass the `res` value
if (encryption) {
return kms.createCipherBundle(encryption, log, (err, res) => {
@ -118,61 +121,68 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
'the destination bucket', {
error: err,
});
return next(err, destinationBucket);
}
return next(null, res, bucket);
return next(null, destinationBucket, res);
});
}
// The bucket does not have server-side encryption, so pass `null`
return next(null, null, bucket);
return next(null, destinationBucket, null);
},
// Get the MPU shadow bucket.
(cipherBundle, bucket, next) => metadata.getBucket(mpuBucketName, log,
(destinationBucket, cipherBundle, next) =>
metadata.getBucket(mpuBucketName, log,
(err, mpuBucket) => {
if (err && err.NoSuchBucket) {
return next(errors.NoSuchUpload, bucket);
return next(errors.NoSuchUpload, destinationBucket);
}
if (err) {
log.error('error getting the shadow mpu bucket', {
error: err,
method: 'objectPutPart::metadata.getBucket',
});
return next(err, bucket);
return next(err, destinationBucket);
}
let splitter = constants.splitter;
// BACKWARD: Remove to remove the old splitter
if (mpuBucket.getMdBucketModelVersion() < 2) {
splitter = constants.oldSplitter;
}
return next(null, cipherBundle, splitter, bucket);
return next(null, destinationBucket, cipherBundle, splitter);
}),
// Check authorization of the MPU shadow bucket.
(cipherBundle, splitter, bucket, next) => {
(destinationBucket, cipherBundle, splitter, next) => {
const mpuOverviewKey = _getOverviewKey(splitter, objectKey,
uploadId);
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, log,
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log,
(err, res) => {
if (err) {
log.error('error getting the object from mpu bucket', {
error: err,
method: 'objectPutPart::metadata.getObjectMD',
});
return next(err, bucket);
return next(err, destinationBucket);
}
const initiatorID = res.initiator.ID;
const requesterID = authInfo.isRequesterAnIAMUser() ?
authInfo.getArn() : authInfo.getCanonicalID();
if (initiatorID !== requesterID) {
return next(errors.AccessDenied, bucket);
return next(errors.AccessDenied, destinationBucket);
}
return next(null, cipherBundle, splitter, bucket);
const objectLocationConstraint =
res.controllingLocationConstraint;
return next(null, destinationBucket,
objectLocationConstraint,
cipherBundle, splitter);
});
},
// Get any pre-existing part.
(cipherBundle, splitter, bucket, next) => {
(destinationBucket, objectLocationConstraint, cipherBundle,
splitter, next) => {
const paddedPartNumber = _getPaddedPartNumber(partNumber);
const partKey = _getPartKey(uploadId, splitter, paddedPartNumber);
return metadata.getObjectMD(mpuBucketName, partKey, log,
return metadata.getObjectMD(mpuBucketName, partKey, {}, log,
(err, res) => {
// If there is no object with the same key, continue.
if (err && !err.NoSuchKey) {
@ -180,7 +190,7 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
error: err,
method: 'objectPutPart::metadata.getObjectMD',
});
return next(err, bucket);
return next(err, destinationBucket);
}
let prevObjectSize = null;
let oldLocations = null;
@ -194,29 +204,38 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
oldLocations = Array.isArray(res.partLocations) ?
res.partLocations : [res.partLocations];
}
return next(null, cipherBundle, partKey, prevObjectSize,
oldLocations, bucket);
return next(null, destinationBucket,
objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations);
});
},
// Store in data backend.
(cipherBundle, partKey, prevObjectSize, oldLocations, bucket, next) => {
(destinationBucket, objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations, next) => {
const objectKeyContext = {
bucketName,
owner: canonicalID,
namespace: request.namespace,
objectKey,
partNumber: _getPaddedPartNumber(partNumber),
uploadId,
};
const backendInfo = new BackendInfo(objectLocationConstraint);
return dataStore(objectKeyContext, cipherBundle, request, size,
streamingV4Params, log, (err, dataGetInfo, hexDigest) => {
streamingV4Params, backendInfo, log,
(err, dataGetInfo, hexDigest) => {
if (err) {
return next(err, bucket);
return next(err, destinationBucket);
}
return next(null, dataGetInfo, hexDigest, cipherBundle,
partKey, prevObjectSize, oldLocations, bucket);
return next(null, destinationBucket, dataGetInfo, hexDigest,
cipherBundle, partKey, prevObjectSize, oldLocations,
objectLocationConstraint);
});
},
// Store data locations in metadata and delete any overwritten data.
(dataGetInfo, hexDigest, cipherBundle, partKey, prevObjectSize,
oldLocations, bucket, next) => {
(destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
prevObjectSize, oldLocations, objectLocationConstraint, next) => {
// Use an array to be consistent with objectPutCopyPart where there
// could be multiple locations.
const partLocations = [dataGetInfo];
@ -238,29 +257,31 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
'content-md5': hexDigest,
'content-length': size,
};
return metadata.putObjectMD(mpuBucketName, partKey, omVal, log,
return metadata.putObjectMD(mpuBucketName, partKey, omVal, {}, log,
err => {
if (err) {
log.error('error putting object in mpu bucket', {
error: err,
method: 'objectPutPart::metadata.putObjectMD',
});
return next(err, bucket);
return next(err, destinationBucket);
}
// Clean up any old data now that new metadata (with new
// data locations) has been stored.
if (oldLocations) {
log.trace('Overwriting MPU part, deleting data');
data.batchDelete(oldLocations, logger
.newRequestLoggerFromSerializedUids(log
data.batchDelete(oldLocations, request.method,
objectLocationConstraint,
logger.newRequestLoggerFromSerializedUids(log
.getSerializedUids()));
}
return next(null, bucket, hexDigest, prevObjectSize);
return next(null, destinationBucket,
hexDigest, prevObjectSize);
});
},
], (err, bucket, hexDigest, prevObjectSize) => {
], (err, destinationBucket, hexDigest, prevObjectSize) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
request.method, destinationBucket);
if (err) {
log.error('error in object put part (upload part)', {
error: err,
@ -277,3 +298,5 @@ export default function objectPutPart(authInfo, request, streamingV4Params, log,
return cb(null, hexDigest, corsHeaders);
});
}
module.exports = objectPutPart;
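The part keys this handler writes are built from the upload id, a splitter, and a zero-padded part number so that listing the MPU shadow bucket returns parts in numeric order (see the comment above _getPaddedPartNumber). A minimal standalone sketch of that layout, assuming a five-digit pad and the '..|..' splitter value; the helper names are illustrative, not the module's actual private functions:

// illustrative part-key layout, not the module's real helpers
const SPLITTER = '..|..'; // assumed value of constants.splitter

function padPartNumber(partNumber) {
    // '2' -> '00002', so it sorts before '00010' lexicographically
    return `000000${partNumber}`.slice(-5);
}

function partKey(uploadId, partNumber) {
    return `${uploadId}${SPLITTER}${padPartNumber(partNumber)}`;
}

console.log([partKey('uid1', 10), partKey('uid1', 2)].sort());
// [ 'uid1..|..00002', 'uid1..|..00010' ]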

lib/api/objectPutTagging.js (Normal file, 100 lines)
View File

@ -0,0 +1,100 @@
const async = require('async');
const { errors } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader } =
require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const { parseTagXml } = require('./apiUtils/object/tagging');
/**
* Object Put Tagging - Adds tag(s) to object
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function objectPutTagging(authInfo, request, log, callback) {
log.debug('processing request', { method: 'objectPutTagging' });
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
versionId: request.query.versionId,
error: decodedVidResult,
});
return process.nextTick(() => callback(decodedVidResult));
}
const reqVersionId = decodedVidResult;
const metadataValParams = {
authInfo,
bucketName,
objectKey,
requestType: 'bucketOwnerAction',
versionId: reqVersionId,
};
return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectPutTagging', error: err });
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error no object metadata found',
{ method: 'objectPutTagging', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
log.trace('version is a delete marker',
{ method: 'objectPutTagging' });
return next(errors.MethodNotAllowed, bucket);
}
return next(null, bucket, objectMD);
}),
(bucket, objectMD, next) => {
log.trace('parsing tag(s)');
parseTagXml(request.post, log, (err, tags) =>
next(err, bucket, tags, objectMD));
},
(bucket, tags, objectMD, next) => {
// eslint-disable-next-line no-param-reassign
objectMD.tags = tags;
const params = objectMD.versionId ? { versionId:
objectMD.versionId } : {};
metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
log, err =>
next(err, bucket, objectMD));
},
], (err, bucket, objectMD) => {
const additionalResHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.trace('error processing request', { error: err,
method: 'objectPutTagging' });
} else {
pushMetric('putObjectTagging', log, {
authInfo,
bucket: bucketName,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objectMD);
}
return callback(err, additionalResHeaders);
});
}
module.exports = objectPutTagging;
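For context, a hedged usage sketch of the new route from a client's point of view, using the AWS SDK for JavaScript v2; the endpoint and credentials below are placeholders, not values defined by this change:

const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000', // assumed local CloudServer endpoint
    s3ForcePathStyle: true,
    accessKeyId: 'accessKey1',         // placeholder credentials
    secretAccessKey: 'verySecretKey1',
});

s3.putObjectTagging({
    Bucket: 'mybucket',
    Key: 'mykey',
    Tagging: { TagSet: [{ Key: 'project', Value: 'alpha' }] },
}, (err, data) => {
    if (err) {
        return console.error('putObjectTagging failed', err);
    }
    // when versioning is enabled the route also answers with an
    // x-amz-version-id response header
    return console.log('tags stored', data);
});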

View File

@ -1,7 +1,7 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import constants from '../../constants';
import services from '../services';
const constants = require('../../constants');
const services = require('../services');
/*
* Format of xml response:
@ -50,11 +50,11 @@ function generateXml(xml, owner, userBuckets, splitter) {
* @param {function} callback - callback
* @return {undefined}
*/
export default function serviceGet(authInfo, request, log, callback) {
function serviceGet(authInfo, request, log, callback) {
log.debug('processing request', { method: 'serviceGet' });
if (authInfo.isRequesterPublicUser()) {
log.warn('operation not available for public user');
log.debug('operation not available for public user');
return callback(errors.AccessDenied);
}
const xml = [];
@ -83,3 +83,5 @@ export default function serviceGet(authInfo, request, log, callback) {
splitter));
});
}
module.exports = serviceGet;

View File

@ -1,24 +1,23 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import metadata from '../metadata/wrapper';
import bucketShield from './apiUtils/bucket/bucketShield';
import {
findRoutingRule,
extractRedirectInfo,
} from './apiUtils/object/websiteServing';
import { isObjAuthorized } from './apiUtils/authorization/aclChecks';
import collectResponseHeaders from '../utilities/collectResponseHeaders';
import validateHeaders from '../utilities/validateHeaders';
import { pushMetric } from '../utapi/utilities';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const metadata = require('../metadata/wrapper');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { findRoutingRule, extractRedirectInfo } =
require('./apiUtils/object/websiteServing');
const { isObjAuthorized } = require('./apiUtils/authorization/aclChecks');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const validateHeaders = require('../utilities/validateHeaders');
const { pushMetric } = require('../utapi/utilities');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
/**
* _errorActions - take a number of actions once have error getting obj
* @param {object} err - arsenal errors object
* @param {string} errorDocument - key to get error document
* @param {object []} routingRules - array of routingRule objects
* @param {string} bucketName - bucket name from request
* @param {object} bucket - bucket metadata
* @param {string} objectKey - object key from request (or as translated in
* websiteGet)
* @param {object} corsHeaders - CORS-related response headers
@ -27,7 +26,8 @@ import { pushMetric } from '../utapi/utilities';
* @return {undefined}
*/
function _errorActions(err, errorDocument, routingRules,
bucketName, objectKey, corsHeaders, log, callback) {
bucket, objectKey, corsHeaders, log, callback) {
const bucketName = bucket.getName();
const errRoutingRule = findRoutingRule(routingRules,
objectKey, err.code);
if (errRoutingRule) {
@ -36,7 +36,7 @@ function _errorActions(err, errorDocument, routingRules,
objectKey);
}
if (errorDocument) {
return metadata.getObjectMD(bucketName, errorDocument, log,
return metadata.getObjectMD(bucketName, errorDocument, {}, log,
(errObjErr, errObjMD) => {
if (errObjErr) {
// error retrieving error document so return original error
@ -44,6 +44,13 @@ function _errorActions(err, errorDocument, routingRules,
// to true
return callback(err, true, null, corsHeaders);
}
// return the default error message if the object is private
// rather than sending a stored error file
if (!isObjAuthorized(bucket, errObjMD, 'objectGet',
constants.publicId)) {
log.trace('errorObj not authorized', { error: err });
return callback(err, true, null, corsHeaders);
}
const dataLocator = errObjMD.location;
if (errObjMD['x-amz-server-side-encryption']) {
for (let i = 0; i < dataLocator.length; i++) {
@ -73,7 +80,6 @@ function _errorActions(err, errorDocument, routingRules,
* @param {function} callback - callback to function in route
* @return {undefined}
*/
export default
function websiteGet(request, log, callback) {
log.debug('processing request', { method: 'websiteGet' });
const bucketName = request.bucketName;
@ -91,8 +97,6 @@ function websiteGet(request, log, callback) {
}
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
// bucket ACL's do not matter for website get since it is always the
// get of an object. object ACL's are what matter
const websiteConfig = bucket.getWebsiteConfiguration();
if (!websiteConfig) {
return callback(errors.NoSuchWebsiteConfiguration, false, null,
@ -130,7 +134,7 @@ function websiteGet(request, log, callback) {
// get object metadata and check authorization and header
// validation
return metadata.getObjectMD(bucketName, objectKey, log,
return metadata.getObjectMD(bucketName, objectKey, {}, log,
(err, objMD) => {
// Note: In case of error, we intentionally send the original
// object key to _errorActions as in case of a redirect, we do
@ -139,21 +143,25 @@ function websiteGet(request, log, callback) {
log.trace('error retrieving object metadata',
{ error: err });
let returnErr = err;
// AWS returns AccessDenied instead of NoSuchKey
if (err === errors.NoSuchKey) {
const bucketAuthorized = isBucketAuthorized(bucket,
'bucketGet', constants.publicId);
// if index object does not exist and bucket is private AWS
// returns 403 - AccessDenied error.
if (err === errors.NoSuchKey && !bucketAuthorized) {
returnErr = errors.AccessDenied;
}
return _errorActions(returnErr,
websiteConfig.getErrorDocument(), routingRules,
bucketName, reqObjectKey, corsHeaders, log, callback);
bucket, reqObjectKey, corsHeaders, log,
callback);
}
if (!isObjAuthorized(bucket, objMD, 'objectGet',
constants.publicId)) {
const err = errors.AccessDenied;
log.trace('request not authorized', { error: err });
return _errorActions(err, websiteConfig.getErrorDocument(),
routingRules, bucketName, reqObjectKey, corsHeaders,
log, callback);
routingRules, bucket,
reqObjectKey, corsHeaders, log, callback);
}
const headerValResult = validateHeaders(objMD, request.headers);
@ -161,8 +169,8 @@ function websiteGet(request, log, callback) {
const err = headerValResult.error;
log.trace('header validation error', { error: err });
return _errorActions(err, websiteConfig.getErrorDocument(),
routingRules, bucketName, reqObjectKey, corsHeaders,
log, callback);
routingRules, bucket, reqObjectKey,
corsHeaders, log, callback);
}
// check if object to serve has website redirect header
// Note: AWS prioritizes website configuration rules over
@ -200,3 +208,5 @@ function websiteGet(request, log, callback) {
});
});
}
module.exports = websiteGet;
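The two authorization checks added above change what an anonymous website visitor sees. A simplified decision sketch of that behaviour, for illustration only (the real code relies on isBucketAuthorized/isObjAuthorized with constants.publicId):

function websiteErrorFor(indexErr, bucketIsPublicRead, errorDocIsPublicRead) {
    // a missing index object on a non-public bucket is masked as
    // AccessDenied, matching AWS behaviour
    const err = (indexErr === 'NoSuchKey' && !bucketIsPublicRead)
        ? 'AccessDenied' : indexErr;
    // a private error document is never served; the default error
    // page for `err` is returned instead
    return { err, serveErrorDocument: errorDocIsPublicRead };
}

console.log(websiteErrorFor('NoSuchKey', false, true));
// { err: 'AccessDenied', serveErrorDocument: true }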

View File

@ -1,17 +1,16 @@
import { errors } from 'arsenal';
const { errors } = require('arsenal');
import collectCorsHeaders from '../utilities/collectCorsHeaders';
import constants from '../../constants';
import metadata from '../metadata/wrapper';
import bucketShield from './apiUtils/bucket/bucketShield';
import {
findRoutingRule,
extractRedirectInfo,
} from './apiUtils/object/websiteServing';
import { isObjAuthorized } from './apiUtils/authorization/aclChecks';
import collectResponseHeaders from '../utilities/collectResponseHeaders';
import validateHeaders from '../utilities/validateHeaders';
import { pushMetric } from '../utapi/utilities';
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const metadata = require('../metadata/wrapper');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { findRoutingRule, extractRedirectInfo } =
require('./apiUtils/object/websiteServing');
const { isObjAuthorized } = require('./apiUtils/authorization/aclChecks');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const validateHeaders = require('../utilities/validateHeaders');
const { pushMetric } = require('../utapi/utilities');
const { isBucketAuthorized } = require('./apiUtils/authorization/aclChecks');
/**
@ -43,7 +42,7 @@ function _errorActions(err, routingRules, objectKey, corsHeaders, log,
* @param {function} callback - callback to function in route
* @return {undefined}
*/
export default function websiteHead(request, log, callback) {
function websiteHead(request, log, callback) {
log.debug('processing request', { method: 'websiteHead' });
const bucketName = request.bucketName;
const reqObjectKey = request.objectKey ? request.objectKey : '';
@ -95,7 +94,7 @@ export default function websiteHead(request, log, callback) {
// get object metadata and check authorization and header
// validation
return metadata.getObjectMD(bucketName, objectKey, log,
return metadata.getObjectMD(bucketName, objectKey, {}, log,
(err, objMD) => {
// Note: In case of error, we intentionally send the original
// object key to _errorActions as in case of a redirect, we do
@ -104,8 +103,11 @@ export default function websiteHead(request, log, callback) {
log.trace('error retrieving object metadata',
{ error: err });
let returnErr = err;
// AWS returns AccessDenied instead of NoSuchKey
if (err === errors.NoSuchKey) {
const bucketAuthorized = isBucketAuthorized(bucket,
'bucketGet', constants.publicId);
// if index object does not exist and bucket is private AWS
// returns 403 - AccessDenied error.
if (err === errors.NoSuchKey && !bucketAuthorized) {
returnErr = errors.AccessDenied;
}
return _errorActions(returnErr, routingRules,
@ -152,3 +154,5 @@ export default function websiteHead(request, log, callback) {
});
});
}
module.exports = websiteHead;

View File

@ -1,4 +1,4 @@
import constants from '../../constants';
const constants = require('../../constants');
/**
* Class containing requester's information received from Vault
@ -7,7 +7,7 @@ import constants from '../../constants';
* @return {AuthInfo} an AuthInfo instance
*/
export default class AuthInfo {
class AuthInfo {
constructor(objectFromVault) {
const { arn, canonicalID, shortid, email,
accountDisplayName, IAMdisplayName } = objectFromVault;
@ -50,3 +50,5 @@ export default class AuthInfo {
return this.canonicalID === constants.publicId;
}
}
module.exports = AuthInfo;
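A hedged sketch of how handlers such as objectPutPart consume an AuthInfo instance; the require path and field values are assumptions for illustration:

const AuthInfo = require('../auth/AuthInfo'); // path assumed

const requester = new AuthInfo({
    arn: 'arn:aws:iam::123456789012:user/bart',
    canonicalID: '79a59df900b949e5...', // truncated, illustrative
    shortid: '123456789012',
    email: 'bart@example.com',
    accountDisplayName: 'Bart',
    IAMdisplayName: 'bart',
});

// anonymous requests carry the public canonical id
console.log(requester.isRequesterPublicUser()); // false here

// the MPU initiator check compares this id against the overview metadata
const requesterID = requester.isRequesterAnIAMUser()
    ? requester.getArn() : requester.getCanonicalID();
console.log(requesterID);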

View File

@ -1,10 +1,11 @@
import { errors } from 'arsenal';
import crypto from 'crypto';
const { errors } = require('arsenal');
import config from '../../Config';
import Indexer from './indexer';
const crypto = require('crypto');
import { calculateSigningKey, hashSignature } from './vaultUtilities';
const { config } = require('../../Config');
const Indexer = require('./indexer');
const { calculateSigningKey, hashSignature } = require('./vaultUtilities');
const authIndex = new Indexer(config.authData);
@ -156,6 +157,9 @@ const backend = {
* @param {object} requestContextParams.parameterize - params that have
* arrays as values since a requestContext needs to be constructed with
* each option in Vault
* @param {object[]} requestContextParams.parameterize.specificResource -
* specific resources parameterized as an array of objects containing
* properties `key` and optional `versionId`
* @param {string} userArn - arn of requesting user
* @param {object} log - log object
* @param {function} cb - callback with either error or an array
@ -165,14 +169,14 @@ const backend = {
*/
checkPolicies: (requestContextParams, userArn, log, cb) => {
let results;
const specificResourceParams =
requestContextParams.parameterize.specificResource;
if (specificResourceParams) {
const parameterizeParams = requestContextParams.parameterize;
if (parameterizeParams && parameterizeParams.specificResource) {
// object is parameterized
results = specificResourceParams.map(obj => ({
results = parameterizeParams.specificResource.map(obj => ({
isAllowed: true,
arn: _buildArn(requestContextParams
.constantParams.generalResource, obj),
.constantParams.generalResource, obj.key),
versionId: obj.versionId,
}));
} else {
results = [{
@ -191,4 +195,4 @@ const backend = {
},
};
export default backend;
module.exports = backend;
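A hedged example of the payload shape the reworked checkPolicies expects: parameterize.specificResource is an array of objects carrying a key and an optional versionId, and the versionId is echoed back in each result. Values are illustrative, and the ARN format assumes _buildArn produces a standard S3 object ARN:

const requestContextParams = {
    constantParams: { generalResource: 'mybucket' },
    parameterize: {
        specificResource: [
            { key: 'photos/cat.jpg' },
            { key: 'photos/dog.jpg', versionId: '987654321' },
        ],
    },
};

// expected results, one entry per parameterized resource, e.g.:
// [ { isAllowed: true, arn: 'arn:aws:s3:::mybucket/photos/cat.jpg',
//     versionId: undefined },
//   { isAllowed: true, arn: 'arn:aws:s3:::mybucket/photos/dog.jpg',
//     versionId: '987654321' } ]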

View File

@ -0,0 +1,25 @@
/** build simple authdata with only one account
* @param {string} accessKey - account's accessKey
* @param {string} secretKey - account's secretKey
* @return {object} authdata - authdata with account's accessKey and secretKey
*/
function buildAuthDataAccount(accessKey, secretKey) {
return {
accounts: [{
name: 'CustomAccount',
email: 'customaccount1@setbyenv.com',
arn: 'aws::iam:123456789012:root',
canonicalID: '12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d52' +
'18e7cd47qwer',
shortid: '123456789012',
keys: [{
access: accessKey,
secret: secretKey,
}],
}],
};
}
module.exports = {
buildAuthDataAccount,
};
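This helper pairs with the new docker environment variables: when access and secret keys are provided through the environment, a single in-memory account can replace the file-based authdata. A hedged wiring sketch; the variable names and require paths below are assumptions, not confirmed by this diff:

const { buildAuthDataAccount } = require('./auth/in_memory/builder'); // path assumed

let authData;
if (process.env.SCALITY_ACCESS_KEY_ID && process.env.SCALITY_SECRET_ACCESS_KEY) {
    // assumed variable names for the env-provided credentials
    authData = buildAuthDataAccount(
        process.env.SCALITY_ACCESS_KEY_ID,
        process.env.SCALITY_SECRET_ACCESS_KEY);
} else {
    authData = require('../conf/authdata.json'); // fallback, path assumed
}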

View File

@ -1,7 +1,7 @@
import { Logger } from 'werelogs';
const werelogs = require('werelogs');
// Here, we expect the logger to have already been configured in S3
const log = new Logger('S3');
const log = new werelogs.Logger('S3');
function incr(count) {
if (count !== undefined) {
@ -158,7 +158,7 @@ function dumpErrors(checkData) {
* @return {boolean} true on erroneous data
* false on success
*/
export default function check(authdata) {
function check(authdata) {
const checkData = {
errors: [],
emails: [],
@ -180,3 +180,5 @@ export default function check(authdata) {
return dumpErrors(checkData);
}
module.exports = check;
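Per its JSDoc, check() returns true on erroneous data, so callers presumably treat a truthy return as a fatal configuration error. A hedged usage sketch, reusing the account shape produced by buildAuthDataAccount above; the require path is assumed:

const check = require('./auth/in_memory/checker'); // path assumed

const authData = {
    accounts: [{
        name: 'CustomAccount',
        email: 'customaccount1@setbyenv.com',
        arn: 'aws::iam:123456789012:root',
        canonicalID: '12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47qwer',
        shortid: '123456789012',
        keys: [{ access: 'accessKey1', secret: 'verySecretKey1' }],
    }],
};

if (check(authData)) {
    throw new Error('invalid authentication configuration');
}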

Some files were not shown because too many files have changed in this diff.