Compare commits

..

708 Commits

Author SHA1 Message Date
bert-e 0509e8ecd3 Merge branch 'bugfix/ARSN-143/include-json-files-in-tsc-include' into tmp/octopus/w/8.1/bugfix/ARSN-143/include-json-files-in-tsc-include 2022-04-12 22:30:14 +00:00
Ronnie Smith 42ccd59526
bugfix: ARSN-143 include json files in tsc build 2022-04-12 15:28:21 -07:00
bert-e 6c6ee31f34 Merge branch 'q/1776/7.10/feature/ARSN-128/update-package-version' into tmp/normal/q/8.1 2022-04-12 18:37:07 +00:00
Ronnie Smith b58b4d0773
Merge remote-tracking branch 'origin/feature/ARSN-128/update-package-version' into w/8.1/feature/ARSN-128/update-package-version 2022-04-11 18:21:13 -07:00
Ronnie Smith 9a0915d40e
feature: ARSN-128 fix linting issues 2022-04-06 14:47:38 -07:00
Ronnie Smith 36d3a67a68
Merge remote-tracking branch 'origin/feature/ARSN-128/move-metdata-data-from-8-to-7' into w/8.1/feature/ARSN-128/move-metdata-data-from-8-to-7 2022-04-06 14:44:26 -07:00
Xin LI 3d156a58dd bugfix: ARSN-129-downgrade-socketio 2022-03-31 14:26:00 +02:00
Xin LI 7737ec4904 bugfix: ARSN-129-upgrade-socketio-fix-critical 2022-03-31 11:39:04 +02:00
Kerkesni d18f4d10bd
bump version 8.1.38 2022-03-25 14:18:53 +01:00
Kerkesni e0bc4383cd
bugfix: ARSN-124 add missing vFormat param 2022-03-25 14:17:57 +01:00
bert-e de17f221bf Merge branch 'bugfix/ARSN-116-fix-listing-master-returning-phd' into q/8.1 2022-03-25 09:46:53 +00:00
Kerkesni d46301b498
bump version 8.1.37 2022-03-25 10:32:35 +01:00
Kerkesni 0bb2a44912
bugfix: ARSN-116 Fixed listing of master keys
When the last version of an object is deleted
a placeholder master key is created and is kept
for 15 seconds before it gets repaired.

To avoid listing the PHD keys we added a transform
stream that replaces PHD master keys with the last
version of that object

Added tests for PHD cases + fixed unit tests
2022-03-25 10:32:35 +01:00
Guillaume Hivert 2c1fb773fd ARSN-100 Forward merge 2022-03-24 15:10:11 +01:00
Xin.LI 3528c24276
Update errors/arsenalErrors.json
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2022-03-21 14:13:26 +01:00
Xin LI 6d8294d0c0 improvement: ARSN-123 bump version 2022-03-21 13:31:53 +01:00
Xin LI 23bfc17a26 improvement: ARSN-123 modify BucketAlreadyOwnedByYou error description 2022-03-21 13:27:17 +01:00
bert-e 0f6a1f2982 Merge branch 'w/8.1/bugfix/ARSN-105/locations' into tmp/octopus/q/8.1 2022-03-16 14:55:09 +00:00
Nicolas Humbert bff13f1190 ARSN-105 test: add properties to ObjectMD location property 2022-03-15 15:30:02 -04:00
bert-e c857e743c8 Merge branch 'w/7.10/bugfix/ARSN-105/locations' into tmp/octopus/w/8.1/bugfix/ARSN-105/locations 2022-03-15 18:47:38 +00:00
Kerkesni 5f8edd35e9
v8.1.34 2022-03-15 14:35:49 +01:00
Kerkesni 3c4359b696
bugfix: ARSN-115 Fix listing algo returning phd
In the metadata backend, PHD master is not created in v1 hence
it wasn't considered in the skipping method, in the Mongo implementation
however the PHD keys need to be taken into consideration as they are still
created
2022-03-15 14:35:49 +01:00
Kerkesni 8ecf1d9808
feature: ARSN-112 bump to v8.1.33 2022-03-14 18:14:43 +01:00
Kerkesni 74e4934654
feature: ARSN-83 update putObject function & tests
A new step is added to each of the putObjectVerCase functions
(except the ones that get called when versioning is off) where
we check if the version we are putting is a delete marker and if
it's the latest version then we delete the master.
2022-03-14 15:10:54 +01:00
Kerkesni eac87fc9de
feature: ARSN-83 update deleteObject functions & tests
Master version gets deleted when last version is a delete marker.
When deleting a versioned object we add a new step where we look for
the latest version if master was not found.
When a version is deleted the master is automatically created/updated
and set to a PHD value, we then look for the latest version if it's
a delete marker or if no other versions available the PHD master gets
deleted, else it gets repaired asynchronously.
2022-03-14 15:10:44 +01:00
Kerkesni e2be4d895d
feature: ARSN-83 update getObject function & tests
Master version is deleted when last version is a delete marker,
thus we don't return NoSuchKey error anymore when we don't find a master.
Now we look for the latest version and return it if it exists when master not found
2022-03-14 15:10:32 +01:00
bert-e c0f7ebbaa9 Merge branch 'feature/ARSN-76-support-new-bucket-format' into q/8.1 2022-03-14 11:00:03 +00:00
Kerkesni 60fcedc251
feature: ARSN-76 Updated the MongoClientInterface unit tests
Updated tests to use the correct object keys when querying the db
2022-03-14 11:54:31 +01:00
Kerkesni 10ef395501
feature: ARSN-76 Updated deleteObjectWithCond & putObjectWithCond functions
Updated functions to use the new object key format
Added unit test + updated functional tests to use vFormat
2022-03-14 11:54:31 +01:00
Kerkesni d1c8e67901
feature: ARSN-76 Updated listObject function
Added support to listing algo returning parameters requesting
dual synchronized listings, as now for v1 buckets master and version
keys have different prefixes

Added unit tests and updated functional tests to list in both bucket formats

Updated listMultipartUploads to call the internal listing function with the
correct params
2022-03-14 11:54:31 +01:00
Kerkesni 266aabef37
feature: ARSN-76 Updated deleteObject functions
Updated deleteObject function and its internal
functions to support the new object key format

Added unit tests + updated functional tests to test
both formatting versions
2022-03-14 11:54:31 +01:00
Kerkesni b63c909808
feature: ARSN-76 updated getObject function
Updated getObject function to get bucket vFormat
and use it to correctly format the object key

Added unit tests & updated functional tests to test
both formatting versions

Updated functional tests to use proper versions to avoid
confusion
2022-03-14 11:54:30 +01:00
Kerkesni 02ee339214
feature: ARSN-76 Updated putObject functions
Updated putObject function to first get the bucket vFormat
attribute and pass it to internal functions

Updated putObjectVerCase functions to use formatting helper
functions to take into account the new vFormat bucket attribute

Updated getLatestVersion to take into account the new key formatting

Added unit tests & updated functional tests to use both key formats
2022-03-14 11:54:30 +01:00
Kerkesni 5ca7f86350
feature: ARSN-76 Added support for new bucket key format attribute
Bucket metadata now has a new attribute vFormat that stores
the version of key formatting used for the bucket's objects

Added utility helper functions that format the keys according to
the new attribute + unit tests

Added cache for the vFormat attribute, because of the additional
db calls that will be added

Updated putBucketAttributes so that it only updates the required
values otherwise it overwrites the vFormat

Added helper function that gets bucket vFormat
2022-03-14 11:54:30 +01:00
Kerkesni 50a4fd8dc1
improvement: ARSN-110 document new bucket key format 2022-03-14 11:43:45 +01:00
bert-e 5de0c2a7da Merge branch 'improvement/ARSN-88-add-MongoClientInterface-tests' into q/8.1 2022-03-01 15:34:24 +00:00
Kerkesni b942516dca
improvement: ARSN-88 Fix withCond tests 2022-03-01 16:29:29 +01:00
Kerkesni 54181af522
improvement: ARSN-88 Add deleteObjectMD tests 2022-03-01 16:28:04 +01:00
Kerkesni 21af204956
improvement: ARSN-88 Add getObjectMD tests 2022-03-01 16:25:27 +01:00
Kerkesni 68a27be345
improvement: ARSN-88 Add listObject tests 2022-03-01 16:25:17 +01:00
Kerkesni 06350ffe15
improvement: ARSN-88 Add putObjectMD tests 2022-03-01 16:24:19 +01:00
Taylor McKinnon 5da4cd88ff v8.1.32 2022-02-24 11:26:58 -08:00
bert-e 6bb68ee0e3 Merge branch 'feature/ARSN-75/support_abortmpu_put' into q/8.1 2022-02-24 19:09:16 +00:00
Taylor McKinnon 9a4bae40e6 ft(ARSN-75): Add support for AbortMPU PUT 2022-02-24 10:15:15 -08:00
bert-e 54e9635cab Merge branch 'feature/ARSN-84-switch-to-jest' into q/8.1 2022-02-24 09:52:09 +00:00
Vianney Rancurel b8f803338b ft: ARSN-95 Skip missing in index.js 2022-02-23 15:32:17 -08:00
Guillaume Hivert 4a1215adb5 ARSN-84 Fix coverage by using Istanbul and Jest
Jest coverage is dysfunctional because it's not able to cover code in
spawned subprocesses. Istanbul can, so the final process is to launch
nyc and jest together. Jest emit some coverage and nyc is getting the
coverage and its own coverage to emit the proper final coverage files.
2022-02-23 14:36:34 +01:00
Guillaume Hivert fc8d7532c6 ARSN-84 Correct Jest configuration for test suites and coverage
Thanks to files renaming, we can follow as much as we can the jest
default configurations. The options are gone, and we're specifying only
the maxWorkers (because the test suite is linear, and bugs if we're
running it in parallel) and the collect coverage files.
The coverage script itself is joined into one command instead of three
to leverage the Jest builtin coverage.
2022-02-23 14:36:34 +01:00
Guillaume Hivert 1818bfe6c8 ARSN-84 Rename all test files from [name].js to [name].spec.js
In order to simplify jest configuration, we have to rename the files to
follow the jest convention (to have a .spec.js extension for test
files).
2022-02-23 14:36:34 +01:00
Guillaume Hivert 5cd929ea8a ARSN-84 Fix Jest timeout for long HealthProbeServer 2022-02-23 14:36:34 +01:00
Guillaume Hivert 1138ce43af ARSN-84 Fix Jest bug in _arsenalError
You can check out the bug at
https://github.com/facebook/jest/issues/2549.
The bug in inherent to jest and is a known bug since years, because jest
is switching the VM from node to a custom VM from jest. Jest injects
its own set of globals. The Error provided by Jest is different from
the Error provided by node and the test `err instanceof Error` is false.
Error:
```
 Expected value to be equal to:
      true
 Received:
      false
```
2022-02-23 14:36:34 +01:00
Guillaume Hivert 8b4e9cc0aa ARSN-84 Fix redis commands in functional tests
The switch from mocha to Jest introduces some tests bugs.
As far as we can tell, jest is quicker than mocha, creating some
weird behaviour: some commands send to redis (with ioredis)
are working, and some aren’t. Our conclusion is that redis needs
to queue requests offline to avoid micro-disconnections from
redis in development. Otherwise, we got the following error:
```
  - StatsModel class › should correctly record a new request by default
one increment

    assert.ifError(received, expected)

    Expected value ifError to:
      null
    Received:
      [Error: Stream isn't writeable and enableOfflineQueue options is
false]

    Message:
      ifError got unwanted exception: Stream isn't writeable and
enableOfflineQueue options is false
```
Switching enableOfflineQueue to true makes the test suite to
success.
2022-02-23 14:36:34 +01:00
Guillaume Hivert ff6ea2a6d5 ARSN-84 Fix linting with correct indentation and trailing commas 2022-02-23 14:36:34 +01:00
Guillaume Hivert 3b3600db92 ARSN-84 Introduce Jest and reconfigure ESLint
Add Jest as a test runner as a mocha replacement to have the
TS compiling on the fly and allowing mixed sources TS/JS in the
sources (and replacing the before and after of mocha with beforeAll
and afterAll of Jest), and adding some ESLint configuration to make
ESLint happy.
2022-02-23 14:35:49 +01:00
bert-e 51c5247d01 Merge branch 'bugfix/ARSN-86-reactivate-ft-tests' into q/8.1 2022-02-19 16:17:26 +00:00
Vianney Rancurel 7813a312b5 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-87-versioning-exports-missing' into w/8.1/feature/ARSN-87-versioning-exports-missing 2022-02-18 17:15:32 -08:00
Thomas Carmet 35a4552c0f ARSN-86 reactivate functional tests
Discovering by accident that we removed the functional tests
on 8.1 for arsenal a while back.
Re activating those tests
2022-02-18 16:17:07 -08:00
Vianney Rancurel 0dbdff3a00 ft: ARSN-85 export http utils for Armory 2022-02-18 15:11:16 -08:00
bert-e 80b91d724d Merge branches 'w/8.1/feature/ARSN-64-sorted-set' and 'q/1714/7.10/feature/ARSN-64-sorted-set' into tmp/octopus/q/8.1 2022-02-16 23:00:46 +00:00
bert-e 40843d4bed Merge branch 'w/7.10/improvement/ARSN-46/rollback_unneeded_changes_stab' into tmp/octopus/w/8.1/improvement/ARSN-46/rollback_unneeded_changes_stab 2022-02-16 22:56:41 +00:00
bert-e b3fd77d08f Merge branch 'feature/ARSN-64-sorted-set' into tmp/octopus/w/8.1/feature/ARSN-64-sorted-set 2022-02-16 22:49:07 +00:00
Taylor McKinnon ed6bc63e75 Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-46/rollback_unneeded_changes' into w/8.1/improvement/ARSN-46/rollback_unneeded_changes 2022-02-14 11:23:46 -08:00
Rached Ben Mustapha c95f84e887 Merge remote-tracking branch 'origin/feature/ARSN-62-awsv4-signature-session-token' into w/8.1/feature/ARSN-62-awsv4-signature-session-token 2022-02-08 22:55:47 +00:00
Nicolas Humbert 3c9ab1bb99 update linter 2022-02-07 20:50:40 +01:00
Nicolas Humbert 3c30adaf85 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-21/version' into w/8.1/feature/ARSN-21/version 2022-02-07 19:34:12 +01:00
bert-e 98edeae3f2 Merge branches 'w/8.1/feature/ARSN-21/UpgradeToNode16' and 'q/1687/7.10/feature/ARSN-21/UpgradeToNode16' into tmp/octopus/q/8.1 2022-02-07 17:06:52 +00:00
bert-e 4f15e4f267 Merge branches 'development/8.1' and 'w/7.10/feature/ARSN-21/UpgradeToNode16' into tmp/octopus/w/8.1/feature/ARSN-21/UpgradeToNode16 2022-02-07 17:04:19 +00:00
Xin LI 68c5b42e6f feature: ARSN-58-add-MD-actionMap 2022-02-02 23:55:36 +01:00
Xin LI 6933bb8422 Merge remote-tracking branch 'origin/development/8.1' into development/8.1
# Conflicts:
#	package.json
2022-02-02 23:55:08 +01:00
Xin LI 7e180fcad8 feature: ARSN-58-bump-version 2022-02-02 23:51:36 +01:00
Naren 41d482cf7d bf: ARSN-61 fix merge issues
Fix merge issues from ARSN-57.
2022-02-02 14:31:52 -08:00
Nicolas Humbert 1e334924f9 fix Update document requires atomic operators 2022-02-01 15:52:53 +01:00
Naren 49239cc76e Merge remote-tracking branch 'origin/w/7.10/bugfix/ARSN-57-correct-logging-client-ip' into w/8.1/bugfix/ARSN-57-correct-logging-client-ip 2022-01-28 17:23:48 -08:00
williamlardier 8d17fcac0f
ARSN-56: bump arsenal version to v8.1.23 2022-01-26 11:48:24 +01:00
williamlardier 1c3fcc5a65
ARSN-56: fix uppercase at the beginning of actionmap 2022-01-26 11:48:05 +01:00
Ronnie Smith f5b0f1e082
Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-21/UpgradeToNode16' into w/8.1/feature/ARSN-21/UpgradeToNode16 2022-01-24 14:34:29 -08:00
williamlardier 708aab707d
ARSN-55: v8.1.22 as tag 8.1.21 already exists 2022-01-24 16:22:40 +01:00
williamlardier 3a1cbdeedb
ARSN-55: add new action map 2022-01-24 16:22:09 +01:00
bert-e faf5701248 Merge branch 'w/7.10/feature/ARSN-54/RevertNode16Changes' into tmp/octopus/w/8.1/feature/ARSN-54/RevertNode16Changes 2022-01-20 23:21:12 +00:00
Ronnie Smith 4cbb5a5dd6
feature: ARSN-54 Revert Node16 changes 2022-01-20 13:07:05 -08:00
bert-e 22eca9b61c Merge branch 'w/7.10/feature/ARSN-54/RevertNode16Changes' into tmp/octopus/w/8.1/feature/ARSN-54/RevertNode16Changes 2022-01-20 20:19:44 +00:00
Naren 59a679831b Merge remote-tracking branch 'origin/improvement/ARSN-53-bump-to-7-10-6' into w/8.1/improvement/ARSN-53-bump-to-7-10-6 2022-01-19 19:54:13 -08:00
bert-e 26da124e27 Merge branches 'w/8.1/bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails' and 'q/1674/7.10/bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails' into tmp/octopus/q/8.1 2022-01-20 01:05:54 +00:00
bert-e 47b121c17b Merge branches 'w/8.1/improvement/ARSN-21-Upgrade-Node-to-16' and 'q/1649/7.10/improvement/ARSN-21-Upgrade-Node-to-16' into tmp/octopus/q/8.1 2022-01-20 00:09:24 +00:00
Ronnie Smith c605c1e1a2
feature: ARSN-21 add missing abort method 2022-01-19 15:17:57 -08:00
bert-e 994bd0a6be Merge branch 'bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails' into tmp/octopus/w/8.1/bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails 2022-01-19 22:41:56 +00:00
Ronnie Smith 1e2a6c387e
improvement: ARSN-21 remove close listener 2022-01-19 12:33:45 -08:00
Ronnie Smith 1348fc820f
Merge branch 'w/8.1/improvement/ARSN-21-Upgrade-Node-to-16' of github.com:scality/Arsenal into w/8.1/improvement/ARSN-21-Upgrade-Node-to-16 2022-01-19 12:03:38 -08:00
Ronnie Smith 79a363786f
feature: ARSN-21 update node-fnctl 2022-01-19 10:07:35 -08:00
bert-e 86e3c02126 Merge branches 'w/8.1/bugfix/ARSN-35/add-http-header-too-large-error' and 'q/1611/7.10/bugfix/ARSN-35/add-http-header-too-large-error' into tmp/octopus/q/8.1 2022-01-19 00:48:16 +00:00
bert-e 8f6731aa6a Merge branch 'w/7.10/bugfix/ARSN-35/add-http-header-too-large-error' into tmp/octopus/w/8.1/bugfix/ARSN-35/add-http-header-too-large-error 2022-01-18 17:43:38 +00:00
Artem Bakalov ea2f8ebd01 v8.1.19 2022-01-14 16:17:07 -08:00
Artem Bakalov b640bbb45e S3C-2818 - forwards 408 errors as 400 to client 2022-01-14 15:33:51 -08:00
Taylor McKinnon d9fcf275ce Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-46/add_isAborted_flag' into w/8.1/improvement/ARSN-46/add_isAborted_flag 2022-01-13 13:55:05 -08:00
Ronnie Smith 66b03695c3
feature: ARSN-21 update node-fnctl 2022-01-12 18:25:02 -08:00
Rahul Padigela 3575e651e3 clean yarn cache 2022-01-11 19:01:50 -08:00
Rahul Padigela fa19a34306 Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-21-Upgrade-Node-to-16' into w/8.1/improvement/ARSN-21-Upgrade-Node-to-16 2022-01-11 18:38:44 -08:00
Xin LI 3ab7ef4e8d bugfix: ARSN-45 bump version 8.1.18 2022-01-06 10:07:21 +01:00
Xin LI e531d3eae1 bugfix: ARSN-45-mergePolicy-ignore-versionId 2022-01-05 23:24:25 +01:00
Nicolas Humbert 9ebcc9690e bump version 8.1.17 2021-12-14 10:36:50 -05:00
Nicolas Humbert 95759509cb ARSN-44 Expose backbeat metrics on standard path 2021-12-14 10:34:40 -05:00
williamlardier 6cdae52d57
improvement: ARSN-43 bump package version 2021-11-29 16:17:14 +01:00
williamlardier 995cb59db4
improvement: ARSN-43 support encrypted STS tokens 2021-11-29 15:35:37 +01:00
Alexander Chan 385e34b472 Merge remote-tracking branch 'origin/feature/ARSN-33/addExpirationHeaders' into w/8.1/feature/ARSN-33/addExpirationHeaders 2021-11-19 18:30:14 -08:00
Jonathan Gramain f102c5ec8c Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-42-addNullUploadIdField' into w/8.1/improvement/ARSN-42-addNullUploadIdField 2021-11-18 18:26:32 -08:00
bert-e e912617f02 Merge branch 'improvement/ARSN-39-support-OIDC-principals-and-conditions' into q/8.1 2021-11-12 16:02:39 +00:00
williamlardier 3abde0bc74
improvement: ARSN-39 support keycloak roles condition 2021-11-12 09:28:48 +01:00
bert-e cf49c7d8bf Merge branch 'bugfix/ARSN-40/countItemsParseContentLength' into q/8.1 2021-11-09 21:29:26 +00:00
Alexander Chan e6e49a70c9 ARSN-40: fix count-items helper to parse content-length as number
Update `MongoClientInterface::getObjectMDStats` to parse entries'
`content-length` as numbers. This is needed to avoid performing
calculation with possible mixed types.

The ticket [ZENKO-3711](https://scality.atlassian.net/browse/ZENKO-3711)
tracks the source of the string typed `content-length` insert.
2021-11-09 11:07:27 -08:00
Rached Ben Mustapha 77f971957b feature: support mongodb collection sharding 2021-11-09 00:57:47 +00:00
Ronnie Smith ed1d6c12c2
feature: ARSN-34 Add patch location constraints to index
* also update package.json for new version
2021-11-05 09:35:42 -07:00
williamlardier 27f17f9535
improvement: ARSN-39 support keycloak groups condition 2021-11-05 14:58:29 +01:00
williamlardier 4658651593
improvement: ARSN-39 support OIDC principals 2021-11-05 14:54:38 +01:00
Jonathan Gramain 7af6a73b3b Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-38-replayPrefixHiddenInListings' into w/8.1/feature/ARSN-38-replayPrefixHiddenInListings 2021-11-04 15:23:54 -07:00
bert-e 8728ff5c80 Merge branch 'feature/ARSN-34_AddPatchLocations' into q/8.1 2021-11-04 21:12:47 +00:00
Ronnie Smith 7c16652e57
feature: ARSN-34 Add patch locations from cloudserver 2021-11-03 16:07:16 -07:00
bert-e 5a9d667936 Merge branch 'w/7.10/feature/ARSN-37-addUploadId' into tmp/octopus/w/8.1/feature/ARSN-37-addUploadId 2021-11-02 00:28:00 +00:00
Rahul Padigela 29dd069a5f chore: update version 2021-10-26 14:56:56 -07:00
Rahul Padigela f1793bfe51 Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-31-update-version' into w/8.1/improvement/ARSN-31-update-version 2021-10-26 14:55:38 -07:00
Rahul Padigela b42f1d3943 Merge remote-tracking branch 'origin/w/7.10/bugfix/ARSN-31-invalid-query-params' into w/8.1/bugfix/ARSN-31-invalid-query-params 2021-10-25 17:29:33 -07:00
Naren c27b359fba improvement: ARSN-30 update arsenal version
update arsenal version to 8.1.7
2021-10-22 13:47:46 -07:00
Alexandre Lavigne bb8bdbc6ea ZENKO-36446 - use bucket name in delete object tagging
(cherry picked from commit 3205ecf8c7)
2021-10-22 13:26:39 -07:00
Nicolas Humbert 413f0c9433 ARSN-27 update version to 8.1.6 2021-10-19 16:01:09 -04:00
Nicolas Humbert ab3fa2f13d ARSN-26 interrogate the default region for getBucketLocation 2021-10-19 11:01:17 -04:00
Naren bfbda5d38b improvement: ARSN-25 update version to 8.1.5 2021-10-12 13:18:12 -07:00
Naren 2e6b1791bb improvement: ARSN-24 use azure-storage@2.10.3 2021-10-11 21:42:17 -07:00
Naren 1f8cfecf43 Revert "ARSN-22 - Fix bug in `onPutOgjectTaggig` and `getObjectTagging`"
This reverts commit 6a250feea9.
2021-10-11 21:38:33 -07:00
Alexandre Lavigne 6a250feea9
ARSN-22 - Fix bug in `onPutOgjectTaggig` and `getObjectTagging`
Use actual string instead of object when building object key
(for the bucket part).
2021-10-05 11:09:00 +02:00
Thomas Carmet 0a33d4b74e ARSN-20 remove condition regarding CI behavior 2021-09-23 11:42:36 -07:00
Thomas Carmet 9a544b9890 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-20-migrate-github-actions' into w/8.1/feature/ARSN-20-migrate-github-actions 2021-09-23 11:42:11 -07:00
Ronnie Smith a2b6846e2e
bugfix: ARSN-19 Add probe utils to index.js 2021-09-17 13:38:18 -07:00
Ronnie Smith 3fdfc7196b
feature: ARSN-19 bump version 2021-09-17 11:08:20 -07:00
Ronnie Smith f602fb9601
feature: ARSN-18 and ARSN-19
* Probe server should not check for strings to handle 500
* Move sendError and sendSuccess to Util.js
* Export sendError and sendSuccess for public uses
2021-09-16 16:11:32 -07:00
Thomas Carmet c237a25448 Merge remote-tracking branch 'origin/feature/ARSN-17-fixup-7-10-mistake' into w/8.1/feature/ARSN-17-fixup-7-10-mistake 2021-08-31 10:57:53 -07:00
Thomas Carmet 5aaec6a4e6 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-17-setup-package.json' into w/8.1/feature/ARSN-17-setup-package.json 2021-08-31 10:00:01 -07:00
Thomas Carmet 11278e7334 ARSN-16 pin sproxydclient version 2021-08-30 13:44:17 -07:00
bert-e c0fe2efbc2 Merge branch 'w/7.10/feature/ARSN-12-bumpArsenalVersion-stabilization' into tmp/octopus/w/8.1/feature/ARSN-12-bumpArsenalVersion-stabilization 2021-08-26 21:48:37 +00:00
Jonathan Gramain b0633d8a13 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-12-bumpArsenalVersion' into w/8.1/feature/ARSN-12-bumpArsenalVersion 2021-08-26 14:38:09 -07:00
bert-e b27caf5814 Merge branch 'w/7.10/feature/ARSN-12-condition-put-backport' into tmp/octopus/w/8.1/feature/ARSN-12-condition-put-backport 2021-08-25 21:07:37 +00:00
bert-e f5f6cb5692 Merge branch 'w/7.10/feature/ARSN-12-introduce-cond-put' into tmp/octopus/w/8.1/feature/ARSN-12-introduce-cond-put 2021-08-25 20:54:21 +00:00
bert-e 87ba4a7b4a Merge branches 'w/8.1/feature/ARSN-11-bump-werelogs' and 'q/1538/7.10/feature/ARSN-11-bump-werelogs' into tmp/octopus/q/8.1 2021-08-13 17:56:09 +00:00
bert-e 9ff605f875 Merge branch 'w/7.10/improvement/ARSN-13-expose-isResourceApplicable-evaluator' into tmp/octopus/w/8.1/improvement/ARSN-13-expose-isResourceApplicable-evaluator 2021-08-13 03:08:53 +00:00
Thomas Carmet 4e160db87d Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-11-bump-werelogs' into w/8.1/feature/ARSN-11-bump-werelogs 2021-08-12 10:08:43 -07:00
bert-e dc698f4d5c Merge branch 'improvement/ARSN-9-kmipDeepHealthcheck' into tmp/octopus/w/8.1/improvement/ARSN-9-kmipDeepHealthcheck 2021-08-04 20:07:07 +00:00
bert-e 8c7907f753 Merge branch 'bugfix/ARSN-8_RemoveHTTPCodeAndMessageFromLog' into tmp/octopus/w/8.1/bugfix/ARSN-8_RemoveHTTPCodeAndMessageFromLog 2021-08-03 18:12:57 +00:00
bert-e 395a881d92 Merge branch 'w/7.10/bugfix/ARSN-7_SkipHeadersOn304' into tmp/octopus/w/8.1/bugfix/ARSN-7_SkipHeadersOn304 2021-07-30 23:46:09 +00:00
bert-e 3d6306d2a3 Merge branches 'w/8.1/feature/ARSN-5/addBucketInfoUIDField' and 'q/1521/7.10/feature/ARSN-5/addBucketInfoUIDField' into tmp/octopus/q/8.1 2021-07-28 16:58:33 +00:00
bert-e 681740fbe7 Merge branch 'bugfix/ARSN-6/reverse-ARSN_3' into tmp/octopus/w/8.1/bugfix/ARSN-6/reverse-ARSN_3 2021-07-28 15:10:07 +00:00
Alexander Chan d381ec14d8 Merge remote-tracking branch 'origin/feature/ARSN-5/addBucketInfoUIDField' into w/8.1/feature/ARSN-5/addBucketInfoUIDField 2021-07-27 17:05:23 -07:00
bert-e 0bdcd866bc Merge branch 'bugfix/ARSN-4-exceptionWhenKMSIsDown' into tmp/octopus/w/8.1/bugfix/ARSN-4-exceptionWhenKMSIsDown 2021-07-23 18:49:46 +00:00
Jonathan Gramain 856a1634d4 Merge remote-tracking branch 'origin/improvement/S3C-4312-backbeatEncryptionSupport-fixup' into w/8.1/improvement/S3C-4312-backbeatEncryptionSupport-fixup 2021-07-21 13:32:59 -07:00
Jonathan Gramain 2921864aac feature: ARSN-2 fix renamed encryption helper
The name of the Cloudserver config helper "isAWSServerSideEncrytion"
has been renamed to "isAWSServerSideEncryption" to fix a typo, in commit
f336541f6a

This new name needs to be fixed in the AwsBackend data location.
2021-07-20 17:31:41 -07:00
bert-e 4665f3da5c Merge branch 'feature/ARSN-2_support_per_object_sse_in_data_wrapper' into q/8.1 2021-07-20 23:51:37 +00:00
Jonathan Gramain 0df0d952d2 Merge remote-tracking branch 'origin/improvement/S3C-4312-backbeatEncryptionSupport' into w/8.1/improvement/S3C-4312-backbeatEncryptionSupport 2021-07-20 14:48:13 -07:00
bert-e 54eb3ede5f Merge branch 'bugfix/ARSN-3/remove-dirty-old-expect-header-fix' into tmp/octopus/w/8.1/bugfix/ARSN-3/remove-dirty-old-expect-header-fix 2021-07-12 14:19:53 +00:00
bert-e be4dea481d Merge branch 'bugfix/ARSN-3/remove-dirty-old-expect-header-fix' into tmp/octopus/w/8.1/bugfix/ARSN-3/remove-dirty-old-expect-header-fix 2021-07-12 14:11:54 +00:00
Rached Ben Mustapha d15e2d5df6 Merge remote-tracking branch 'origin/feature/S3C-4505-fix-user-arn-validation' into w/8.1/feature/S3C-4505-fix-user-arn-validation 2021-07-09 00:25:16 +00:00
Taylor McKinnon 93503cf505 ft(ARSN-2): Support per object encryption in data wrapper 2021-07-07 11:12:04 -07:00
bert-e 0f63de2f05 Merge branch 'bugfix/ARSN-1/remove-contentmd5-check-azure' into tmp/octopus/w/8.1/bugfix/ARSN-1/remove-contentmd5-check-azure 2021-07-06 23:35:39 +00:00
bert-e 16a5e6a550 Merge branches 'w/8.1/feature/S3C-4614/assumerole' and 'q/1495/7.10/feature/S3C-4614/assumerole' into tmp/octopus/q/8.1 2021-06-29 21:40:05 +00:00
Rached Ben Mustapha 864d2e8a28 Merge remote-tracking branch 'origin/feature/S3C-4614/assumerole' into w/8.1/feature/S3C-4614/assumerole 2021-06-29 20:35:14 +00:00
vrancurel 15703aafca Merge remote-tracking branch 'origin/feature/S3C-4552-remove-test-duplicate' into w/8.1/feature/S3C-4552-remove-test-duplicate 2021-06-29 13:04:53 -07:00
bert-e db000bc5e1 Merge branches 'w/8.1/feature/S3C-4552-tiny-version-ids' and 'q/1480/7.10/feature/S3C-4552-tiny-version-ids' into tmp/octopus/q/8.1 2021-06-29 19:27:16 +00:00
vrancurel 06c35c15a5 Merge remote-tracking branch 'origin/feature/S3C-4552-tiny-version-ids' into w/8.1/feature/S3C-4552-tiny-version-ids 2021-06-29 11:43:26 -07:00
bert-e 68c8189f53 Merge branches 'w/8.1/improvement/S3C-4110/backport' and 'q/1479/7.10/improvement/S3C-4110/backport' into tmp/octopus/q/8.1 2021-06-29 12:15:38 +00:00
bert-e 041731e6eb Merge branch 'bugfix/S3C-3744-fixEncryptionActions' into tmp/octopus/w/8.1/bugfix/S3C-3744-fixEncryptionActions 2021-06-21 23:17:35 +00:00
Nicolas Humbert d51361ce06 S3C-4110 add lifecycle tests 2021-06-10 12:55:00 -05:00
Nicolas Humbert 453fd8b722 Merge remote-tracking branch 'origin/improvement/S3C-4110/backport' into w/8.1/improvement/S3C-4110/backport 2021-06-09 18:50:31 -05:00
bert-e 2621aa7e53 Merge branches 'w/8.1/bugfix/S3C-4257_StartSeqCanBeNull' and 'q/1472/7.10/bugfix/S3C-4257_StartSeqCanBeNull' into tmp/octopus/q/8.1 2021-06-08 08:18:01 +00:00
bert-e b4aeab77b9 Merge branch 'w/7.10/bugfix/S3C-4257_StartSeqCanBeNull' into tmp/octopus/w/8.1/bugfix/S3C-4257_StartSeqCanBeNull 2021-06-08 02:49:44 +00:00
bert-e e1a3b05330 Merge branches 'w/8.1/feature/S3C-3754_add_bucketDeleteEncryption_route' and 'q/1445/7.10/feature/S3C-3754_add_bucketDeleteEncryption_route' into tmp/octopus/q/8.1 2021-05-17 17:31:25 +00:00
bert-e 0151504158 Merge branch 'feature/S3C-3754_add_bucketDeleteEncryption_route' into tmp/octopus/w/8.1/feature/S3C-3754_add_bucketDeleteEncryption_route 2021-05-17 17:28:31 +00:00
bert-e 048e8b02bc Merge branch 'bugfix/S3C-4358-add-versioned-obj-lock-actions' into tmp/octopus/w/8.1/bugfix/S3C-4358-add-versioned-obj-lock-actions 2021-05-12 23:15:43 +00:00
bert-e 1d899efec8 Merge branches 'w/8.1/improvement/S3C-4336_add_BucketInfoModelVersion' and 'q/1436/7.10/improvement/S3C-4336_add_BucketInfoModelVersion' into tmp/octopus/q/8.1 2021-05-10 20:18:36 +00:00
Taylor McKinnon 4cb8f715e9 Merge remote-tracking branch 'origin/w/7.10/improvement/S3C-4336_add_BucketInfoModelVersion' into w/8.1/improvement/S3C-4336_add_BucketInfoModelVersion 2021-05-10 13:15:26 -07:00
bert-e 580dda4d48 Merge branch 'w/7.10/improvement/S3C-4336_add_BucketInfoModelVersion' into tmp/octopus/w/8.1/improvement/S3C-4336_add_BucketInfoModelVersion 2021-05-10 20:02:51 +00:00
bert-e a17054e3a4
Merge branch 'w/7.10/feature/S3C-4073_AddProbeServerToIndex' into tmp/octopus/w/8.1/feature/S3C-4073_AddProbeServerToIndex 2021-05-07 10:34:05 -07:00
bert-e a8df2b7b96 Merge branch 'w/7.10/feature/S3C-4073_add-new-probe-server' into tmp/octopus/w/8.1/feature/S3C-4073_add-new-probe-server 2021-04-30 19:56:03 +00:00
Taylor McKinnon d572fc953b Merge remote-tracking branch 'origin/feature/S3C-3748_add_PutBucketEncyption_handler' into w/8.1/feature/S3C-3748_add_PutBucketEncyption_handler 2021-04-29 10:10:04 -07:00
Alexander Chan 2a78d4f413 ZENKO-3368: add auth chain backend 2021-04-24 12:47:55 -07:00
Alexander Chan d2c7165214 ZENKO-3368: reorganize auth backend files 2021-04-23 12:11:09 -07:00
bert-e 1999a586fd Merge branch 'feature/S3C-3751_add_GetBucketEncryption_route' into tmp/octopus/w/8.1/feature/S3C-3751_add_GetBucketEncryption_route 2021-04-21 18:42:12 +00:00
bert-e a1c0dd2472 Merge branches 'w/8.1/bugfix/S3C-4275-versionListingWithDelimiterInefficiency' and 'q/1399/7.10/bugfix/S3C-4275-versionListingWithDelimiterInefficiency' into tmp/octopus/q/8.1 2021-04-14 01:17:38 +00:00
bert-e a22032f9a5 Merge branch 'bugfix/S3C-4245_enforce_bypassgovernancemode_policy' into tmp/octopus/w/8.1/bugfix/S3C-4245_enforce_bypassgovernancemode_policy 2021-04-13 20:25:57 +00:00
bert-e dd38e32797 Merge branch 'bugfix/S3C-4239-log-consumer-readrecords-callback-error' into tmp/octopus/w/8.1/bugfix/S3C-4239-log-consumer-readrecords-callback-error 2021-04-12 17:49:16 +00:00
bert-e 274bf80720 Merge branch 'w/7.10/bugfix/S3C-4275-versionListingWithDelimiterInefficiency' into tmp/octopus/w/8.1/bugfix/S3C-4275-versionListingWithDelimiterInefficiency 2021-04-10 00:16:31 +00:00
Ronnie Smith 25bd1f6111
Merge remote-tracking branch 'origin/w/7.10/feature/S3C-4262_BackportZenkoMetrics' into w/8.1/feature/S3C-4262_BackportZenkoMetrics 2021-04-06 02:46:29 -07:00
Jonathan Gramain 2d41b034aa Merge remote-tracking branch 'origin/w/7.10/dependabot/npm_and_yarn/development/7.4/mocha-8.0.1' into w/8.1/dependabot/npm_and_yarn/development/7.4/mocha-8.0.1 2021-04-02 13:13:53 -07:00
Rached Ben Mustapha bb8ec629bf bugfix: revert azure-storage to known working version 2021-03-31 23:15:37 -07:00
Rached Ben Mustapha 4bbaa83b87 bf: upgrade to latest hdclient 2021-03-26 15:02:02 -07:00
bert-e 58697f7915 Merge branch 'feature/S3C-4172-custom-filter' into tmp/octopus/w/8.1/feature/S3C-4172-custom-filter 2021-03-19 00:20:08 +00:00
Ronnie Smith bf4a6fe01b
feature: ZENKO-3266 Code Coverage Tracking 2021-02-19 14:21:30 -08:00
alexandre merle c703ba66e7 bugfix: S3C-2804: Use ordered bulk write and refacto condition
BulkWrite was using a value of 1 to ordered which was
not correctly understood and lead to using unordered
batch write.
Refactor the catch of 11000 error to allow a more
specific catch and use a retry before sending back
an internal error in case there is a
race condition between multiple S3 connector on the
same object versionId.
2021-02-10 18:57:38 +01:00
alexandre merle 20c77f9f85 improv: ZENKO-2153: upgrade azure node sdk
Upgrade azure node sdk in order to get fixes
2021-02-10 18:41:52 +01:00
alexandre merle edb27cc9a8 bugfix: ZENKO-3240: return error when validateAndFilterMpuParts return an error 2021-02-10 18:41:52 +01:00
alexandre merle 79e0dfa38f bugfix: ZENKO-3241: considering rangeStart 0 to be valid 2021-02-10 18:41:52 +01:00
alexandre merle e1118803e6 bugfix: ZENKO-3242: wrong check for backend data 2021-02-10 18:41:52 +01:00
bert-e 1230e72c49 Merge branch 'w/7.10/bugfix/S3C-3962-zero-size-stream' into tmp/octopus/w/8.1/bugfix/S3C-3962-zero-size-stream 2021-02-10 17:31:08 +00:00
bert-e 372df634c4 Merge branch 'bugfix/S3C-3904-more-s3-action-logs' into tmp/octopus/w/8.1/bugfix/S3C-3904-more-s3-action-logs 2021-02-05 20:01:33 +00:00
bert-e 2b96888eb7 Merge branch 'w/7.9/bugfix/S3C-3904-better-s3-action-logs' into tmp/octopus/w/8.1/bugfix/S3C-3904-better-s3-action-logs 2021-02-05 18:15:28 +00:00
bert-e a0909885f1 Merge branch 'w/7.9/bugfix/S3C-3904-better-s3-action-logs' into tmp/octopus/w/8.1/bugfix/S3C-3904-better-s3-action-logs 2021-02-05 01:10:09 +00:00
alexandre merle 5d100645aa
Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-2201-econnreset-rest-client-keep-alive' into w/8.1/bugfix/S3C-2201-econnreset-rest-client-keep-alive 2021-01-25 20:58:32 +01:00
bert-e 356edf8478 Merge branches 'w/8.1/bugfix/S3C-3425-client-ip-extraction-logic' and 'q/1325/7.9/bugfix/S3C-3425-client-ip-extraction-logic' into tmp/octopus/q/8.1 2020-12-31 20:26:19 +00:00
bert-e 1cfb869631 Merge branch 'bugfix/S3C-3554-bucket-notif-iam-policy-eval' into tmp/octopus/w/8.1/bugfix/S3C-3554-bucket-notif-iam-policy-eval 2020-12-22 18:37:47 +00:00
bert-e 0403ca65fc Merge branch 'w/7.9/bugfix/S3C-3425-client-ip-extraction-logic' into tmp/octopus/w/8.1/bugfix/S3C-3425-client-ip-extraction-logic 2020-12-17 18:01:26 +00:00
Rahul Padigela 269e005198 improvement: S3C-3727 update deprecated Buffer usage 2020-12-16 17:52:15 -08:00
bert-e 10627f51d1 Merge branch 'w/7.9/improvement/S3C-3653-add-fields' into tmp/octopus/w/8.1/improvement/S3C-3653-add-fields 2020-12-02 07:29:04 +00:00
bert-e aa5f714081 Merge branch 'w/7.9/improvement/S3C-3475-add-actions-in-logs' into tmp/octopus/w/8.1/improvement/S3C-3475-add-actions-in-logs 2020-11-14 00:03:52 +00:00
Jonathan Gramain d27c0577ee bugfix: ZENKO-2866 abort request on backend if S3 client disconnects
Call request.abort() on the backend side when an S3 client
disconnects, to avoid leaking sockets. Also make sure request.abort()
through the stream destroy() call is called asynchronously from the
stream creation.
2020-11-10 11:10:19 -08:00
Jonathan Gramain ff539645ea bugfix: ZENKO-2866 unit test showing the bug
Add a unit test counting the number of open sockets after an S3 client
closes the connection before the data backend has sent a GET request:
with the fix, there should be none remaining open.
2020-11-09 17:36:23 -08:00
Jonathan Gramain e5c3bb188a test: ZENKO-2866 fix DummyObjectStream
- fix the DummyObjectStream test helper _read implementation

- separate tests of the helper itself into a separate file to ease
  reuse outside the mocha test framework
2020-11-09 17:13:41 -08:00
Jonathan Gramain 2461b5c2f7 bugfix: ZENKO-2905 avoid error callback call in external AWS GET
Don't call the callback a second time on stream error during the
execution of a GET from AWS external backend. This in turn would break
the logic up in the code and cause a crash of a worker.
2020-11-03 16:20:02 -08:00
Jonathan Gramain 747307cac2 bugfix: ZENKO-2905 failing unit test showing the double callback 2020-11-03 16:19:53 -08:00
Jonathan Gramain 5942d9d70c test: ZENKO-2905 unit test for external backend streamed GET
Add a new unit test that does a GET on external backends with a
range. For this, introduced mocking for the backend functions
getObject() (AWS style) and getBlobToStream (Azure style) that return
bytes from a virtual 1GB object.
2020-11-03 16:19:44 -08:00
bert-e 8ed84786fc Merge branch 'w/7.9/bugfix/S3C-3388-httpServerKeepAliveTimeoutOption' into tmp/octopus/w/8.1/bugfix/S3C-3388-httpServerKeepAliveTimeoutOption 2020-10-15 19:29:57 +00:00
bert-e 1e40e76bb2 Merge branches 'w/8.1/feature/S3C-3185-CredentialReport-policy-check' and 'q/1268/7.9/feature/S3C-3185-CredentialReport-policy-check' into tmp/octopus/q/8.1 2020-10-08 22:24:21 +00:00
bert-e f4058dd6ef Merge branch 'w/7.9/bugfix/S3C-3402-removeWrongErrorLog' into tmp/octopus/w/8.1/bugfix/S3C-3402-removeWrongErrorLog 2020-10-08 20:48:44 +00:00
bert-e 04f7692bad Merge branch 'w/7.9/feature/S3C-3185-CredentialReport-policy-check' into tmp/octopus/w/8.1/feature/S3C-3185-CredentialReport-policy-check 2020-10-08 18:33:33 +00:00
bert-e 32752ac504 Merge branch 'feature/S3C-1801-policy-tag-condition-keys' into tmp/octopus/w/8.1/feature/S3C-1801-policy-tag-condition-keys 2020-09-30 19:17:28 +00:00
vrancurel 549f187893 bf: ZENKO-2768 encode tags properly
- Force LogReader to use MongoUtils.unescape().
- Change MongoUtils escape/unescape to encode only the property names
and not the values.
- Add a unit test to check that the escape/unescape works.
2020-09-03 09:52:55 -07:00
bert-e 93cd582e3a Merge branch 'bugfix/S3C-3303-put-empty-notif-config' into tmp/octopus/w/8.1/bugfix/S3C-3303-put-empty-notif-config 2020-09-02 22:11:45 +00:00
vrancurel 2582108f97 bf: reserialize tags in putobjectver4
This function obtains the old tags by calling getLatestVersion() which
automatically unserializes the tags, so we should reserialize them before
update, as done e.g. in repair().
2020-09-01 15:12:39 -07:00
bert-e b25867f9c2 Merge branch 'bugfix/ZENKO-2702_hardcodedReplicaSetName' into q/8.1 2020-09-01 18:25:37 +00:00
bert-e 7b60166d08 Merge branch 'feature/S3C-3183-getAccessKeyLastUsed-policy-support' into tmp/octopus/w/8.1/feature/S3C-3183-getAccessKeyLastUsed-policy-support 2020-08-31 11:50:48 +00:00
bert-e 8887a67261 Merge branch 'feature/S3C-2798-get-bucket-notif-queuearn' into tmp/octopus/w/8.1/feature/S3C-2798-get-bucket-notif-queuearn 2020-08-26 20:19:06 +00:00
Ronnie Smith 437ecc57f9 bugfix: Use replica set config instead of rs0 2020-08-25 14:03:06 -07:00
bert-e 759f0ef949 Merge branch 'feature/S3C-2797-queue-arn-parsing' into tmp/octopus/w/8.1/feature/S3C-2797-queue-arn-parsing 2020-08-21 20:18:48 +00:00
bert-e 0014aa3467 Merge branch 'feature/S3C-2797-export-notification-configuration' into tmp/octopus/w/8.1/feature/S3C-2797-export-notification-configuration 2020-08-20 20:50:09 +00:00
Dora Korpar 1727f4bd3f ft:S3C-2797 add bucketinfo test 2020-08-20 13:13:40 -07:00
Dora Korpar d71c8eac86 Merge remote-tracking branch 'origin/feature/S3C-2797-bucketinfo-update' into w/8.1/feature/S3C-2797-bucketinfo-update 2020-08-20 11:33:10 -07:00
bert-e 7eb6304956 Merge branch 'feature/S3C-2798-get-bucket-notification' into tmp/octopus/w/8.1/feature/S3C-2798-get-bucket-notification 2020-08-20 17:29:34 +00:00
bert-e ce98e9d104 Merge branches 'w/8.1/feature/S3C-2797-put-bucket-notifications' and 'q/1231/7.8/feature/S3C-2797-put-bucket-notifications' into tmp/octopus/q/8.1 2020-08-20 17:15:22 +00:00
bert-e 36d932bbce Merge branch 'feature/S3C-2797-put-bucket-notifications' into tmp/octopus/w/8.1/feature/S3C-2797-put-bucket-notifications 2020-08-20 17:02:39 +00:00
bert-e 7f2c40cf6d Merge branch 'feature/S3C-3229-bucketnotif-objmd-update' into tmp/octopus/w/8.1/feature/S3C-3229-bucketnotif-objmd-update 2020-08-10 20:33:35 +00:00
bert-e 6a78af0f39 Merge branch 'q/1160/7.8/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0' into tmp/normal/q/8.1 2020-07-21 00:44:31 +00:00
bert-e f73dc3dd68 Merge branch 'w/8.1/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0' into tmp/normal/q/8.1 2020-07-21 00:44:31 +00:00
Jonathan Gramain 8ec0611d08 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0 2020-07-20 17:41:42 -07:00
Jonathan Gramain 6baca6f1e2 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/debug-2.6.9' into w/8.1/dependabot/npm_and_yarn/development/7.4/debug-2.6.9 2020-07-20 15:50:38 -07:00
bert-e 78d62636c3 Merge branch 'w/7.8/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into tmp/octopus/w/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket 2020-07-07 23:38:26 +00:00
Dora Korpar 9b8f813d02 S3C-3118 remove redundant test 2020-07-01 15:56:16 -07:00
Dora Korpar 0f70366774 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3118-flatten-retention-objmd' into w/8.1/feature/S3C-3118-flatten-retention-objmd 2020-07-01 15:50:49 -07:00
bert-e fb8cf65091 Merge branches 'w/8.1/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2' and 'q/1157/7.8/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2' into tmp/octopus/q/8.1 2020-07-01 19:33:59 +00:00
Jonathan Gramain 7792f7c603 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2' into w/8.1/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2 2020-07-01 11:47:02 -07:00
bert-e 668d90b7d0 Merge branches 'w/8.1/feature/S3C-3112_ObjectLockEnabledSetterForBucket' and 'q/1174/7.8/feature/S3C-3112_ObjectLockEnabledSetterForBucket' into tmp/octopus/q/8.1 2020-06-30 21:39:40 +00:00
bert-e c1cfc59a0e Merge branches 'w/8.1/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' and 'q/1156/7.8/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' into tmp/octopus/q/8.1 2020-06-30 20:09:44 +00:00
bert-e f956b02387 Merge branch 'w/7.8/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' into tmp/octopus/w/8.1/dependabot/npm_and_yarn/development/7.4/temp-0.9.1 2020-06-30 20:07:06 +00:00
Jonathan Gramain 86bca2502e Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' into w/8.1/dependabot/npm_and_yarn/development/7.4/temp-0.9.1 2020-06-30 13:05:00 -07:00
bert-e 3aa49eed1d Merge branches 'w/8.1/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1' and 'q/1155/7.8/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1' into tmp/octopus/q/8.1 2020-06-30 19:59:56 +00:00
Jonathan Gramain a9c3b2218f Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1' into w/8.1/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1 2020-06-30 12:18:18 -07:00
Jonathan Gramain f459498e18 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/socket.io-2.3.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/socket.io-2.3.0 2020-06-29 19:22:19 -07:00
bert-e 55323aa7a2 Merge branch 'q/1153/7.8/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0' into tmp/normal/q/8.1 2020-06-30 00:53:55 +00:00
bert-e a20e875908 Merge branch 'w/8.1/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0' into tmp/normal/q/8.1 2020-06-30 00:53:55 +00:00
bert-e a3a83f5ec8 Merge branch 'q/1152/7.8/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0' into tmp/normal/q/8.1 2020-06-30 00:52:18 +00:00
bert-e 51d3312de8 Merge branch 'w/8.1/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0' into tmp/normal/q/8.1 2020-06-30 00:52:18 +00:00
Ilke 6383d14d49 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3112_ObjectLockEnabledSetterForBucket' into w/8.1/feature/S3C-3112_ObjectLockEnabledSetterForBucket 2020-06-29 14:49:35 -07:00
Jonathan Gramain 0e4035d45b Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0 2020-06-29 12:11:09 -07:00
Jonathan Gramain a18285ced8 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0 2020-06-29 12:02:12 -07:00
Rahul Padigela dc4e1829fc Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/xml2js-0.4.23' into w/8.1/dependabot/npm_and_yarn/development/7.4/xml2js-0.4.23 2020-06-28 21:15:18 -07:00
bert-e 3b438e03cd Merge branch 'w/7.8/dependabot/add-v2-config-file' into tmp/octopus/w/8.1/dependabot/add-v2-config-file 2020-06-29 03:14:01 +00:00
bert-e f2787ec013 Merge branches 'w/8.1/feature/S3C-3040-object-lock-iam-policies' and 'q/1133/7.8/feature/S3C-3040-object-lock-iam-policies' into tmp/octopus/q/8.1 2020-06-26 23:12:29 +00:00
bert-e 560ccef3ec Merge branch 'w/7.8/feature/S3C-3040-object-lock-iam-policies' into tmp/octopus/w/8.1/feature/S3C-3040-object-lock-iam-policies 2020-06-26 23:09:40 +00:00
Dora Korpar 3f4ed31153 Merge remote-tracking branch 'origin/improvement/bump-version' into w/8.1/improvement/bump-version 2020-06-26 15:59:54 -07:00
Jonathan Gramain fc23f68d0f Merge remote-tracking branch 'origin/w/7.8/bugfix/S3C-2987-helperForJsonStreamParsing' into w/8.1/bugfix/S3C-2987-helperForJsonStreamParsing 2020-06-24 17:38:24 -07:00
bert-e 2a4da20c0a Merge branch 'w/7.8/feature/S3C-3069-iam-policy-support-new-apis' into tmp/octopus/w/8.1/feature/S3C-3069-iam-policy-support-new-apis 2020-06-19 22:05:37 +00:00
bert-e 14c4696482 Merge branches 'w/8.1/bugfix/S3C-2987-add-v0v1-vFormat' and 'q/1108/7.8/bugfix/S3C-2987-add-v0v1-vFormat' into tmp/octopus/q/8.1 2020-06-17 20:31:04 +00:00
bert-e 275226278f Merge branch 'w/7.8/bugfix/S3C-2987-add-v0v1-vFormat' into tmp/octopus/w/8.1/bugfix/S3C-2987-add-v0v1-vFormat 2020-06-17 18:34:04 +00:00
bert-e b4b5712df7 Merge branch 'w/7.8/improvement/S3C-3044-add-audit-log-from-vault' into tmp/octopus/w/8.1/improvement/S3C-3044-add-audit-log-from-vault 2020-06-15 13:21:44 +00:00
bert-e 750c021c37 Merge branch 'w/7.8/feature/S3C-2787-retention-parsing' into tmp/octopus/w/8.1/feature/S3C-2787-retention-parsing 2020-06-12 23:21:12 +00:00
bert-e ee4d94c0fb Merge branch 'w/7.8/feature/S3C-2788-get-retention-route' into tmp/octopus/w/8.1/feature/S3C-2788-get-retention-route 2020-06-12 19:37:11 +00:00
bert-e 98f1d219a9 Merge branch 'w/7.8/feature/S3C-2788-get-object-retention' into tmp/octopus/w/8.1/feature/S3C-2788-get-object-retention 2020-06-06 04:18:21 +00:00
Dora Korpar fb363030c0 fix linter 2020-06-05 20:49:13 -07:00
Dora Korpar 7aeb32e223 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2787-objmd-retention' into w/8.1/feature/S3C-2787-objmd-retention 2020-06-05 20:43:05 -07:00
bert-e 5bdee7eb8a Merge branch 'w/7.8/bugfix/S3C-2945_fixGetLegalHoldRoute' into tmp/octopus/w/8.1/bugfix/S3C-2945_fixGetLegalHoldRoute 2020-06-05 01:45:31 +00:00
bert-e b8fd646097 Merge branch 'w/7.8/bugfix/S3C-2899-mergeStreamDestroy' into tmp/octopus/w/8.1/bugfix/S3C-2899-mergeStreamDestroy 2020-06-01 05:41:07 +00:00
bert-e a9d6e05c6e Merge branch 'w/7.8/feature/S3C-2945_getObjectLegalHoldRoute' into tmp/octopus/w/8.1/feature/S3C-2945_getObjectLegalHoldRoute 2020-05-29 18:44:01 +00:00
Ilke dc412e8953 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2944_putObjectLegalHold' into w/8.1/feature/S3C-2944_putObjectLegalHold 2020-05-29 09:00:37 -07:00
bert-e 36b68be051 Merge branches 'w/8.1/bugfix/S3C-2899-vformatV1delimiterVersions' and 'q/1031/7.7/bugfix/S3C-2899-vformatV1delimiterVersions' into tmp/octopus/q/8.1 2020-05-21 22:39:44 +00:00
bert-e 3f19a00b32 Merge branch 'feature/S3C-2790_SupportGetObjectLockConfig' into tmp/octopus/w/8.1/feature/S3C-2790_SupportGetObjectLockConfig 2020-05-21 22:28:12 +00:00
bert-e ea8166cf7a Merge branches 'w/8.1/bugfix/S3C-2899-vformatV1delimiterMaster' and 'q/1028/7.7/bugfix/S3C-2899-vformatV1delimiterMaster' into tmp/octopus/q/8.1 2020-05-20 22:39:27 +00:00
bert-e c06f735e82 Merge branches 'w/8.1/bugfix/S3C-2899-vformatV1MPU' and 'q/1017/7.7/bugfix/S3C-2899-vformatV1MPU' into tmp/octopus/q/8.1 2020-05-20 21:03:53 +00:00
bert-e b8c4ae4203 Merge branch 'w/8.1/bugfix/S3C-2899-helperForListingAlgoGenMDParams' into tmp/octopus/q/8.1 2020-05-20 04:18:48 +00:00
Dora Korpar 0cf9a9cdd5 bf: ZENKO-2610 fromObj extra param 2020-05-19 17:51:47 -07:00
bert-e d201e572fd Merge branch 'w/7.7/bugfix/S3C-2899-vformatV1delimiterVersions' into tmp/octopus/w/8.1/bugfix/S3C-2899-vformatV1delimiterVersions 2020-05-19 23:47:34 +00:00
bert-e 400dc24281 Merge branch 'w/7.7/bugfix/S3C-2899-vformatV1delimiterMaster' into tmp/octopus/w/8.1/bugfix/S3C-2899-vformatV1delimiterMaster 2020-05-19 23:47:21 +00:00
bert-e f59cea6b34 Merge branch 'w/7.7/bugfix/S3C-2899-vformatV1MPU' into tmp/octopus/w/8.1/bugfix/S3C-2899-vformatV1MPU 2020-05-19 23:47:08 +00:00
bert-e f19feb949d Merge branch 'w/7.7/bugfix/S3C-2899-helperForListingAlgoGenMDParams' into tmp/octopus/w/8.1/bugfix/S3C-2899-helperForListingAlgoGenMDParams 2020-05-19 23:46:02 +00:00
Jonathan Gramain bbef1964d7 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2899-passVformatToListingParams' into w/8.1/bugfix/S3C-2899-passVformatToListingParams 2020-05-18 11:52:02 -07:00
bert-e 43cd5f59b0 Merge branch 'feature/S3C-2789-put-object-lock-configuration' into tmp/octopus/w/8.1/feature/S3C-2789-put-object-lock-configuration 2020-05-15 21:49:57 +00:00
bert-e dd7390ade6 Merge branches 'w/8.1/feature/S3C-2789-put-objlock-bucketinfo' and 'q/995/7.7/feature/S3C-2789-put-objlock-bucketinfo' into tmp/octopus/q/8.1 2020-05-15 18:16:23 +00:00
Dora Korpar a3739cc836 Merge remote-tracking branch 'origin/feature/S3C-2789-put-objlock-bucketinfo' into w/8.1/feature/S3C-2789-put-objlock-bucketinfo 2020-05-15 11:03:53 -07:00
bert-e 97682f56bf Merge branch 'bugfix/ZENKO-2591-correctly-encode' into q/8.1 2020-05-14 23:25:09 +00:00
bert-e ce4ca533e2 Merge branch 'w/7.7/bugfix/S3C-2899-mergeStreamTooling' into tmp/octopus/w/8.1/bugfix/S3C-2899-mergeStreamTooling 2020-05-13 22:40:45 +00:00
bert-e 26bff09887 Merge branch 'w/7.7/bugfix/S3C-2899-versioningKeyFormatConstants' into tmp/octopus/w/8.1/bugfix/S3C-2899-versioningKeyFormatConstants 2020-05-11 22:30:25 +00:00
Pepijn Van Eeckhoudt f6165146ec Correct UTF-16 surrogates URI encoding
Signed-off-by: Pepijn Van Eeckhoudt <pepijn.vaneeckhoudt@datadobi.com>
(cherry picked from commit 2c3b10521ce99129d84c9ed600d14c67ee5e41ab)
2020-05-09 09:52:04 -07:00
Ilke 9f580444f3 fix 2020-05-04 21:32:37 -07:00
Ilke 93fe6fa94d Merge remote-tracking branch 'origin/feature/S3C-2785_ObjectLockCheckToBucketInfoModel' into w/8.1/feature/S3C-2785_ObjectLockCheckToBucketInfoModel 2020-05-04 18:34:19 -07:00
Jonathan Gramain d9ff2c2060 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2726-removeSomeDefaultAttributesFromObjectMD' into w/8.1/bugfix/S3C-2726-removeSomeDefaultAttributesFromObjectMD 2020-04-22 14:35:20 -07:00
bert-e e553342616 Merge branch 'w/7.7/bugfix/S3C-2668_allow_utf8_characters_in_tags' into tmp/octopus/w/8.1/bugfix/S3C-2668_allow_utf8_characters_in_tags 2020-04-14 19:46:07 +00:00
Ilke 8a9dbc4de7 Merge remote-tracking branch 'origin/improvement/S3C-2749_UnitTestsForUrlDuration' into w/8.1/improvement/S3C-2749_UnitTestsForUrlDuration 2020-04-13 13:07:28 -07:00
Jonathan Gramain 81d05b6ea8 improvement: ZENKO-2535 add microVersionId to ObjectMD
Add a new microVersionId field that is a hex-encoded field of 64 bits
randomly generated.

Updating the microVersionId field can be useful to:

- force updates in MongoDB when no other metadata changes

- detect a change when fields change but object version does not
  change e.g. when ingesting a putObjectTagging coming from S3C to
  Zenko

- manage concurrent updates, by adding a condition on the
  microVersionId and updating it for each metadata update to perform

In order for the change to be less intrusive, it is an optional field:
if ObjectMD.updateMicroVersionId() is not called by the client, the
metadata will not contain a microVersionId field. Clients will call
this function when needed on a case-by-case basis.
2020-04-10 15:21:50 -07:00
bert-e 44b8de565f Merge branch 'feature/S3C-2729-customize-s3-presign-url-expiry' into tmp/octopus/w/8.1/feature/S3C-2729-customize-s3-presign-url-expiry 2020-04-04 00:23:49 +00:00
vrancurel 3ed66c50f6 bugfix: update master if version is gte
This change is a workaround to palliate the fact we do not have micro
version to manage micro changes such as ACLs or tags changed. Indeed in
the AWS S3 specification such changes do not trigger a new version but
update the version (and the master) in place.
2020-03-17 16:26:41 -07:00
bert-e 90e1cff9f9 Merge branch 'bugfix/ZENKO-2352-httpError424IfLocationDoesNotExist' into q/8.1 2020-02-27 22:55:48 +00:00
Jonathan Gramain 9f323b32ea bugfix: ZENKO-2352 send back HTTP 424 when location does not exist
Send back an HTTP error 424 (Failed Dependency, a WebDAV extension)
instead of HTTP 503 (Service Unavailable), when the backend client
cannot retrieve the location of the data and gets a 404 error from the
server when issuing a HEAD request. This HEAD request is triggered on
the backend when a client issues a GET request on Zenko for an object
stored in a cloud backend. The error returned to the client also
contains a more specific error code "LocationNotFound", which is not
part of AWS standard.

The immediate purpose is to have backbeat not retry on such errors, as
they might arise if e.g. a Zenko bucket is backed by an S3C location,
and the out-of-band updates lag behind, so a user might have deleted
versions on S3C before Zenko got notified of the deletion, thinking
the object is still available.

More generally, even in the hypothetic case where a server-side bug
would have deleted the data, it's better not to have the client retry
as this is a definite failure and the client will just retry vainly
until it times out. Sending back a 4xx error makes this clear to the
client that it should not retry (not until something changes on its
side, like writing the same key again).

The patch applies on all supported backends: AWS, Azure, GCP.
2020-02-27 14:47:11 -08:00
bert-e dee53c8ad8 Merge branches 'w/8.1/bugfix/S3C-2502-vault-req-ip-header-port' and 'q/953/7.7/bugfix/S3C-2502-vault-req-ip-header-port' into tmp/octopus/q/8.1 2020-02-26 17:51:07 +00:00
bert-e 9680071e1a Merge branch 'w/7.7/bugfix/S3C-2604-listMultipleBucketMetrics' into tmp/octopus/w/8.1/bugfix/S3C-2604-listMultipleBucketMetrics 2020-02-26 09:27:06 +00:00
bert-e 6dd3aa92a4 Merge branch 'w/7.7/bugfix/S3C-2502-vault-req-ip-header-port' into tmp/octopus/w/8.1/bugfix/S3C-2502-vault-req-ip-header-port 2020-02-25 21:39:00 +00:00
bert-e a9618bc0bb Merge branches 'w/8.1/bugfix/S3C-2604-list-multiple-bucket-metrics' and 'q/949/7.7/bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/q/8.1 2020-02-25 19:25:14 +00:00
bert-e b6042035c0 Merge branch 'w/7.7/bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/w/8.1/bugfix/S3C-2604-list-multiple-bucket-metrics 2020-02-24 15:45:41 +00:00
bert-e d2fafe8ef3 Merge branch 'w/7.7/bugfix/S3C-2623_Explicit_socket_destroyed_check-port' into tmp/octopus/w/8.1/bugfix/S3C-2623_Explicit_socket_destroyed_check-port 2020-02-24 05:28:56 +00:00
bert-e fb18cba367 Merge branch 'w/7.7/bugfix/S3C-2623_Explicit_socket_destroyed_check' into tmp/octopus/w/8.1/bugfix/S3C-2623_Explicit_socket_destroyed_check 2020-02-23 21:21:49 +00:00
bert-e bab9d5dc24 Merge branch 'w/7.7/bugfix/S3C-2502-vault-req-ip-header' into tmp/octopus/w/8.1/bugfix/S3C-2502-vault-req-ip-header 2020-02-05 22:59:34 +00:00
Alexander Chan e531e5e711 improvement: ZENKO-2278 count items code reorg 2020-01-16 21:17:21 +00:00
bert-e f54d356669 Merge branch 'w/8.1/bugfix/S3C-2541-algo-LRUCache' into tmp/octopus/q/8.1 2020-01-03 21:18:22 +00:00
Jonathan Gramain c1bb2ac058 bugfix: ZENKO-2261 effectively reuse sproxyd connections
Dependency update on sproxydclient fix (S3C-2527).
2019-12-30 11:49:35 -08:00
Jonathan Gramain d76eeeea89 Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2541-algo-LRUCache' into w/8.1/bugfix/S3C-2541-algo-LRUCache 2019-12-27 15:38:23 -08:00
Alexander Chan ad58f66981 feature: ZENKO-2089 add mongodb conditionals
add conditional put/delete operations to allow for correct blobserver
behaviors
2019-12-11 16:02:26 -08:00
bert-e 85b5599ce2 Merge branch 'bugfix/ZENKO-2250-reworkChunkedUploadStreamHandling' into q/8.1 2019-12-11 22:24:48 +00:00
Dora Korpar 3121d29140 bf: ZENKO 2219 mongo socket timeout option 2019-12-10 14:59:55 -08:00
Jonathan Gramain a75db3122f bugfix: ZENKO-2250 rework chunked upload stream handling
Adapt changes extracted from CloudServer S3C pull request (ticket
S3C-2504) to Zenko:

- Original pull request: https://github.com/scality/cloudserver/pull/2247

- Commit: 53d382f5c3
2019-12-10 12:10:02 -08:00
bert-e d994e2ae60 Merge branch 'bugfix/ZENKO-2230-crash-on-bucket-creation-error' into q/8.1 2019-11-18 19:19:45 +00:00
Rached Ben Mustapha c443793968 feature: Abstract out locations from item count
The item count scan should not really be aware of how specifically
locations are stored and configured, this allows for flexibility.
2019-11-16 14:34:44 -08:00
Rached Ben Mustapha 517a034291 bugfix: crash on createBucket error during setup 2019-11-16 12:29:27 -08:00
Rached Ben Mustapha cc6671f37c bugfix: use default auth DB in mongo log reader 2019-11-08 11:38:29 -08:00
Rached Ben Mustapha 87bb3126a3 bugfix: generalize mongodb authentication 2019-11-05 14:39:54 -08:00
bert-e cedd08686a Merge branch 'w/7.6/bugfix/S3C-2269/ArnMatch_case_sensitive_check' into tmp/octopus/w/8.1/bugfix/S3C-2269/ArnMatch_case_sensitive_check 2019-10-08 19:39:03 +00:00
bert-e 635d2fe6d9 Merge branch 'w/7.6/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens' into tmp/octopus/w/8.1/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens 2019-10-03 22:20:29 +00:00
Jianqin Wang 9557e36438 bugfix: prevent stack err for !metaHeaders 2019-09-30 14:09:57 -07:00
bert-e 2bb0e171d8 Merge branch 'bugfix/S3C-2440-get-policy-xml-error' into tmp/octopus/w/8.1/bugfix/S3C-2440-get-policy-xml-error 2019-09-23 19:30:33 +00:00
bert-e 68f5d3c9f2 Merge branch 'bugfix/S3C-2435-fix-object-action-parse' into tmp/octopus/w/8.1/bugfix/S3C-2435-fix-object-action-parse 2019-09-17 22:11:28 +00:00
vrancurel 71caf08c19 bugfix: in some cases oplog value can be undefined
E.g. collections that do not use a value field.
2019-09-17 11:15:29 -07:00
Guillaume Gimenez 38403b84aa feature: ZENKO-2088: blob-issued-etag 2019-09-09 14:06:11 -07:00
Jianqin Wang 21610dd88d feature: helper functions for blob services 2019-09-06 15:40:08 -07:00
bbuchanan9 7566d1f0a9 Revert "bugfix: S3C-2052 Delete orphaned data"
This reverts commit 5de85713ef.
2019-08-28 16:58:31 -07:00
bbuchanan9 28415a5c9b Revert "bugfix: S3C-2052 Add error functions"
This reverts commit 9d02f86cf5.
2019-08-28 14:55:28 -07:00
Taylor McKinnon 506a9ad37d improv(ZENKO-2068): Improve MongoClientInterface checkHealth 2019-08-26 11:25:53 -07:00
bert-e 1c6e56e8ef Merge branch 'bugfix/S3C-2052/delete-orphaned-data-remaining-APIs' into q/8.1 2019-08-20 18:10:33 +00:00
bbuchanan9 9d02f86cf5 bugfix: S3C-2052 Add error functions 2019-08-20 10:31:29 -07:00
bert-e 5c4547a3a9 Merge branch 'bugfix/S3C-2396-fix-bucket-policy-parsing' into tmp/octopus/w/8.1/bugfix/S3C-2396-fix-bucket-policy-parsing 2019-08-19 19:02:28 +00:00
bbuchanan9 5de85713ef bugfix: S3C-2052 Delete orphaned data 2019-08-13 14:16:38 -07:00
Rahul Padigela 68defde532 bugfix: S3C-2369 bump sproxydclient 2019-08-09 14:51:59 -07:00
Dora Korpar 9e5d4ae95b fix bucketinfo tests 2019-08-09 14:11:33 -07:00
Dora Korpar 633ce2c069 Merge remote-tracking branch 'origin/bugfix/S3C-2276-bucketinfo-update' into w/8.1/bugfix/S3C-2276-bucketinfo-update 2019-08-09 13:40:16 -07:00
Dora Korpar 08ddc07d1c Merge remote-tracking branch 'origin/feature/S3C-2276-bucket-policy-model' into w/8.1/feature/S3C-2276-bucket-policy-model 2019-08-08 16:44:00 -07:00
Katherine Laue bc6c9c8c36 update yarn.lock file 2019-08-08 11:24:10 -07:00
bert-e 3dc9b958f7 Merge branch 'w/7.5/improvement/S3C-2352-install-yarn-frozen-lockfile' into tmp/octopus/w/8.1/improvement/S3C-2352-install-yarn-frozen-lockfile 2019-08-08 18:18:51 +00:00
vrancurel 4b5c0ff923 bf: fixing a typo introduced in the improvement
It was missing an underscore; this was not caught by tests.
2019-08-06 13:58:03 -07:00
vrancurel 62536f66df improvement: filters out special collections
Create a function to identify special collections.
Exclude collections starting with __ .
Nevertheless keeping explicit naming on collections
that are used directly by cloudserver even though they start with __
for sake of clarity.
Include a unit test.
2019-08-06 11:04:03 -07:00
bert-e 9032b89e6f Merge branch 'feature/S3C-2282-bucket-policy-validation' into tmp/octopus/w/8.1/feature/S3C-2282-bucket-policy-validation 2019-08-01 20:17:09 +00:00
vrancurel 9014761c70 bf: deserialize dots and dollars from oplog
To allow dots and dollars in tags we serialize them into a unicode
version. We need to properly deserialize them when reading the oplog.
2019-08-01 10:41:42 -07:00
bert-e 8d9864264d Merge branch 'w/7.5/improvement/S3C-2352-install-yarn' into tmp/octopus/w/8.1/improvement/S3C-2352-install-yarn 2019-07-30 11:35:10 -07:00
Rahul Padigela 839182292c Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2351-update-joi' into w/8.1/improvement/S3C-2351-update-joi 2019-07-29 16:05:13 -07:00
Rahul Padigela a197b2b6a4 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2351-update-joi' into w/8.0/improvement/S3C-2351-update-joi 2019-07-29 15:59:45 -07:00
bert-e adf6cfc8e4 Merge branch 'w/8.0/feature/S3C-2216-bump-tags-limit' into tmp/octopus/w/8.1/feature/S3C-2216-bump-tags-limit 2019-07-26 23:34:38 +00:00
bert-e 40aa7d836f Merge branch 'w/7.5/feature/S3C-2216-bump-tags-limit' into tmp/octopus/w/8.0/feature/S3C-2216-bump-tags-limit 2019-07-26 23:34:38 +00:00
bert-e 4fa15fce2a Merge branch 'w/8.0/feature/S3C-2346-bucket-policy-routes' into tmp/octopus/w/8.1/feature/S3C-2346-bucket-policy-routes 2019-07-26 17:14:33 +00:00
bert-e 279f08c870 Merge branch 'feature/S3C-2346-bucket-policy-routes' into tmp/octopus/w/8.0/feature/S3C-2346-bucket-policy-routes 2019-07-26 17:14:33 +00:00
anurag4dsb 05a8475f1c
Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2335-fixDataServerCloseSync' into w/8.1/bugfix/S3C-2335-fixDataServerCloseSync 2019-07-17 16:19:21 -07:00
anurag4dsb 8c664d9076
Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2335-fixDataServerCloseSync' into w/8.0/bugfix/S3C-2335-fixDataServerCloseSync 2019-07-17 16:16:06 -07:00
Jianqin Wang 77172f33f8 ft: ZENKO-1640 set blob metadata (support user md overwrites) 2019-07-15 10:51:48 -07:00
Guillaume Gimenez 0a0fe7f1da feature: ZENKO-1892: export azure models 2019-07-09 13:36:06 -07:00
Salim 6d7437a776 bf: allow delete markers on NFS files 2019-07-03 17:57:42 -07:00
bert-e 1a6174dadf Merge branch 'bugfix/ZENKO-1930' into q/8.1 2019-07-03 17:11:53 +00:00
vrancurel c57cde88bb fix the design of the putObjectVerCase4
- Repair master instead of creating PHD.
  - Note that backbeat has to be modified to use
  params.repairMaster instead of params.usePHD.
2019-06-28 15:06:08 -07:00
Rahul Padigela 6e97c01edd Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2127-upgrade-node' into w/8.1/improvement/S3C-2127-upgrade-node 2019-06-27 16:11:39 -07:00
Rahul Padigela dd6fde61bb Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2127-upgrade-node' into w/8.0/improvement/S3C-2127-upgrade-node 2019-06-27 16:06:24 -07:00
Benoit A 3e8c43e05b ZENKO-1930 don't call batchDelete unconditionally
If backend does not expose API, don't call batchDelete

Also add a minimum of 2 keys to delete at once for the batch delete to
qualify.
2019-06-22 09:49:38 +02:00
Nicolas Humbert 633efcbc50 OB-1840 Secure MongoDb access 2019-06-19 11:42:05 -07:00
Alexander Chan d99b430ac4 bugfix: ZENKO-1908 update sproxydclient 2019-06-18 13:19:21 -07:00
philipyoo 8f71d4ff03 bf: ZENKO-1736 count items scan rework
Count items scan, called by cloudserver reportHandler,
returns metrics by aggregate counts of mongo objects
and buckets. The scan is triggered each hour, but
holds the request hostage.

The change here is to separate the aggregate scan
from the countItems call made by reportHandler.
The scan will instead be called by a kubernetes
cronjob. Results of the scan will be saved in infostore.

Bucket info and bucket count will be collected every time
still and this should not take too long.
2019-06-14 16:40:40 -07:00
Rahul Padigela d0f77cee75 bugfix: S3C-2243 fix check for location type
This fixes the check where the logic should be looking at the type
of location instead of the name to leverage batch delete. It also fixes
the format sent to the sproxydclient which expects an object with keys
as an attribute whose value is an array of sproxyd keys.
2019-06-11 19:29:51 -07:00
bert-e 4419db7b23 Merge branch 'feature/ZENKO-1842/azure-info-models' into q/8.1 2019-06-04 20:42:38 +00:00
Rahul Padigela 3672df0fc4 bugfix: S3C-1139 return success for non-existing object deletes 2019-06-03 23:30:48 -07:00
Dora Korpar 9b223bea87 improvement: S3C 1139 implement batch delete for sproxyd client 2019-06-03 10:20:17 -07:00
Guillaume Gimenez b7dfc3a9c0 feature: ZENKO-1842: azure-info-models
Added Azure info models for storage accounts, containers and blobs
2019-05-29 17:39:18 -07:00
Dora Korpar 787f66458f bf: ZENKO 1728 sproxyd put fix 2019-05-23 17:24:38 -07:00
Dora Korpar 618b179d5c bf: ZENKO 1728 sproxyd put error tests 2019-05-23 17:01:05 -07:00
bert-e e6ddad1193 Merge branch 'w/7.5/bugfix/S3C-2172-bucket-error' into tmp/octopus/w/8.0/bugfix/S3C-2172-bucket-error 2019-05-22 23:59:47 +00:00
bert-e 6575be0050 Merge branch 'w/8.0/bugfix/S3C-2172-bucket-error' into tmp/octopus/w/8.1/bugfix/S3C-2172-bucket-error 2019-05-22 23:59:47 +00:00
Jianqin Wang 1f7263c320 Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2034-bump-ioredis' into w/8.1/improvement/S3C-2034-bump-ioredis
Update package-lock.json file
2019-05-20 16:19:31 -07:00
Jianqin Wang 9da1a8e1f7 Update package-lock.json file with ioredis 4.9.5 upgrade 2019-05-20 16:16:11 -07:00
Jianqin Wang 14f8690a9a Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2034-bump-ioredis' into w/8.1/improvement/S3C-2034-bump-ioredis 2019-05-20 15:05:07 -07:00
Jianqin Wang 700cb4eb48 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2034-bump-ioredis' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-20 14:52:03 -07:00
philipyoo 7dd4dca7e5 bf: ZENKO-1718 ingestion mongo putObjectVerCase4
Add a putObjectVerCase to MongoClientInterface for ingestion
use-cases. Remove management of master versions in the
ingestion process and rely on the natural ordering of
objects stored in mongo. Get and Delete operations will
rely on internal MongoClientInterface methods for performing
relevant operations. To do this, we set PHD on master for
each object version ingested.
2019-05-08 14:53:52 -07:00
bert-e a5d248000e Merge branch 'bugfix/ZENKO-1585-zenkoUserMDTagConstant' into q/8.1 2019-05-08 21:12:51 +00:00
Taylor McKinnon dae12b245b ft(ZENKO-1652): Update MetadataWrapper for List Blobs 2019-05-07 11:29:02 -07:00
bert-e c0129eb0d7 Merge branch 'feature/ZENKO-1755-hdclient-support' into q/8.1 2019-05-07 13:44:11 +00:00
philipyoo bd0d6c1942 bf: ZENKO-1585 zenko user-metadata header constant
This constant will be used in backbeat and cloudserver as
a user metadata defined header indicating if an object has
been created in a zenko deployment
2019-05-02 13:33:05 -07:00
Jonathan Gramain ed2d393e98 refactor: ZENKO-1110 remove backbeat API routes+Metrics
Remove backbeat API routes and Metrics class, as they have been moved
to backbeat repository.
2019-04-30 15:05:10 -07:00
bert-e 886110138a Merge branch 'feature/ZENKO-1760-exposePodMetricsToPrometheus' into q/8.1 2019-04-30 20:07:52 +00:00
Jonathan Gramain 397eecb370 feature: ZENKO-1760 expose prometheus metrics in HealthProbeServer
- add a class ZenkoMetrics to manage metrics process-wide, as a thin
  layer on top of prom-client module

- add a new route '/_/monitoring/metrics' exposed by the
  HealthProbeServer that runs on pods, to expose default metrics
  (nodejs etc) and custom metrics in prometheus format. Then,
  prometheus will be able to scrape them for each pod. Ideally the
  class should be renamed, maybe to MonitoringServer, kept it for
  later as it involves a larger refactor.
2019-04-30 12:46:48 -07:00
bert-e 3623b992da Merge branch 'bugfix/ZENKO-1749-exceptionInPutBucketLifecycleConfiguration' into q/8.1 2019-04-25 23:07:44 +00:00
Jonathan Gramain 78b64bebed bugfix: ZENKO-1749 fix exception with invalid lifecycle config
An empty Filter attribute along with invalid rule caused an exception
in LifecycleConfiguration._getRuleFilter(): make it return a proper
description string.

Also renamed the function to _getRuleFilterDesc().
2019-04-24 15:13:20 -07:00
Dora Korpar e857bb5f5a bf: S3C 2120 abort mpu timeout 2019-04-24 12:38:23 -07:00
Benoit A 9c1dab1055 ZENKO-1755 HD-97 add support hdclient
* import module hdclient
* add support for hdclient in parseLC
2019-04-24 20:39:02 +02:00
bert-e e18850911e Merge branch 'bugfix/ZENKO-1738-bucketNamesWithPeriodsTrimmed' into tmp/octopus/w/8.1/bugfix/ZENKO-1738-bucketNamesWithPeriodsTrimmed 2019-04-23 23:55:56 +00:00
Jonathan Gramain 2ff9cf866d bugfix: ZENKO-1738 bucket names with period trimmed by backbeat
Fix mongoclient.ListRecordStream to properly pass on bucket names with
periods (like foo.bar), instead of truncating the name after the first
period.

Down the line, that fixes replication for objects contained in such
buckets.
2019-04-23 16:54:27 -07:00
bbuchanan9 cc6ed165dd bugfix: ZENKO-1606 MPU tagging during replication 2019-04-18 14:08:38 -07:00
Dora Korpar a6b5c21e5d bf: ZENKO 1512 metastore circular json 2019-04-17 17:02:28 -07:00
bbuchanan9 64426b1450 bugfix: ZENKO-1606 Update AWS SDK dependency 2019-04-10 09:36:14 -07:00
bert-e 160fe96b18 Merge branch 'feature/ZENKO-1616_Update_ObjectMD_model_for_azure_blob_api' into q/8.1 2019-04-02 18:39:32 +00:00
Taylor McKinnon 59290513e3 ft(ZENKO-1616): Update ObjectMD model for azure blob api 2019-04-01 14:08:03 -07:00
Rahul Padigela 6b9be35d8e bugfix: ZENKO-1681 remove deprecation warnings
Removes warning "node:24) [DEP0013] DeprecationWarning: Calling an asynchronous
 function without callback is deprecated."
2019-04-01 13:42:58 -07:00
bbuchanan9 dffcbefe9b bugfix: ZENKO-1583 Allow options for Azure delete 2019-03-29 10:07:06 -07:00
bbuchanan9 c470cfb5b1 bugfix: ZENKO-1583 Return data in head operation 2019-03-29 10:07:06 -07:00
philipyoo abcff1b04e ft: ZENKO-1661 add ingestion all metric route 2019-03-26 12:12:50 -07:00
bbuchanan9 6791d1b561 bugfix: ZENKO-1610 Non-current version transition 2019-03-21 13:13:00 -07:00
bert-e a8e0a30918 Merge branch 'feature/ZENKO-1566-addIngestionMetricRoute' into q/8.1 2019-03-15 22:17:37 +00:00
philipyoo 487fe8bf35 ft: ZENKO-1566 add ingestion metrics routes
Add ingestion metric routes to backbeat routes.
Need to add a conditional on response object to not include
bytes in response.
2019-03-15 13:06:53 -07:00
bert-e b7c84ef7d3 Merge branch 'feature/S3C-2031/kmip-arsenal-errors' into tmp/octopus/w/8.0/feature/S3C-2031/kmip-arsenal-errors 2019-03-14 23:11:34 +00:00
bert-e b55295818f Merge branch 'w/8.0/feature/S3C-2031/kmip-arsenal-errors' into tmp/octopus/w/8.1/feature/S3C-2031/kmip-arsenal-errors 2019-03-14 23:11:34 +00:00
philipyoo 0213bcfd25 rf: ZENKO-1566 backbeat route dataPoints by id
Do not pass redis keys to backbeat routes function. Instead
data points should be by common identifier (as strings) and
we can map these data points to their respective redis keys
for a given service
2019-03-12 12:20:23 -07:00
bert-e 32b0946679 Merge branch 'feature/ZENKO-1560-addClientGetterMDWrapper' into q/8.1 2019-03-12 18:11:14 +00:00
JianqinWang bef886d8ad ZENKO-1377: Metadata mock moved back into scality/backbeat repo 2019-03-11 15:21:59 -07:00
philipyoo d44c2f123e ft: ZENKO-1560 add mongo get ingestion buckets
Add method to MongoClientInterface for fetching
ingestion buckets. Extend through MetadataWrapper.
2019-03-11 15:02:11 -07:00
bert-e f199d52c54 Merge branches 'w/8.1/feature/S3C-2002-admin-service' and 'q/722/8.0/feature/S3C-2002-admin-service' into tmp/octopus/q/8.1 2019-03-08 00:17:06 +00:00
bert-e b9c419dde7 Merge branches 'w/8.0/feature/S3C-2002-admin-service' and 'q/722/7.5/feature/S3C-2002-admin-service' into tmp/octopus/q/8.0 2019-03-08 00:17:05 +00:00
bert-e 5cf3948ba2 Merge branch 'w/8.0/feature/S3C-2002-admin-service' into tmp/octopus/w/8.1/feature/S3C-2002-admin-service 2019-03-07 19:29:21 +00:00
bert-e 226088c8fb Merge branch 'w/7.5/feature/S3C-2002-admin-service' into tmp/octopus/w/8.0/feature/S3C-2002-admin-service 2019-03-07 19:29:21 +00:00
Rahul Padigela bca10414bc Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2017-berte-fix' into w/8.1/bugfix/S3C-2017-berte-fix 2019-03-07 10:16:36 -08:00
bert-e 8f0cab8d91 Merge branch 'w/7.5/bugfix/S3C-2017-berte-fix' into tmp/octopus/w/8.0/bugfix/S3C-2017-berte-fix 2019-03-07 18:15:10 +00:00
Jonathan Gramain 40c234bb5f feature: ZENKO-1420 createAggregateETag helper cleanup
To increase reusability of createAggregateETag() helper function, pass
it a single argument which is an array of individual part ETags.
2019-03-06 15:57:17 -08:00
bert-e 26e2b5e425 Merge branch 'w/8.1/feature/S3C-1968/kmip-highlevel-driver' into tmp/octopus/q/8.1 2019-03-05 19:57:25 +00:00
bert-e df5a61cb8d Merge branch 'feature/ZENKO-1402-move-data-wrapper' into q/8.1 2019-03-04 19:26:38 +00:00
bert-e b01a390c46 Merge branch 'w/8.0/feature/S3C-1968/kmip-highlevel-driver' into tmp/octopus/w/8.1/feature/S3C-1968/kmip-highlevel-driver 2019-03-02 00:55:37 +00:00
Guillaume Gimenez 87103f83e1 Merge remote-tracking branch 'origin/feature/S3C-1968/kmip-highlevel-driver' into w/8.0/feature/S3C-1968/kmip-highlevel-driver 2019-03-01 16:53:03 -08:00
bert-e 9ba5d64cd2 Merge branches 'w/8.1/feature/S3C-1967/kmip-lowlevel-driver' and 'q/705/8.0/feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/q/8.1 2019-03-02 00:46:13 +00:00
bert-e f4d4c9b76e Merge branches 'w/8.0/feature/S3C-1967/kmip-lowlevel-driver' and 'q/705/7.5/feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/q/8.0 2019-03-02 00:46:12 +00:00
bert-e 2c149ea9b1 Merge branch 'w/8.1/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/q/8.1 2019-03-02 00:02:51 +00:00
philipyoo 735ad74bda bf: ZENKO-1424 add bucket info to MD mock
Changes in this commit:
- Add the bucket information route to Metadata mock.
  This is to support changes to tests in backbeat. Route
  only fetches cseq of bucket
2019-03-01 12:20:44 -08:00
bert-e 1636c87556 Merge branch 'feature/ZENKO-1452-mdMockStripVersionIds' into q/8.1 2019-03-01 20:16:09 +00:00
bert-e 8e2d6d42a8 Merge branch 'w/8.0/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/w/8.1/bugfix/S3C-2006-listing-filter-value-fix 2019-03-01 19:23:52 +00:00
bert-e f11d6e223d Merge branch 'w/7.5/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/w/8.0/bugfix/S3C-2006-listing-filter-value-fix 2019-03-01 19:23:51 +00:00
philipyoo ebe2d1f24d ft: ZENKO-1452 md mock remove version id in url
For metadata mock, remove the version id in the url. The
expected response is the same.
2019-02-28 15:34:50 -08:00
bert-e 6a1bc69336 Merge branch 'w/8.0/feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/w/8.1/feature/S3C-1967/kmip-lowlevel-driver 2019-02-28 20:49:53 +00:00
bert-e 0144158a37 Merge branch 'feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/w/8.0/feature/S3C-1967/kmip-lowlevel-driver 2019-02-28 20:49:53 +00:00
bert-e aea19c9cc2 Merge branch 'w/8.0/feature/S3C-1966/kmip-tls-transport' into tmp/octopus/w/8.1/feature/S3C-1966/kmip-tls-transport 2019-02-28 19:50:13 +00:00
bert-e daaeb5637a Merge branch 'feature/S3C-1966/kmip-tls-transport' into tmp/octopus/w/8.0/feature/S3C-1966/kmip-tls-transport 2019-02-28 19:50:12 +00:00
Dora Korpar c479933448 ft: ZENKO 1402 move data wrapper 2019-02-25 16:48:40 -08:00
JianqinWang f804aa9657 ZENKO-1377: update Arsenal mock for ingestion reader tests 2019-02-25 09:38:40 -08:00
Jonathan Gramain ad35b9ec78 Merge remote-tracking branch 'origin/bugfix/ZENKO-1522-isMultipartUploadHelper' into w/8.1/bugfix/ZENKO-1522-isMultipartUploadHelper 2019-02-21 17:57:42 -08:00
Jonathan Gramain 9fe0ba5c8c bugfix: ZENKO-1522 helper ObjectMD.isMultipartUpload()
Created this helper to check what kind of CRR to execute depending on
if the object is a MPU or not.
2019-02-21 17:53:45 -08:00
bert-e 2fe1e4da3c Merge branches 'w/8.1/feature/S3C-1925/kmip-ttlv-codec' and 'q/641/8.0/feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/q/8.1 2019-02-22 00:49:10 +00:00
bert-e 6a4784417f Merge branches 'w/8.0/feature/S3C-1925/kmip-ttlv-codec' and 'q/641/7.5/feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/q/8.0 2019-02-22 00:49:09 +00:00
bert-e 0ed8c750c9 Merge branch 'w/8.0/feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/w/8.1/feature/S3C-1925/kmip-ttlv-codec 2019-02-22 00:31:26 +00:00
bert-e 0d33e5a69f Merge branch 'feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/w/8.0/feature/S3C-1925/kmip-ttlv-codec 2019-02-22 00:31:26 +00:00
bert-e ac470f4233 Merge branch 'w/8.0/bugfix/S3C-1985/listing-filter-value' into tmp/octopus/w/8.1/bugfix/S3C-1985/listing-filter-value 2019-02-19 23:45:28 +00:00
bert-e 23d406dc81 Merge branch 'w/7.5/bugfix/S3C-1985/listing-filter-value' into tmp/octopus/w/8.0/bugfix/S3C-1985/listing-filter-value 2019-02-19 23:45:28 +00:00
JianqinWang f11ccbfefa ZENKO-1377: update md mock for ingestion reader raft log tests
- update raft id expected for test bucket `bucket1`
 - clone response object before editing the base mock data
2019-02-11 16:16:54 -08:00
bert-e c8c0527f65 Merge branch 'feature/ZENKO-1446-metadata-mock' into q/8.1 2019-02-08 18:48:32 +00:00
JianqinWang d81d309420 ZENKO-1451: updating mock for use in bb 2019-02-08 08:53:40 -08:00
Dora Korpar c657b4b469 update Arsenal version 2019-02-07 18:03:48 -08:00
Dora Korpar 65c99ff86d ft: ZENKO 1402 move data backends 2019-02-07 18:03:48 -08:00
Jonathan Gramain 645433ed0c refactor: ZENKO-1420 ObjectMD.getUserMetadata()
Copy this helper from backbeat ObjectQueueEntry class since it's
related to ObjectMD and will be used by bare ObjectMD instances.

Add a short unit test too.
2019-02-06 18:50:59 -08:00
JianqinWang f9bb82ce43 ZENKO-1446: metadata-mock for bb 2019-01-30 14:37:43 -08:00
bert-e ab4500b842 Merge branch 'w/8.0/feature/S3C-1561-accountQuotas' into tmp/octopus/w/8.1/feature/S3C-1561-accountQuotas 2019-01-24 21:10:54 +00:00
bert-e 40a802b715 Merge branch 'feature/S3C-1561-accountQuotas' into tmp/octopus/w/8.0/feature/S3C-1561-accountQuotas 2019-01-24 21:10:53 +00:00
Giacomo Guiulfo 84bf7bd511 Merge remote-tracking branch 'origin/bugfix/ZENKO-1369-no-cache-option' into w/8.1/bugfix/ZENKO-1369-no-cache-option 2019-01-07 16:23:17 -08:00
Giacomo Guiulfo b5fa54ec11 bugfix(DataFileStore): add noCache option 2019-01-07 16:20:55 -08:00
Bennett Buchanan 58e9f26ae0 feature: ZENKO-1399 Check transitions time gap 2018-12-20 13:13:04 -08:00
Giacomo Guiulfo d6fdd153aa feat: add unit tests for parseURL 2018-12-18 19:15:10 -05:00
Giacomo Guiulfo 1e05f0f54e chore: move parseURL to utils 2018-12-18 18:17:01 -05:00
Giacomo Guiulfo 9c66b7ceba bugfix(ZENKO-1362): request path encoding 2018-12-18 16:56:02 -05:00
bert-e 0555d0b41a Merge branch 'feature/ZENKO-1389/zenko-md-parallel' into tmp/octopus/w/8.1/feature/ZENKO-1389/zenko-md-parallel 2018-12-18 19:42:52 +00:00
Guillaume Gimenez 39f2a53beb ft: ZENKO-1389: md proxy parallel route 2018-12-18 11:41:37 -08:00
Bennett Buchanan 0a75792ca6 feature: ZENKO-1317 AWS lifecycle compat 2018-12-17 12:45:42 -08:00
bert-e 5225fc231d Merge branch 'feature/ZENKO-1384/zenko-md-healthcheck' into tmp/octopus/w/8.1/feature/ZENKO-1384/zenko-md-healthcheck 2018-12-14 01:31:08 +00:00
Guillaume Gimenez 30c3ce1e2b ft: ZENKO-1384: md proxy healthcheck 2018-12-13 17:30:25 -08:00
Taylor McKinnon aa157c6d13 bf(ZENKO-1310): Fix HealthProbeServer flaky test 2018-12-12 17:01:04 -08:00
Bennett Buchanan 699890d2d7 feature: ZENKO-732 Lifecycle transition policies 2018-12-11 10:07:33 -08:00
Jonathan Gramain ea1a7d4d87 ZENKO-557 extend ObjectMDLocation with dataStoreVersionId
The backend version ID is part of the location for cloud backends, and
it's needed for the GC service to work on cloud locations.
2018-12-10 13:46:09 -08:00
bert-e a9297e707a Merge branch 'feature/ZENKO-717-add-replicationBackends-constant' into q/8.1 2018-12-10 18:43:41 +00:00
Bennett Buchanan 75dccc528d feature: ZENKO-733 Add setReplicationStorageType 2018-12-10 10:33:02 -08:00
bert-e 5d7cf78eda Merge branch 'feature/ZENKO-1351-pfsd-delete' into q/8.1 2018-12-07 22:49:28 +00:00
Giacomo Guiulfo 0a364fe379 feat(DataFileStore): add passthrough delete functionality 2018-12-05 11:33:56 -08:00
Rahul Padigela 345031f2bd chore: ignore a few dependencies for now 2018-12-04 17:39:01 -08:00
greenkeeper[bot] 0bc1fe1a71 chore(package): update lockfile package-lock.json 2018-12-05 01:08:29 +00:00
greenkeeper[bot] f23e457b83 docs(readme): add Greenkeeper badge 2018-12-05 01:08:25 +00:00
greenkeeper[bot] 09aca2dcf4 chore(package): update dependencies 2018-12-05 01:08:23 +00:00
greenkeeper[bot] d304334e92 chore(package): update dependencies 2018-12-05 01:08:20 +00:00
greenkeeper[bot] 7955b97810 chore: add Greenkeeper config file 2018-12-05 01:08:17 +00:00
Rahul Padigela d14cef843b feature: greenkeeper.io dependency manager bot 2018-12-04 16:57:38 -08:00
Dora Korpar f2b39fb3d7 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-1678-ipv6' into w/8.1/bugfix/S3C-1678-ipv6 2018-11-15 16:18:47 -08:00
Dora Korpar 9a009746be Merge remote-tracking branch 'origin/bugfix/S3C-1678-ipv6' into w/8.0/bugfix/S3C-1678-ipv6 2018-11-15 16:15:14 -08:00
Jeremy Desanlis 3e08bad2da ft: ZENKO-717: move replicationBackends constant from cloudServer.
With the management code moved from cloud server to its own repository,
this constant should be shared in Arsenal constants.
2018-11-15 15:22:41 -08:00
philipyoo 13b156b226 ft: ZENKO-1282 add scheduled resume for ingestion 2018-11-15 12:40:12 -08:00
JianqinWang 07f655c2f8 improvement: bump arsenal ver. for 8.1 2018-11-13 11:35:03 -08:00
JianqinWang f496cec8bf feat: ZENKO-833 add ingestion param for BucketInfo 2018-11-13 11:34:57 -08:00
bert-e 7f5413699d Merge branch 'bugfix/ZENKO-1175-oplogSkipFix' into tmp/octopus/w/8.1/bugfix/ZENKO-1175-oplogSkipFix 2018-11-08 22:47:51 +00:00
Jonathan Gramain d620fef517 bf: ZENKO-1175 fix when no saved ID exists
In case where there is no saved ID yet (initial deployment), do
process the very first entry in the log instead of skipping it. In
practice it should not have an impact because the very first entry in
the log is normally not due to be processed for CRR, but it ensures
correctness.
2018-11-08 14:46:04 -08:00
Jonathan Gramain 8ac3cf5548 ft: ZENKO-1175 tailable cursor to consume mongo oplog
Use a tailable cursor to keep ordering guarantees for the records we
read. This also means we have to read from the beginning when we
reconnect (at startup), and start processing when we encountered the
unique ID previously stored in zookeeper.

Also removed dispatcher mode with MongoLogReader (was only used for
the short-lived Federation deployment of Zenko).
2018-11-08 14:45:07 -08:00
Giacomo Guiulfo ebd9a74666 feat: passthroughFile service 2018-11-02 15:52:50 -07:00
bert-e a1f9bef60e Merge branch 'feature/ZENKO-1282-ingestionPauseResume' into q/8.1 2018-10-30 16:50:19 +00:00
philipyoo 899107913c ft: ZENKO-1282 extend pause/resume for ingestion
Extend the backbeat api routes to add pause/resume for
ingestion.
2018-10-25 12:01:20 -07:00
Jonathan Gramain 18dfc6b4fa Merge remote-tracking branch 'origin/feature/S3C-1640-CRRRetryBackport' into w/8.0/feature/S3C-1640-CRRRetryBackport 2018-10-19 17:31:22 -07:00
Rahul Padigela 9fe16c64fa Merge remote-tracking branch 'origin/improvement/bumpArsenal' into w/8.0/improvement/bumpArsenal 2018-10-15 14:57:30 -07:00
vrancurel 3dee6e2d0b bugfix: manage the 'close' event on dataStream
When the underlying socket of the dataStream is closed this
  is not considered as a stream error. So we have to hook the
  event and do the cleanup by ourselves
2018-10-15 11:13:47 -07:00
vrancurel 3545eb4d62 bugfix: close socket on client error
When receiving this callback, sometimes the socket is already
  closed (e.g. upon RST) but sometimes we have to close it ourselves.
2018-10-15 11:13:47 -07:00
Dora Korpar 0a85eeb8b7 manual edit: add metastore changes 2018-09-25 16:17:42 -07:00
Dora Korpar 83759870f2 Merge remote-tracking branch 'origin/feature/S3C-1171-listv2' into w/8.0/feature/S3C-1171-listv2 2018-09-25 16:16:51 -07:00
Alexander Chan 0d4bf3c17f ft: ZENKO-1186 stalled sanity check 2018-09-20 15:40:07 -07:00
Alexander Chan 0117b39dcf bf: ZENKO-1155 add index restriction for mongo find call 2018-09-15 19:27:56 -07:00
Bennett Buchanan 549ca1f683 bugfix: ZENKO-1144 Update route and private method 2018-09-15 10:03:44 -07:00
bert-e e4a66343fb Merge branch 'bugfix/ZENKO-1144-fixSortedSetHelper' into q/8.0 2018-09-14 21:32:15 +00:00
philipyoo a89fdde6fd bf: ZENKO-1144 fix ttl of sorted set expires
Changes in this commit:
- Fix TTL Redis#expire from ms to secs
2018-09-14 14:29:55 -07:00
philipyoo 872a2d88e5 bf: ZENKO-1144 remove redis scan in crr metrics
Changes in this commit:
- Remove use of Redis#scan. Instead build query strings
  manually
2018-09-14 08:51:40 -07:00
philipyoo 0c9c462634 bf: ZENKO-1144 add sorted set support StatsModel
Changes in this commit:
- Helper method _normalizeTimestampByHour normalizes date to
  nearest hour
- Helper method _setDateToPreviousHour sets date back 1 hour
- method getSortedSetHours returns list of 24 normalized
  hourly timestamps
- method getSortedSetCurrentHour returns normalized
  hourly timestamp based on epoch passed
- method addToSortedSet adds to a sorted set and applies
  expiry if adding to new sorted set
2018-09-14 07:40:30 -07:00
philipyoo a3973ac7d3 ft: ZENKO-1144 redis wrapper for sorted sets
Changes in this commit:
- Add wrapper for Redis sorted set methods: ZADD, ZCARD,
  ZRANGE, ZRANGEBYSCORE, ZREM, ZSCORE
- Add wrapper for Redis methods: EXISTS
2018-09-13 18:28:48 -07:00
bert-e d1a8693fe5 Merge branch 'bugfix/ZENKO-1124-mongo-listing-loop' into q/8.0 2018-09-11 01:02:10 +00:00
Jeremy Desanlis 5687a48599 ZENKO-1124: mongo listing, avoid to loop 2018-09-10 17:07:01 -07:00
Nicolas Humbert 9dca871e1b fx: ZENKO-1112 Management client error logging 2018-09-07 10:04:26 -07:00
philipyoo 7088812c80 bf: ZENKO-1024 fix fail metrics in all route
All metrics function will query redis once for all data.
With the change to failure metrics, we want to pass
the request details object to the getFailedMetrics fxn
2018-09-04 08:48:05 -07:00
philipyoo 9f742d4921 bf: ZENKO-1024 use pending metrics for backlog
Pending metrics don't expire which was a cause for problems
with current backlog. This quick fix is to use pending
metrics in place of backlog but keeping the same names
and routes in place to avoid regression.
2018-08-31 17:19:14 -07:00
bert-e 2c31728905 Merge branch 'bugfix/ZENKO-1024/add-global-counters' into q/8.0 2018-08-24 22:06:46 +00:00
Bennett Buchanan 125ccbbfa9 bugfix: ZENKO-1024 Add pending counters 2018-08-24 14:28:36 -07:00
bert-e 40c8b37b30 Merge branch 'feature/ZENKO-1019-cancelScheduleResume' into q/8.0 2018-08-23 22:59:12 +00:00
bert-e 879075e4ec Merge branch 'bugfix/ZENKO-945-delimitermaster-filter' into tmp/octopus/w/8.0/bugfix/ZENKO-945-delimitermaster-filter 2018-08-22 23:46:28 +00:00
philipyoo 79ed68ce9f ft: ZENKO-1019 add cancel scheduled resume route 2018-08-22 13:47:37 -07:00
bert-e cbfacb5ec0 Merge branch 'bugfix/ZENKO-945-delimitermaster-test' into tmp/octopus/w/8.0/bugfix/ZENKO-945-delimitermaster-test 2018-08-20 23:40:54 +00:00
philipyoo 06dfdd9612 rf: use single StatsModel, use explicit var names
Changes in this commit:
- Remove `OBJECT_MONITORING_EXPIRY` and use `EXPIRY` instead
  as values are now same
- Use single instance of `StatsModel`
- Remove extra interval in `StatsModel` expiry field. Not
  needed anymore as throughput uses a 15 minute window and
  the extra interval for it will be available by default
- Use explicit variable names when data is fetched from
  `StatsClient`
2018-08-08 14:53:30 -07:00
philipyoo bf95506495 ft: ZENKO-925 increase crr metrics expiry to 24hrs
Changes reflected in this commit:
- Increase metrics expiry, but keep throughput to 15 minute
  averages.
- Add helper method `_getMaxUptime` to find # of intervals
- Update tests to reflect the extra intervals fetched from
  Redis/StatsModel
2018-08-08 14:53:30 -07:00
Alexander Chan db743f8269 improvement: version increase 2018-08-08 10:03:19 -07:00
Alexander Chan a2311bb69c bf: ZENKO-922 add redis disconnect method
Adds disconnect method to allow closing of the backbeat metrics redis
client
2018-08-06 17:52:24 -07:00
Alexander Chan c8f323237f bf: ZENKO-903 retrieve new bucket list on report 2018-08-06 13:07:16 -07:00
Rahul Padigela 5cf55fcb68 improvement: update package-lock version 2018-08-01 17:16:23 -07:00
Rahul Padigela de94a0e62e improvement: update test to adjust to nodejs 8
Buffer.from no longer throws errors if most of the string contains a valid hex.
Since the test is testing if an error is thrown for invalid hex, the test has been
updated to do the same.
2018-08-01 17:12:29 -07:00
Rahul Padigela 2b13994795 improvement: run tests in eve with nodejs 8 2018-08-01 16:25:59 -07:00
Rahul Padigela 769a461178 improvement: move metrics tests to functional 2018-08-01 16:24:40 -07:00
Rahul Padigela c11fc1d9d8 bugfix: ZENKO-898 install node-fcntl module 2018-08-01 15:59:11 -07:00
bert-e b8ad86a1f1 Merge branch 'feature/ZENKO-785-add-checkHealth-to-mongodb' into q/8.0 2018-07-31 19:04:56 +00:00
Giacomo Guiulfo 12c4df722b feat: add checkHealth to mongodb interface 2018-07-31 11:40:55 -07:00
bert-e f566e32322 Merge branch 'bugfix/ZENKO-751-setMaxObjectKeyLimit' into q/8.0 2018-07-30 17:17:11 +00:00
philipyoo 6413c92fbc bf: ZENKO-751 tempfix set max object key limit
Max key length will be set to 915 to account for different
situations. Default AWS key size is 1024, but mongo keys
allow for bytes of up to 1012. Factoring in version id,
and bucket match false (bucket name prefix), for now,
we will limit the key size to 915 and return an error
right away if object key byte size exceeds this limit.
2018-07-27 14:40:14 -07:00
bert-e 29182cce05 Merge branch 'bugfix/ZENKO-763-objectTagsAreNotReplicated' into q/8.0 2018-07-27 18:40:17 +00:00
Jonathan Gramain 9fb5b8b10d bf: ZENKO-763 rework mongo log consumer
- process individual mongo log entry types separately ('i', 'u',
  'd'). This is the main fix required to process updates coming from
  put-object-tagging or ACLs

- fix usage of uniqID:

  - previously it was ignored due to a typo (uniqId instead of
    uniqID), which meant we still processed multiple times entries
    from the same second

  - with typo fixed, it requires another change to make it useful:
    we have to emit the 'info' event at the end of the batch so that
    the last unique ID is presented

  - cleaner serialization of timestamp+uniqID: use JSON rather than
    custom parsing and pass it as an opaque string in info.end

- correct use of stream functions (e.g. end() was masked by a local
  variable called "this.end", fixed by prefixing private members with
  "_")

- fix timestamp output: do not use private member _high from
  Timestamp, use toNumber()

- fix mongo log flow control when reading from mongo log by using
  pipe() instead of just calling write() then end() so we don't
  bufferize contents unnecessarily.

- removed some unnecessary special case handling and 'this.push()'
  calls

Add unit tests to check ListRecordStream with known mongo log entry
types (which required moving the class in a separate file from
LogConsumer.js)
2018-07-25 18:06:06 -07:00
vrancurel 5631a892c6 bugfix: temporary fix for the s3-data pod ballooning issue
that consists in disabling file level caching for the files
  we store in the file data backend.
2018-07-25 16:17:16 -07:00
Rahul Padigela dfcdea46fc improvement: ZENKO-760 use callback instead of throw
This lets CloudServer handle MongoClient issues more gracefully
2018-07-20 17:21:36 -07:00
Rahul Padigela be02e59bfe bugfix: ensure setup callback is called 2018-07-20 16:51:17 -07:00
Rahul Padigela fdbeed1c4e improvement: ZENKO-760 add connection timeout for mongoclient
Mongoclient checks each node in the replica set to see which one's the primary,
this check has a default timeout of 30s which delays the startup of Cloudserver
when one of the nodes is unavailable. Cutting down the timeout makes it go through the
list of nodes in the replica set quicker to find the primary. MONGO_CONNECT_TIMEOUT_MS env
var is introduced to adjust the timeout in deployments.
2018-07-20 14:30:38 -07:00
bert-e 91fbc3fd23 Merge branch 'bugfix/ZENKO-642-multipleLifecycleConfigTags' into q/8.0 2018-07-17 20:42:38 +00:00
philipyoo 241338bcfa bf: apply multiple lifecycle filter tags if exists 2018-07-17 13:22:16 -07:00
Rached Ben Mustapha 6db80e9411 bf: return timely on data diskUsage subresource 2018-07-17 10:46:40 -07:00
bert-e d701352635 Merge branch 'bugfix/ZENKO-693/fixNegativeValues' into q/8.0 2018-07-10 20:02:18 +00:00
Alexander Chan b291ccc03f bf: ZENKO-693 clamp negative values to 0 2018-07-10 12:11:16 -07:00
Bennett Buchanan 0426f44dee bugfix: ZENKO-621 Make _buildKey public method 2018-07-10 11:34:47 -07:00
Rahul Padigela 1b9242788a bugfix: ZENKO-632 check if destroy method is available
s3-data returns Readable unlike sproxydclient which returns an instance of
http.IncomingMessage which implements Readable stream and extends it with
destroy method
2018-07-06 15:30:38 -07:00
Bennett Buchanan 1a2ea2f353 feature: ZENKO-483 Update Redis key schema 2018-07-05 15:31:01 -07:00
Bennett Buchanan c36280a6e8 feature: ZENKO-483 Monitor CRR upload 2018-07-05 10:51:48 -07:00
bert-e c749725410 Merge branch 'bugfix/ZENKO-579-skip-scan-fix' into q/8.0 2018-07-01 21:30:53 +00:00
Alexander Chan 3d06ec6230 bf: ZENKO-625 fix mongo aggregate params 2018-07-01 12:03:46 -07:00
Jonathan Gramain 159ebb4283 bugfix: ZENKO-433 fix when 'params' is undefined 2018-06-30 19:20:55 -07:00
Alexander Chan e17333b19e ft: ZENKO-597 account for transient source in TDM 2018-06-30 15:11:12 -07:00
philipyoo b3b22292c4 ft: ZENKO-584 add failed CRR metrics route 2018-06-30 08:14:38 -07:00
bert-e 68d27ed5bf Merge branch 'bugfix/ZENKO-603/mongoItemCount' into q/8.0 2018-06-30 04:58:41 +00:00
bert-e 1e79964253 Merge branch 'feature/ZENKO-239-scheduleResumeRoutes' into q/8.0 2018-06-29 22:54:40 +00:00
philipyoo 5f76343c2e ft: ZENKO-239 add schedule resume routes 2018-06-29 15:13:50 -07:00
Alexander Chan d907c9942d bf: use bucketName instead of c.s.name 2018-06-29 12:52:21 -07:00
Alexander Chan c63b0713c0 bf: add more tests 2018-06-29 12:50:49 -07:00
Alexander Chan 6a9a88800a rf: use mongodb aggregate method for item count 2018-06-29 11:56:30 -07:00
Dora Korpar 5834f15397 ft: ZENKO-582 preferred read location
Add preferred read location specification in replication configuration

E.g. <StorageClass>aws,gcp:preferred_read</StorageClass>
2018-06-28 14:31:35 -07:00
bert-e b50f6c4678 Merge branch 'feature/ZENKO-583-crrStatusRoute' into q/8.0 2018-06-28 17:20:54 +00:00
bert-e edeab02107 Merge branch 'feature/pensieve-stats' into q/8.0 2018-06-28 17:17:43 +00:00
David Pineau c64cccdf55 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-28 18:51:07 +02:00
vrancurel af2b3a4bc3 bugfix: fix versioningGeneral2 test failing with Mongo
When an object has been created without versioning and
the versioning has been enabled, when creating a version
we must consider the case that the object doesn't have
the versionId property.
2018-06-27 18:38:28 -07:00
philipyoo 1e9ad08830 ft: ZENKO-583 add crr status check route 2018-06-27 17:20:11 -07:00
David Pineau 9e66fda610 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-27 18:42:41 +02:00
Rahul Padigela 888e154f0e Merge remote-tracking branch 'origin/feature/ZENKO-267-Routes-MD-Ingestion' into w/8.0/feature/ZENKO-267-Routes-MD-Ingestion 2018-06-26 17:22:02 -07:00
Nicolas Humbert 8448f909e4 FT: push isVersioned and ownerCanonicalId stats 2018-06-26 17:07:16 -07:00
bert-e 2b16e84733 Merge branch 'feature/ZENKO-235-manualPauseResumeRoutes' into q/8.0 2018-06-27 00:00:25 +00:00
philipyoo a1a6f65364 ft: add crr pause/retry/status routes 2018-06-26 16:55:11 -07:00
bert-e 7cf0c97d8e Merge branch 'feature/ZENKO-437_Backbeat_Readiness_Probes' into q/8.0 2018-06-26 23:26:20 +00:00
Taylor McKinnon 10e7b976d5 feat(zenko-437): Add HealthProbeServer 2018-06-26 15:56:54 -07:00
vrancurel e80ea95ad8 bugfix: fix skip scan on Mongo
This allows to skip scans when it is too long to jump
over a prefix. Also it has the side effect of batching
more common prefixes in one s3 list call with delimiter
2018-06-26 14:08:18 -07:00
Jeremy Desanlis 7075318dd2 bf: ZENKO-578 mongoDB error replies
Do not raise an internalError to upper layer when mongoDB fails to
update the master version with a specific error code. This fix is
related to the mongoDB issue: SERVER-19600.

This commit fixes too the message field name of the mongoDB error, it is
'errmsg' and not 'message'.
2018-06-26 14:00:08 -07:00
bert-e 38f68fba1a Merge branch 'bugfix/ZENKO-308-listversion' into q/8.0 2018-06-25 17:45:24 +00:00
vrancurel 16f9a6f5f6 bugfix: list version is incorrect because sometimes
we replace the master with an incorrect last version
 because inserts are sometimes swapped. Add a check
 to be sure we always replace the master with a
 smaller (thus more recent) version.
2018-06-25 09:28:34 -07:00
bert-e c48e4b89bd Merge branch 'feature/ZENKO-315/CRRWithoutVersioning' into q/8.0 2018-06-23 00:00:50 +00:00
Bennett Buchanan 2a8169e936 feature: ZENKO-315 Add NFS properties 2018-06-22 14:02:39 -07:00
Alexander Chan 1af67fffc7 bf: fix mongo counter 2018-06-21 19:58:42 -07:00
Guillaume Gimenez e9ac11b1fe ft: ZENKO-561: bucket attributes handling fixed
on putBucketAttributes and getBucketAttributes
2018-06-20 15:54:43 -07:00
bert-e 30dcd6ef86 Merge branch 'feature/ZENKO-433/countIncUpdateandRefresh' into q/8.0 2018-06-20 22:05:49 +00:00
Alexander Chan 2ce9db4e01 ft: ZENKO-433 add item count support incremental update and refresh 2018-06-20 10:00:57 -07:00
philipyoo 9e234e2b41 bf: zero-fill response for getAllStats 2018-06-13 15:05:38 -07:00
philipyoo 83a831f512 rf: edit monitoring route details 2018-06-13 14:11:45 -07:00
Guillaume Gimenez 32c2a6fe99 FT: Metadata Proxy Server 2018-06-13 10:06:05 -07:00
Rahul Padigela 063361377c chore: update version and dependencies 2018-05-30 16:44:17 -07:00
Rahul Padigela ea7f28c82d
Merge pull request #495 from scality/fwdport/z/1.0-master
Fwdport/z/1.0 master
2018-05-30 08:32:59 -07:00
Rahul Padigela a9e760b32e chore: use correct dependency branches 2018-05-29 17:01:49 -07:00
Rahul Padigela 3b16a307b8 Merge remote-tracking branch 'origin/z/1.0' into fwdport/z/1.0-master 2018-05-29 16:52:11 -07:00
Rahul Padigela f8dfa378a1
Merge pull request #494 from scality/bf/ZENKO-370-restoreMongoOpLogFilteringPerDb
restore mongo op log filtering per db
2018-05-29 09:28:54 -07:00
Jonathan Gramain e16eadb474 bf: ZENKO-370 restore mongo oplog db filtering
For Orbit that has multiple instances per mongo database. This change
restores the filtering per db, but keeps publishing the internal DBs
that have '__' in their name.

Also attempt to fix the original regexp which was matching the '$DB.'
pattern at any place, not at the beginning of the 'ns' field.
2018-05-25 14:54:21 -07:00
Rahul Padigela 5bf7fef53c
Merge pull request #491 from scality/bf/ZENKO-355-byteToMBConversion
bf: ZENKO-355 crr stats byte conversion
2018-05-22 15:00:45 -07:00
philipyoo 659aee2fc2 bf: fix/change byte conversion 2018-05-21 16:07:15 -07:00
Rahul Padigela bde52ab89b
Merge pull request #492 from scality/bf/ZENKO-344-mongoLogEntriesDuplicated-fixInfoEvent
bf: ZENKO-344 don't shunt 'info' event production
2018-05-15 16:24:13 -07:00
Jonathan Gramain 0ddb4da8a9 bf: ZENKO-344 don't shunt 'info' event production
Make sure the mongo consumer produces the 'info' event from the LogConsumer
when the lastEndID has not been reached yet.
2018-05-14 17:31:05 -07:00
Rached Ben Mustapha 56e280236b
Merge pull request #490 from scality/fix/ZENKO-346-no-crash-on-empty-stats
fix: do not crash on empty backbeat stats
2018-05-11 15:32:04 -07:00
Rached Ben Mustapha f904f04401 fix: do not crash on empty backbeat stats 2018-05-11 11:51:12 -07:00
Rahul Padigela db45fee9e8
Merge pull request #487 from scality/bf/ZENKO-344-mongoLogEntriesDuplicated
Bf/zenko 344 mongo log entries duplicated
2018-05-11 10:16:27 -07:00
JianqinWang ecc431c715 bf: typo in oplogReplay 2018-05-11 10:12:03 -07:00
JianqinWang 6f694ae7f4 bf: ZENKO-344 Fix duplicate mongo logs 2018-05-11 10:12:03 -07:00
Rahul Padigela e7862d3922
Merge pull request #489 from scality/bf/ZENKO-343-dontFilterInternalMongoNs
bf: ZENKO-343 remove regexp-based 'ns' filtering
2018-05-10 21:16:59 -07:00
Jonathan Gramain de7ebf70d7 bf: ZENKO-343 remove regexp-based 'ns' filtering
Don't filter internal namespace entries from mongo log, as backbeat
need the metastore entries exposed to process lifecycle entries.
2018-05-10 16:00:07 -07:00
Rahul Padigela 1425f03c1e
Merge pull request #486 from scality/bf/ZENKO-323-relaxMongoCountError
bf: ZENKO-323 relax mongo count error
2018-05-10 09:55:20 -07:00
Alexander Chan ad527911a2 bf: ZENKO-323 relax mongo count error 2018-05-08 17:18:26 -07:00
Rahul Padigela 6c528688ee
Merge pull request #485 from scality/back-porting-master
FX: constructing v4 query auth signature with proxyPath
2018-05-08 15:03:17 -07:00
Nicolas Humbert e53aa2efd2 FX: constructing v4 query auth signature with proxyPath
(cherry picked from commit 160b960607)
2018-05-08 14:51:13 -07:00
Rahul Padigela 873bc9b647
Merge pull request #479 from scality/ft/proxy
FX: constructing v4 query auth signature with proxyPath
2018-05-08 12:01:12 -07:00
Nicolas Humbert 160b960607 FX: constructing v4 query auth signature with proxyPath 2018-05-08 11:55:24 -07:00
Rahul Padigela 843bd1fe13
Merge pull request #484 from scality/bf/ZENKO-314-removeMongoCursorLimit
bf: ZENKO-314 remove mongo cursor limit
2018-05-07 18:40:28 -07:00
Alexander Chan 93a2a79699 bf: remove mongo cursor limit
removed the mongo cursor hard coded limit as that introduced undesired
behavior with small max-keys
2018-05-07 17:58:07 -07:00
Rahul Padigela ef32d5e94d
Merge pull request #481 from scality/bf/mongo-cursor-limit
bf: fix mongo cursor limit
2018-05-03 12:18:01 -07:00
Alexander Chan 45d9c3d999 bf: fix mongo cursor limit
use hard coded limit for the cursor limit
2018-05-02 19:30:39 -07:00
Rahul Padigela a2ce46d8d0
Merge pull request #478 from scality/ft/Zenko-21/prom-route
Zenko-21: FT: New Route for Prometheus Client
2018-05-02 12:38:34 -07:00
anurag4DSB 0c0bffa2c3 ft: ZENKO-21 add prometheus monitoring route
Signed-off-by: anurag4DSB <anurag.213@gmail.com>
2018-05-02 12:29:57 -07:00
ironman-machine d966c0bda9 merge #477 2018-05-02 01:36:01 +00:00
Rahul Padigela cb86a857cc
Merge pull request #476 from scality/bf/mongo-list-object-limit
Bf/mongo list object limit
2018-05-01 16:56:13 -07:00
Alexander Chan 55c9441bd7 bf: add mongo list-object max limit 2018-05-01 10:32:01 -07:00
David Pineau cae55a65c8 ft: interpret healthcheck status from bucketd
As discussed in S3C-1412, it is necessary for S3 to interpret Bucketd health
status in order to provide more flexibility (relatively to the health check
mechanism) than failing due to a light partial unavailability of the platform.

This is done in the bucketclient backend's healthcheck method, in order to
comply with all the other backends.

Fixes S3C-1412

Signed-off-by: David Pineau <david.pineau@scality.com>
2018-04-30 15:58:05 -07:00
philipyoo 114cbf5571 bf: use expired interval to avg out throughput
When an interval of data in Redis expires, throughput will
abruptly reduce. Given the data we collect, we can only
calculate the average throughput. To ease the erratic
decrease on expiration of an interval, instead, get the
average of elapsed time for the newest interval and
remaining time of the interval multiplied against the
average of the just-expired interval.

In Redis, we need to save an extra interval to reference
the just-expired data.
2018-04-30 10:23:22 -07:00
Alexander Chan f2bab3b3d6 ft: ZENKO-262 update bucketInfo model 2018-04-30 10:23:22 -07:00
philipyoo 3276d235bb bf: do not include current UploadIdMarker in list
for in_memory only
listMultipartUpload should not list current marker in the
listing. Previously, it would include the marker as the
first item in the list
2018-04-30 10:23:22 -07:00
philipyoo ee2aed10f3 ft: add uid property to all buckets 2018-04-24 10:07:58 -07:00
Rahul Padigela 19bee770ea chore: update scality dependencies 2018-04-23 12:23:58 -07:00
Rahul Padigela e0c5d03436 chore: update version and author 2018-04-23 12:18:42 -07:00
Rahul Padigela c8a7148645
Merge pull request #472 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-23 00:07:18 -07:00
Rahul Padigela 8ca5dce4fe Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-23 00:03:04 -07:00
Bennett Buchanan 599fb5709b
Merge pull request #470 from scality/rf/S3C-1399/refactor-backbeat-metrics-into-arsenal
rf: S3C-1399 Add Backbeat metrics and routes
2018-04-20 16:30:39 -07:00
Rahul Padigela 1161d5f75d
Merge pull request #471 from scality/fwdport/7.4-7.4-beta
Fwdport/7.4 7.4 beta
2018-04-19 11:04:15 -07:00
Rahul Padigela 26b6c5d1d9 Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-7.4-beta 2018-04-19 11:00:33 -07:00
Bennett Buchanan 8fd50cd20e rf: S3C-1399 Add Backbeat metrics and routes 2018-04-18 16:46:07 -07:00
Rahul Padigela 1f6b5bf2bd
Merge pull request #469 from scality/fix/less-verbose-report
Do not log report requests
2018-04-16 13:18:45 -07:00
Rached Ben Mustapha a7813daea9 Do not log report requests 2018-04-16 11:55:44 -07:00
Rahul Padigela 5d4eb84425
Merge pull request #468 from scality/ft/mongo-caching
add mongo client caching
2018-04-13 17:39:51 -07:00
Alexander Chan 9511fff479 add mongo client bucket/object metrics caching 2018-04-13 17:25:03 -07:00
Rahul Padigela d70f64a6d0
Merge pull request #465 from scality/fx/data-managed-count
fx: correct data managed count
2018-04-13 16:48:01 -07:00
Alexander Chan ee66dc811c fx: correct data managed count
fixes issue with mongoclient countItems
+ accounts for data stored directly to external backend
+ adds check to handle versioned bucket and non-versioned buckets differently
2018-04-13 16:32:43 -07:00
Rahul Padigela 2710471726
Merge pull request #467 from scality/backport/master-rel/7.4-beta
Backport: master to rel/7.4-beta
2018-04-10 17:57:54 -07:00
Dora Korpar 9aee9f6cf0 ft: extract function for date modified headers
(cherry picked from commit 92da4c90e5)
2018-04-10 17:44:22 -07:00
Rahul Padigela a168fab266
Merge pull request #435 from scality/ft/objdel-add-modified-header-check
Ft/objdel add modified header check
2018-04-10 17:30:54 -07:00
Dora Korpar 92da4c90e5 ft: extract function for date modified headers 2018-04-10 17:05:03 -07:00
Rahul Padigela a95d5ea15d
Merge pull request #464 from scality/fix/flaky-mongo
Fixes flakiness in S3 functional tests with mongo backend
2018-04-07 22:16:56 -07:00
Salim aad05faa12 Fixes flakiness in S3 functional tests with mongo backend 2018-04-06 17:27:24 -07:00
Rahul Padigela ab230ebfe7
Merge pull request #463 from scality/fix/mongo-tests
Fix/mongo tests
2018-04-06 16:48:53 -07:00
Salim b3103e1307 ZENKO-227 fix: mongodb versioning
This fixes the problem where if the version ID is passed, it will cause
an Internal error failure because it was trying to create a new object
with the same key value. This adds a check to see if the object exists
first then updates and upserts accordingly.
2018-04-06 15:41:09 -07:00
Salim f3b0091210 feat: add error KeyAlreadyExists 2018-04-06 11:19:37 -07:00
Rahul Padigela f633b91072
Merge pull request #460 from scality/ft/add-data-managed
ft: add data managed metrics
2018-04-06 10:47:06 -07:00
Alexander Chan 87807462dc ft: add data managed metrics 2018-04-05 10:17:26 -07:00
Rahul Padigela d7f114d504
Merge pull request #461 from scality/fix/skip-mpu-bucket-prefix
Skip MPU shadow buckets
2018-04-03 14:42:28 -07:00
Rached Ben Mustapha 5ef168e654 Skip MPU shadow buckets 2018-04-03 13:20:57 -07:00
Rahul Padigela 82b4055c6c
Merge pull request #459 from scality/fix/stuck-replication
Fix/stuck replication
2018-04-02 14:55:15 -07:00
Rached Ben Mustapha 91ccccfe85 Remove use of global variable 2018-04-02 14:49:04 -07:00
Rached Ben Mustapha 696999874b Fix replication stream getting stuck
The mongodb transform stream would never actually emit any objects
to the extensions.
2018-04-02 14:31:54 -07:00
Rached Ben Mustapha d2bed3bf9a Un-hardcode mongodb database name 2018-04-02 14:31:07 -07:00
Rahul Padigela ad42baa5ff
Merge pull request #458 from scality/fix/mongologreader-contract
Fix/mongologreader contract
2018-04-02 12:07:20 -07:00
Rached Ben Mustapha 6ac92b2ad2 Fix mongodb log consumer initial values
Pre-existing LogConsumer contract uses `null` for initial values,
`undefined` breaks client code assumptions.
2018-04-02 11:31:02 -07:00
Rahul Padigela 13dbf48867
Merge pull request #457 from scality/ft/initial-instance-id
Ft/initial instance
2018-03-30 16:59:11 -07:00
Rached Ben Mustapha e79ad68e96 S3C-1355 Use provided instance id 2018-03-30 16:24:51 -07:00
Rahul Padigela a4a5fe0db0
Merge pull request #456 from scality/ft/ZENKO-147/crr-retry-kafka
FT: Add objectMD setters for replicationInfo
2018-03-30 11:37:22 -07:00
Bennett Buchanan f838fcc31f FT: Add objectMD setters for replicationInfo 2018-03-29 16:27:48 -07:00
VR eb9dd23b14
Merge pull request #455 from scality/ZENKO-222-bf-mongo-url
ZENKO-222 bf: revert mongo url
2018-03-28 18:03:19 -07:00
JianqinWang edbf7ab650 ZENKO-222 bf: revert mongo url 2018-03-28 17:57:59 -07:00
Rahul Padigela e068950903
Merge pull request #453 from scality/forward/orbit
Forward/orbit
2018-03-28 16:25:55 -07:00
Rahul Padigela 1ceb7b264c chore: remove branch version from package.json 2018-03-28 16:03:42 -07:00
vrancurel 5a29aaa10c fixing metadata search broken by commit ea8d523501fcd996447986318e59a95e729563b0 2018-03-28 16:03:42 -07:00
Rahul Padigela 7587f7ba25 ft: update version 2018-03-28 16:03:42 -07:00
Rahul Padigela 795b145594
Merge pull request #452 from scality/add-bson-to-dependencies
add bson to dependencies
2018-03-28 11:16:01 -07:00
Jeremy Desanlis 58f027a693 add bson to dependencies 2018-03-27 18:36:13 -07:00
Rahul Padigela e09348d658
Merge pull request #451 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-03-27 17:22:15 -07:00
Alexander Chan bddb90c6a1 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-27 15:49:03 -07:00
Rahul Padigela 94efaaccc2
Merge pull request #446 from scality/ft/S3C-1327-add-bucketinfo-uid-prop
ft: Add uid property to BucketInfo
2018-03-26 07:00:50 -07:00
Rahul Padigela 463a8ebe15
Merge pull request #448 from scality/fwd/7.4-to-7.4-beta
Fwd: 7.4 to 7.4 beta
2018-03-26 07:00:19 -07:00
philipyoo f17ce17857 Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-to-7.4-beta 2018-03-23 10:24:36 -07:00
Rahul Padigela 3a5250e2e9
Merge pull request #437 from scality/ft/S3C-1148-statsclient-multiple-ids
Ft/S3C-1148 statsclient multiple ids
2018-03-22 14:56:05 -07:00
ironman-machine 48cb7b3b05 merge #447 2018-03-21 18:44:42 +00:00
Nicolas Humbert 84c4c147a2 FIX: Mongo Client - countItems 2018-03-20 17:54:59 -07:00
Rahul Padigela 958e818655
Merge pull request #445 from scality/fwd/7.4-beta-master
Fwd/7.4 beta master
2018-03-20 14:51:33 -07:00
philipyoo 91dd219c47 ft: Add uid property to BucketInfo
Needed for lifecycle processing in backbeat
2018-03-19 18:56:52 -07:00
Alexander Chan 5f3d478edb Merge remote-tracking branch 'origin/rel/7.4-beta' 2018-03-19 15:49:24 -07:00
Rahul Padigela 04d56cfdff ft: update version number 2018-03-14 13:28:10 -07:00
Rahul Padigela 73dd529c29 ft: update package.json dependencies 2018-03-14 13:08:44 -07:00
philipyoo a9aa40c168 ft: extend statsclient to query by list of ids
Extend support for querying a list of ids and
returning a total sum of results for that list.

Also add wrapper for redis method `keys`
2018-03-14 11:22:15 -07:00
ironman-machine 189194a4e7 merge #433 2018-03-08 20:39:29 +00:00
JianqinWang a9a6b2433d rf: remove use of util.format 2018-03-08 09:22:28 -08:00
JianqinWang fa19fc8859 rf: name change for replica set hosts 2018-03-08 09:22:28 -08:00
JianqinWang a269619698 ZENKO-15 ft: oplog tailer for MongoDB 2018-03-08 09:22:28 -08:00
Rahul Padigela da1da43597
Merge pull request #438 from scality/fwdport/7.4-master
Fwdport/7.4 master
2018-03-08 00:26:46 -08:00
Rahul Padigela caac4e4e7e Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-master 2018-03-07 19:08:15 -08:00
Rahul Padigela 67250133dc
Merge pull request #436 from scality/bf/missing-bucketclient-param
bf: fix missing param needed for BCI
2018-03-06 16:43:35 -08:00
JianqinWang d3f3be03ae bf: fix missing param needed for BCI 2018-03-06 16:39:54 -08:00
ironman-machine 1a9f1afd2c merge #425 2018-03-06 18:25:58 +00:00
JianqinWang 9a5afdbc5c rf: rename mongo replicaset hosts 2018-03-05 17:28:11 -08:00
JianqinWang 83cf54512b ZENKO-140 rf: extract metadata backends from S3 2018-03-05 16:33:38 -08:00
ironman-machine 7e3ad64456 merge #432 2018-02-24 01:27:51 +00:00
Nicolas Humbert eba0cb6116 FT: add proxy_path header 2018-02-22 17:16:43 -08:00
Lauren Spiegel fd23e82ab9
Merge pull request #419 from scality/fix/replaceUpdate
Fix/stopSwallowingErrors
2018-02-14 13:08:05 -08:00
Lauren Spiegel d7cf5e8ccf FIX: Stop swallowing errors 2018-02-14 13:02:22 -08:00
flavien-scality d0f4f95f0d
Merge pull request #417 from scality/fwd/7.4-master
Fwd/7.4 master (try succeeded)
2018-02-14 09:54:16 +01:00
Alexandre Merle 0e606b1061 Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-master 2018-02-14 04:24:23 +01:00
ironman-machine 44ead88d83 merge #420 2018-02-13 19:32:13 +00:00
vrancurel d8e1497940 use hosts config instead of host and port 2018-02-12 15:24:51 -08:00
ThibaultRiviere 4193394340
Merge pull request #407 from scality/fwdport_7.4_master
Fwdport 7.4 master
2018-02-07 13:42:22 +01:00
Thibault Riviere 0f1b0dad01 Merge branch 'rel/7.4' into fwdport_7.4_master 2018-02-07 13:34:03 +01:00
ironman-machine 393d6edc07 merge #408 2018-02-06 23:55:05 +00:00
vrancurel 70638eaf7a support search in Mongo 2018-02-06 14:18:51 -08:00
Lauren Spiegel 9d0156dfdf
Merge pull request #403 from scality/welcome/mongo
Welcome/mongo
2018-02-02 15:10:44 -08:00
Lauren Spiegel 8d8028b83f CHORE: Change filename 2018-02-02 12:08:49 -08:00
Lauren Spiegel b99fe2cd8d Changes to client due to move 2018-02-02 12:08:46 -08:00
Lauren Spiegel cc26f288be Move mongoclient from s3 to arsenal 2018-02-02 11:34:45 -08:00
162 changed files with 11287 additions and 3433 deletions

View File

@ -39,10 +39,14 @@ jobs:
run: yarn --silent lint -- --max-warnings 0
- name: lint markdown
run: yarn --silent lint_md
- name: run unit tests
run: yarn test
- name: add hostname
run: |
sudo sh -c "echo '127.0.0.1 testrequestbucket.localhost' >> /etc/hosts"
- name: test and coverage
run: yarn --silent coverage
- name: run functional tests
run: yarn ft_test
- uses: codecov/codecov-action@v2
- name: run executables tests
run: yarn install && yarn test
working-directory: 'lib/executables/pensieveCreds/'

View File

@ -1,5 +1,7 @@
# Arsenal
[![codecov](https://codecov.io/gh/scality/Arsenal/branch/development/8.1/graph/badge.svg?token=X0esXhJSwb)](https://codecov.io/gh/scality/Arsenal)
Common utilities for the S3 project components
Within this repository, you will be able to find the shared libraries for the

View File

@ -85,6 +85,66 @@ Used to store the bucket lifecycle configuration info
### Properties Added
```javascript
this._uid = uid || uuid();
```
### Usage
Used to set a unique identifier on a bucket
## Model version 8
### Properties Added
```javascript
this._readLocationConstraint = readLocationConstraint || null;
```
### Usage
Used to store default read location of the bucket
## Model version 9
### Properties Added
```javascript
this._isNFS = isNFS || null;
```
### Usage
Used to determine whether the bucket may be accessed through NFS
## Model version 10
### Properties Added
```javascript
this._ingestion = ingestionConfig || null;
```
### Usage
Used to store the ingestion status of a bucket
## Model version 11
### Properties Added
```javascript
this._azureInfo = azureInfo || null;
```
### Usage
Used to store Azure storage account specific information
## Model version 12
### Properties Added
```javascript
this._objectLockEnabled = objectLockEnabled || false;
this._objectLockConfiguration = objectLockConfiguration || null;
@ -95,7 +155,7 @@ this._objectLockConfiguration = objectLockConfiguration || null;
Used to determine whether object lock capabilities are enabled on a bucket and
to store the object lock configuration of the bucket
## Model version 8
## Model version 13
### Properties Added
@ -107,7 +167,7 @@ this._notificationConfiguration = notificationConfiguration || null;
Used to store the bucket notification configuration info
## Model version 9
## Model version 14
### Properties Added
@ -118,15 +178,3 @@ this._serverSideEncryption.configuredMasterKeyId = configuredMasterKeyId || unde
### Usage
Used to store the users configured KMS key id
## Model version 10
### Properties Added
```javascript
this._uid = uid || uuid();
```
### Usage
Used to set a unique identifier on a bucket

View File

@ -26,7 +26,7 @@
},
"BucketAlreadyOwnedByYou": {
"code": 409,
"description": "Your previous request to create the named bucket succeeded and you already own it. You get this error in all AWS regions except US Standard, us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if bucket exists S3 will not do anything)."
"description": "A bucket with this name exists and is already owned by you"
},
"BucketNotEmpty": {
"code": 409,
@ -403,6 +403,10 @@
"code": 409,
"description": "The request was rejected because it attempted to create a resource that already exists."
},
"KeyAlreadyExists": {
"code": 409,
"description": "The request was rejected because it attempted to create a resource that already exists."
},
"ServiceFailure": {
"code": 500,
"description": "Server error: the request processing has failed because of an unknown error, exception or failure."
@ -760,5 +764,10 @@
"ReadOnly": {
"description": "trying to write to read only back-end",
"code": 403
},
"_comment": "----------------------- authbackend -----------------------",
"AuthMethodNotImplemented": {
"description": "AuthMethodNotImplemented",
"code": 501
}
}

28
greenkeeper.json Normal file
View File

@ -0,0 +1,28 @@
{
"groups": {
"default": {
"packages": [
"lib/executables/pensieveCreds/package.json",
"package.json"
]
}
},
"branchPrefix": "improvement/greenkeeper.io/",
"commitMessages": {
"initialBadge": "docs(readme): add Greenkeeper badge",
"initialDependencies": "chore(package): update dependencies",
"initialBranches": "chore(bert-e): whitelist greenkeeper branches",
"dependencyUpdate": "fix(package): update ${dependency} to version ${version}",
"devDependencyUpdate": "chore(package): update ${dependency} to version ${version}",
"dependencyPin": "fix: pin ${dependency} to ${oldVersionResolved}",
"devDependencyPin": "chore: pin ${dependency} to ${oldVersionResolved}",
"closes": "\n\nCloses #${number}"
},
"ignore": [
"ajv",
"eslint",
"eslint-plugin-react",
"eslint-config-airbnb",
"eslint-config-scality"
]
}

202
index.js Normal file
View File

@ -0,0 +1,202 @@
// Arsenal public API manifest (CommonJS entry point).
// Every property below re-exports a module from ./lib; this file contains no
// logic of its own, so the exported surface is exactly the set of keys here.
// NOTE(review): key order is preserved by CommonJS consumers that enumerate
// this object — do not reorder entries without checking downstream usage.
module.exports = {
// --- core utilities ---
auth: require('./lib/auth/auth'),
constants: require('./lib/constants'),
db: require('./lib/db'),
errors: require('./lib/errors.js'),
errorUtils: require('./lib/errorUtils'),
shuffle: require('./lib/shuffle'),
stringHash: require('./lib/stringHash'),
ipCheck: require('./lib/ipCheck'),
jsutil: require('./lib/jsutil'),
// --- TLS parameters (cipher suite list and DH params) ---
https: {
ciphers: require('./lib/https/ciphers.js'),
dhparam: require('./lib/https/dh2048.js'),
},
// --- listing algorithms, caches and stream helpers ---
algorithms: {
list: require('./lib/algos/list/exportAlgos'),
listTools: {
DelimiterTools: require('./lib/algos/list/tools'),
Skip: require('./lib/algos/list/skip'),
},
cache: {
LRUCache: require('./lib/algos/cache/LRUCache'),
},
stream: {
MergeStream: require('./lib/algos/stream/MergeStream'),
},
SortedSet: require('./lib/algos/set/SortedSet'),
},
// --- IAM-style policy evaluation ---
policies: {
evaluators: require('./lib/policyEvaluator/evaluator.js'),
validateUserPolicy: require('./lib/policy/policyValidator')
.validateUserPolicy,
evaluatePrincipal: require('./lib/policyEvaluator/principal'),
RequestContext: require('./lib/policyEvaluator/RequestContext.js'),
requestUtils: require('./lib/policyEvaluator/requestUtils'),
actionMaps: require('./lib/policyEvaluator/utils/actionMaps'),
},
Clustering: require('./lib/Clustering'),
testing: {
matrix: require('./lib/testing/matrix.js'),
},
// --- object versioning primitives ---
versioning: {
VersioningConstants: require('./lib/versioning/constants.js')
.VersioningConstants,
Version: require('./lib/versioning/Version.js').Version,
VersionID: require('./lib/versioning/VersionID.js'),
WriteGatheringManager: require('./lib/versioning/WriteGatheringManager.js'),
WriteCache: require('./lib/versioning/WriteCache.js'),
VersioningRequestProcessor: require('./lib/versioning/VersioningRequestProcessor.js'),
},
// --- network servers/clients: HTTP, RPC, REST, probes, KMIP ---
network: {
http: {
server: require('./lib/network/http/server'),
utils: require('./lib/network/http/utils'),
},
rpc: require('./lib/network/rpc/rpc'),
level: require('./lib/network/rpc/level-net'),
rest: {
RESTServer: require('./lib/network/rest/RESTServer'),
RESTClient: require('./lib/network/rest/RESTClient'),
},
RoundRobin: require('./lib/network/RoundRobin'),
probe: {
ProbeServer: require('./lib/network/probe/ProbeServer'),
HealthProbeServer:
require('./lib/network/probe/HealthProbeServer.js'),
Utils: require('./lib/network/probe/Utils.js'),
},
kmip: require('./lib/network/kmip'),
kmipClient: require('./lib/network/kmip/Client'),
},
// --- S3 API routing ---
s3routes: {
routes: require('./lib/s3routes/routes'),
routesUtils: require('./lib/s3routes/routesUtils'),
},
// --- S3 request/response middleware helpers ---
s3middleware: {
userMetadata: require('./lib/s3middleware/userMetadata'),
convertToXml: require('./lib/s3middleware/convertToXml'),
escapeForXml: require('./lib/s3middleware/escapeForXml'),
objectLegalHold: require('./lib/s3middleware/objectLegalHold'),
tagging: require('./lib/s3middleware/tagging'),
checkDateModifiedHeaders:
require('./lib/s3middleware/validateConditionalHeaders')
.checkDateModifiedHeaders,
validateConditionalHeaders:
require('./lib/s3middleware/validateConditionalHeaders')
.validateConditionalHeaders,
MD5Sum: require('./lib/s3middleware/MD5Sum'),
NullStream: require('./lib/s3middleware/nullStream'),
objectUtils: require('./lib/s3middleware/objectUtils'),
azureHelper: {
mpuUtils:
require('./lib/s3middleware/azureHelpers/mpuUtils'),
ResultsCollector:
require('./lib/s3middleware/azureHelpers/ResultsCollector'),
SubStreamInterface:
require('./lib/s3middleware/azureHelpers/SubStreamInterface'),
},
prepareStream: require('./lib/s3middleware/prepareStream'),
processMpuParts: require('./lib/s3middleware/processMpuParts'),
retention: require('./lib/s3middleware/objectRetention'),
lifecycleHelpers: require('./lib/s3middleware/lifecycleHelpers'),
},
// --- storage backends: metadata (bucketclient/file/in-memory/mongo/proxy)
// and data (file/external clouds/in-memory) ---
storage: {
metadata: {
MetadataWrapper: require('./lib/storage/metadata/MetadataWrapper'),
bucketclient: {
BucketClientInterface:
require('./lib/storage/metadata/bucketclient/' +
'BucketClientInterface'),
LogConsumer:
require('./lib/storage/metadata/bucketclient/LogConsumer'),
},
file: {
BucketFileInterface:
require('./lib/storage/metadata/file/BucketFileInterface'),
MetadataFileServer:
require('./lib/storage/metadata/file/MetadataFileServer'),
MetadataFileClient:
require('./lib/storage/metadata/file/MetadataFileClient'),
},
inMemory: {
metastore:
require('./lib/storage/metadata/in_memory/metastore'),
metadata: require('./lib/storage/metadata/in_memory/metadata'),
bucketUtilities:
require('./lib/storage/metadata/in_memory/bucket_utilities'),
},
mongoclient: {
MongoClientInterface:
require('./lib/storage/metadata/mongoclient/' +
'MongoClientInterface'),
LogConsumer:
require('./lib/storage/metadata/mongoclient/LogConsumer'),
},
proxy: {
Server: require('./lib/storage/metadata/proxy/Server'),
},
},
data: {
DataWrapper: require('./lib/storage/data/DataWrapper'),
MultipleBackendGateway:
require('./lib/storage/data/MultipleBackendGateway'),
parseLC: require('./lib/storage/data/LocationConstraintParser'),
file: {
DataFileStore:
require('./lib/storage/data/file/DataFileStore'),
DataFileInterface:
require('./lib/storage/data/file/DataFileInterface'),
},
external: {
AwsClient: require('./lib/storage/data/external/AwsClient'),
AzureClient: require('./lib/storage/data/external/AzureClient'),
GcpClient: require('./lib/storage/data/external/GcpClient'),
GCP: require('./lib/storage/data/external/GCP/GcpService'),
GcpUtils: require('./lib/storage/data/external/GCP/GcpUtils'),
GcpSigner: require('./lib/storage/data/external/GCP/GcpSigner'),
PfsClient: require('./lib/storage/data/external/PfsClient'),
backendUtils: require('./lib/storage/data/external/utils'),
},
inMemory: {
datastore: require('./lib/storage/data/in_memory/datastore'),
},
},
utils: require('./lib/storage/utils'),
},
// --- bucket/object model classes ---
models: {
BackendInfo: require('./lib/models/BackendInfo'),
BucketInfo: require('./lib/models/BucketInfo'),
BucketAzureInfo: require('./lib/models/BucketAzureInfo'),
ObjectMD: require('./lib/models/ObjectMD'),
ObjectMDLocation: require('./lib/models/ObjectMDLocation'),
ObjectMDAzureInfo: require('./lib/models/ObjectMDAzureInfo'),
ARN: require('./lib/models/ARN'),
WebsiteConfiguration: require('./lib/models/WebsiteConfiguration'),
ReplicationConfiguration:
require('./lib/models/ReplicationConfiguration'),
LifecycleConfiguration:
require('./lib/models/LifecycleConfiguration'),
LifecycleRule: require('./lib/models/LifecycleRule'),
BucketPolicy: require('./lib/models/BucketPolicy'),
ObjectLockConfiguration:
require('./lib/models/ObjectLockConfiguration'),
NotificationConfiguration:
require('./lib/models/NotificationConfiguration'),
},
// --- metrics (Redis-backed stats and Prometheus via ZenkoMetrics) ---
metrics: {
StatsClient: require('./lib/metrics/StatsClient'),
StatsModel: require('./lib/metrics/StatsModel'),
RedisClient: require('./lib/metrics/RedisClient'),
ZenkoMetrics: require('./lib/metrics/ZenkoMetrics'),
},
pensieve: {
credentialUtils: require('./lib/executables/pensieveCreds/utils'),
},
stream: {
readJSONStreamObject: require('./lib/stream/readJSONStreamObject'),
},
patches: {
locationConstraints: require('./lib/patches/locationConstraints'),
},
};

View File

@ -2,6 +2,7 @@ export const auth = require('./lib/auth/auth');
export const constants = require('./lib/constants');
export const db = require('./lib/db');
export const errors = require('./lib/errors.js');
export const errorUtils = require('./lib/errorUtils');
export const shuffle = require('./lib/shuffle');
export const stringHash = require('./lib/stringHash');
export const ipCheck = require('./lib/ipCheck');
@ -14,15 +15,10 @@ export const https = {
};
export const algorithms = {
list: {
Basic: require('./lib/algos/list/basic').List,
Delimiter: require('./lib/algos/list/delimiter').Delimiter,
DelimiterVersions: require('./lib/algos/list/delimiterVersions').DelimiterVersions,
DelimiterMaster: require('./lib/algos/list/delimiterMaster').DelimiterMaster,
MPU: require('./lib/algos/list/MPU').MultipartUploads,
},
list: require('./lib/algos/list/exportAlgos'),
listTools: {
DelimiterTools: require('./lib/algos/list/tools'),
Skip: require('./lib/algos/list/skip'),
},
cache: {
LRUCache: require('./lib/algos/cache/LRUCache'),
@ -58,6 +54,7 @@ export const versioning = {
export const network = {
http: {
server: require('./lib/network/http/server'),
utils: require('./lib/network/http/utils'),
},
rpc: require('./lib/network/rpc/rpc'),
level: require('./lib/network/rpc/level-net'),
@ -65,10 +62,13 @@ export const network = {
RESTServer: require('./lib/network/rest/RESTServer'),
RESTClient: require('./lib/network/rest/RESTClient'),
},
RoundRobin: require('./lib/network/RoundRobin'),
probe: {
ProbeServer: require('./lib/network/probe/ProbeServer'),
HealthProbeServer:
require('./lib/network/probe/HealthProbeServer.js'),
Utils: require('./lib/network/probe/Utils.js'),
},
RoundRobin: require('./lib/network/RoundRobin'),
kmip: require('./lib/network/kmip'),
kmipClient: require('./lib/network/kmip/Client'),
};
@ -84,16 +84,24 @@ export const s3middleware = {
escapeForXml: require('./lib/s3middleware/escapeForXml'),
objectLegalHold: require('./lib/s3middleware/objectLegalHold'),
tagging: require('./lib/s3middleware/tagging'),
checkDateModifiedHeaders:
require('./lib/s3middleware/validateConditionalHeaders')
.checkDateModifiedHeaders,
validateConditionalHeaders:
require('./lib/s3middleware/validateConditionalHeaders').validateConditionalHeaders,
require('./lib/s3middleware/validateConditionalHeaders')
.validateConditionalHeaders,
MD5Sum: require('./lib/s3middleware/MD5Sum'),
NullStream: require('./lib/s3middleware/nullStream'),
objectUtils: require('./lib/s3middleware/objectUtils'),
azureHelper: {
mpuUtils: require('./lib/s3middleware/azureHelpers/mpuUtils'),
ResultsCollector: require('./lib/s3middleware/azureHelpers/ResultsCollector'),
SubStreamInterface: require('./lib/s3middleware/azureHelpers/SubStreamInterface'),
mpuUtils:
require('./lib/s3middleware/azureHelpers/mpuUtils'),
ResultsCollector:
require('./lib/s3middleware/azureHelpers/ResultsCollector'),
SubStreamInterface:
require('./lib/s3middleware/azureHelpers/SubStreamInterface'),
},
prepareStream: require('./lib/s3middleware/prepareStream'),
processMpuParts: require('./lib/s3middleware/processMpuParts'),
retention: require('./lib/s3middleware/objectRetention'),
lifecycleHelpers: require('./lib/s3middleware/lifecycleHelpers'),
@ -164,17 +172,24 @@ export const storage = {
};
export const models = {
BackendInfo: require('./lib/models/BackendInfo'),
BucketInfo: require('./lib/models/BucketInfo'),
BucketAzureInfo: require('./lib/models/BucketAzureInfo'),
ObjectMD: require('./lib/models/ObjectMD'),
ObjectMDLocation: require('./lib/models/ObjectMDLocation'),
ObjectMDAzureInfo: require('./lib/models/ObjectMDAzureInfo'),
ARN: require('./lib/models/ARN'),
WebsiteConfiguration: require('./lib/models/WebsiteConfiguration'),
ReplicationConfiguration: require('./lib/models/ReplicationConfiguration'),
LifecycleConfiguration: require('./lib/models/LifecycleConfiguration'),
ReplicationConfiguration:
require('./lib/models/ReplicationConfiguration'),
LifecycleConfiguration:
require('./lib/models/LifecycleConfiguration'),
LifecycleRule: require('./lib/models/LifecycleRule'),
BucketPolicy: require('./lib/models/BucketPolicy'),
ObjectLockConfiguration: require('./lib/models/ObjectLockConfiguration'),
NotificationConfiguration: require('./lib/models/NotificationConfiguration'),
ObjectLockConfiguration:
require('./lib/models/ObjectLockConfiguration'),
NotificationConfiguration:
require('./lib/models/NotificationConfiguration'),
};
export const metrics = {
@ -191,3 +206,7 @@ export const pensieve = {
export const stream = {
readJSONStreamObject: require('./lib/stream/readJSONStreamObject'),
};
export const patches = {
locationConstraints: require('./lib/patches/locationConstraints'),
};

View File

@ -91,7 +91,7 @@ class Vault {
requestContext: serializedRCsArr,
},
(err, userInfo) => vaultSignatureCb(err, userInfo,
params.log, callback)
params.log, callback),
);
}
@ -146,7 +146,7 @@ class Vault {
requestContext: serializedRCs,
},
(err, userInfo) => vaultSignatureCb(err, userInfo,
params.log, callback, streamingV4Params)
params.log, callback, streamingV4Params),
);
}
@ -232,28 +232,28 @@ class Vault {
*/
getAccountIds(canonicalIDs, log, callback) {
log.trace('getting accountIds from Vault based on canonicalIDs',
{ canonicalIDs });
{ canonicalIDs });
this.client.getAccountIds(canonicalIDs,
{ reqUid: log.getSerializedUids() },
(err, info) => {
if (err) {
log.debug('received error message from vault',
{ errorMessage: err });
return callback(err);
}
const infoFromVault = info.message.body;
log.trace('info received from vault', { infoFromVault });
const result = {};
/* If the accountId was not found in Vault, do not
send the canonicalID back to the API */
Object.keys(infoFromVault).forEach(key => {
if (infoFromVault[key] !== 'NotFound' &&
infoFromVault[key] !== 'WrongFormat') {
result[key] = infoFromVault[key];
{ reqUid: log.getSerializedUids() },
(err, info) => {
if (err) {
log.debug('received error message from vault',
{ errorMessage: err });
return callback(err);
}
const infoFromVault = info.message.body;
log.trace('info received from vault', { infoFromVault });
const result = {};
/* If the accountId was not found in Vault, do not
send the canonicalID back to the API */
Object.keys(infoFromVault).forEach(key => {
if (infoFromVault[key] !== 'NotFound' &&
infoFromVault[key] !== 'WrongFormat') {
result[key] = infoFromVault[key];
}
});
return callback(null, result);
});
return callback(null, result);
});
}
/** checkPolicies -- call Vault to evaluate policies

View File

@ -10,11 +10,13 @@ const constants = require('../constants');
const constructStringToSignV2 = require('./v2/constructStringToSign');
const constructStringToSignV4 = require('./v4/constructStringToSign');
const convertUTCtoISO8601 = require('./v4/timeUtils').convertUTCtoISO8601;
const vaultUtilities = require('./in_memory/vaultUtilities');
const backend = require('./in_memory/Backend');
const validateAuthConfig = require('./in_memory/validateAuthConfig');
const AuthLoader = require('./in_memory/AuthLoader');
const vaultUtilities = require('./backends/in_memory/vaultUtilities');
const inMemoryBackend = require('./backends/in_memory/Backend');
const validateAuthConfig = require('./backends/in_memory/validateAuthConfig');
const AuthLoader = require('./backends/in_memory/AuthLoader');
const Vault = require('./Vault');
const baseBackend = require('./backends/base');
const chainBackend = require('./backends/ChainBackend');
let vault = null;
const auth = {};
@ -72,7 +74,7 @@ function extractParams(request, log, awsService, data) {
version = 'v4';
} else {
log.trace('invalid authorization security header',
{ header: authHeader });
{ header: authHeader });
return { err: errors.AccessDenied };
}
} else if (data.Signature) {
@ -87,7 +89,7 @@ function extractParams(request, log, awsService, data) {
if (version !== null && method !== null) {
if (!checkFunctions[version] || !checkFunctions[version][method]) {
log.trace('invalid auth version or method',
{ version, authMethod: method });
{ version, authMethod: method });
return { err: errors.NotImplemented };
}
log.trace('identified auth method', { version, authMethod: method });
@ -159,7 +161,7 @@ function doAuth(request, log, cb, awsService, requestContexts) {
* @return {undefined}
*/
function generateV4Headers(request, data, accessKey, secretKeyValue,
awsService, proxyPath, sessionToken) {
awsService, proxyPath, sessionToken) {
Object.assign(request, { headers: {} });
const amzDate = convertUTCtoISO8601(Date.now());
// get date without time
@ -192,16 +194,16 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
.filter(headerName =>
headerName.startsWith('x-amz-')
|| headerName.startsWith('x-scal-')
|| headerName === 'host'
|| headerName === 'host',
).sort().join(';');
const params = { request, signedHeaders, payloadChecksum,
credentialScope, timestamp, query: data,
awsService: service, proxyPath };
const stringToSign = constructStringToSignV4(params);
const signingKey = vaultUtilities.calculateSigningKey(secretKeyValue,
region,
scopeDate,
service);
region,
scopeDate,
service);
const signature = crypto.createHmac('sha256', signingKey)
.update(stringToSign, 'binary').digest('hex');
const authorizationHeader = `${algorithm} Credential=${accessKey}` +
@ -222,10 +224,14 @@ module.exports = {
constructStringToSignV2,
},
inMemory: {
backend,
backend: inMemoryBackend,
validateAuthConfig,
AuthLoader,
},
backends: {
baseBackend,
chainBackend,
},
AuthInfo,
Vault,
};

View File

@ -0,0 +1,189 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const async = require('async');
const errors = require('../../errors');
const BaseBackend = require('./base');
/**
* Class that provides an authentication backend that will verify signatures
* and retrieve emails and canonical ids associated with an account using a
* given list of authentication backends and vault clients.
*
* @class ChainBackend
*/
class ChainBackend extends BaseBackend {
    /**
     * @constructor
     * @param {string} service - service id used to construct ARNs
     * @param {object[]} clients - ordered list of authentication backends or
     *   vault clients; order matters for signature verification (first
     *   successful client wins) and for merges (later clients override
     *   earlier ones on key conflicts)
     */
    constructor(service, clients) {
        super(service);
        assert(Array.isArray(clients) && clients.length > 0, 'invalid client list');
        // Every client must implement the full auth-backend interface;
        // fail fast at construction rather than at first use.
        assert(clients.every(client =>
            typeof client.verifySignatureV4 === 'function' &&
            typeof client.verifySignatureV2 === 'function' &&
            typeof client.getCanonicalIds === 'function' &&
            typeof client.getEmailAddresses === 'function' &&
            typeof client.checkPolicies === 'function' &&
            typeof client.healthcheck === 'function',
        ), 'invalid client: missing required auth backend methods');
        this._clients = clients;
    }

    /*
     * Try task against each client, in list order, until one succeeds;
     * cb receives the first success, or the last error if all fail
     * (async.tryEach semantics).
     */
    _tryEachClient(task, cb) {
        async.tryEach(this._clients.map(client => done => task(client, done)), cb);
    }

    /*
     * Apply task to all clients and collect every result via async.map;
     * cb receives an array of results in client-list order, or the first
     * error encountered.
     */
    _forEachClient(task, cb) {
        async.map(this._clients, task, cb);
    }

    /** verifySignatureV2 - accept the signature if any one client accepts it */
    verifySignatureV2(stringToSign, signatureFromRequest, accessKey, options, callback) {
        this._tryEachClient((client, done) => client.verifySignatureV2(
            stringToSign,
            signatureFromRequest,
            accessKey,
            options,
            done,
        ), callback);
    }

    /** verifySignatureV4 - accept the signature if any one client accepts it */
    verifySignatureV4(stringToSign, signatureFromRequest, accessKey, region, scopeDate, options, callback) {
        this._tryEachClient((client, done) => client.verifySignatureV4(
            stringToSign,
            signatureFromRequest,
            accessKey,
            region,
            scopeDate,
            options,
            done,
        ), callback);
    }

    /*
     * Shallow-merge the `message.body` objects of all client responses into
     * one object; on duplicate keys the later client's value overwrites the
     * earlier one (Object.assign semantics).
     */
    static _mergeObjects(objectResponses) {
        return objectResponses.reduce(
            (retObj, resObj) => Object.assign(retObj, resObj.message.body),
            {});
    }

    /** getCanonicalIds - query all clients and merge their email->id maps */
    getCanonicalIds(emailAddresses, options, callback) {
        this._forEachClient(
            (client, done) => client.getCanonicalIds(emailAddresses, options, done),
            (err, res) => {
                if (err) {
                    return callback(err);
                }
                // TODO: atm naive merge, better handling of conflicting email results
                return callback(null, {
                    message: {
                        body: ChainBackend._mergeObjects(res),
                    },
                });
            });
    }

    /** getEmailAddresses - query all clients and merge their id->email maps */
    getEmailAddresses(canonicalIDs, options, callback) {
        this._forEachClient(
            (client, done) => client.getEmailAddresses(canonicalIDs, options, done),
            (err, res) => {
                if (err) {
                    return callback(err);
                }
                return callback(null, {
                    message: {
                        body: ChainBackend._mergeObjects(res),
                    },
                });
            });
    }

    /*
     * Merge policy responses from all clients into a single message body,
     * deduplicating policies by (arn + versionId). When the same policy is
     * reported by several clients, an entry with isAllowed=true takes
     * precedence over one with isAllowed=false. Responses with a missing
     * or non-array message.body are silently skipped.
     */
    static _mergePolicies(policyResponses) {
        const policyMap = {};

        policyResponses.forEach(resp => {
            if (!resp.message || !Array.isArray(resp.message.body)) {
                return;
            }

            resp.message.body.forEach(policy => {
                const key = (policy.arn || '') + (policy.versionId || '');
                if (!policyMap[key] || !policyMap[key].isAllowed) {
                    policyMap[key] = policy;
                }
                // else is duplicate policy
            });
        });

        return Object.keys(policyMap).map(key => {
            const policyRes = { isAllowed: policyMap[key].isAllowed };
            if (policyMap[key].arn !== '') {
                policyRes.arn = policyMap[key].arn;
            }
            if (policyMap[key].versionId) {
                policyRes.versionId = policyMap[key].versionId;
            }
            return policyRes;
        });
    }

    /*
        response format:
            { message: {
                body: [{}],
                code: number,
                message: string,
            } }
     */
    /** checkPolicies - evaluate policies against every client and merge verdicts */
    checkPolicies(requestContextParams, userArn, options, callback) {
        this._forEachClient((client, done) => client.checkPolicies(
            requestContextParams,
            userArn,
            options,
            done,
        ), (err, res) => {
            if (err) {
                return callback(err);
            }
            return callback(null, {
                message: {
                    body: ChainBackend._mergePolicies(res),
                },
            });
        });
    }

    /**
     * healthcheck - probe every client; per-client failures are captured
     * (not propagated) so that all statuses can be reported together.
     * If any client reported an error, the callback receives InternalError
     * plus the full per-client result list; otherwise (null, results).
     * @param {string} reqUid - request uid for logging
     * @param {function} callback - callback(err, results)
     * @return {undefined}
     */
    healthcheck(reqUid, callback) {
        this._forEachClient((client, done) =>
            client.healthcheck(reqUid, (err, res) => done(null, {
                error: !!err ? err : null,
                status: res,
            }),
        ), (err, res) => {
            if (err) {
                return callback(err);
            }

            const isError = res.some(results => !!results.error);
            if (isError) {
                return callback(errors.InternalError, res);
            }
            return callback(null, res);
        });
    }
}
module.exports = ChainBackend;

86
lib/auth/backends/base.js Normal file
View File

@ -0,0 +1,86 @@
'use strict'; // eslint-disable-line strict
const errors = require('../../errors');
/**
* Base backend class
*
* @class BaseBackend
*/
/**
 * Abstract authentication backend. Concrete backends (in-memory, chained,
 * vault-based) extend this class and override the signature-verification
 * and account-lookup methods; the defaults below fail with
 * AuthMethodNotImplemented so an unimplemented capability is reported
 * explicitly rather than crashing.
 *
 * @class BaseBackend
 */
class BaseBackend {
    /**
     * @constructor
     * @param {string} service - service identifier used when constructing ARNs
     */
    constructor(service) {
        // Exposed publicly; subclasses and callers read it directly.
        this.service = service;
    }

    /**
     * Verify an AWS Signature Version 2 request signature.
     * Not implemented in the base class.
     *
     * @param {string} stringToSign - string to sign built per AWS rules
     * @param {string} signatureFromRequest - signature sent with request
     * @param {string} accessKey - account accessKey
     * @param {object} options - contains algorithm (SHA1 or SHA256)
     * @param {function} callback - callback with either error or user info
     * @return {function} calls callback
     */
    verifySignatureV2(stringToSign, signatureFromRequest,
        accessKey, options, callback) {
        return callback(errors.AuthMethodNotImplemented);
    }

    /**
     * Verify an AWS Signature Version 4 request signature.
     * Not implemented in the base class.
     *
     * @param {string} stringToSign - string to sign built per AWS rules
     * @param {string} signatureFromRequest - signature sent with request
     * @param {string} accessKey - account accessKey
     * @param {string} region - region specified in request credential
     * @param {string} scopeDate - date specified in request credential
     * @param {object} options - options to send to Vault
     * (just contains reqUid for logging in Vault)
     * @param {function} callback - callback with either error or user info
     * @return {function} calls callback
     */
    verifySignatureV4(stringToSign, signatureFromRequest, accessKey,
        region, scopeDate, options, callback) {
        return callback(errors.AuthMethodNotImplemented);
    }

    /**
     * Look up canonical IDs for accounts from their email addresses.
     * Not implemented in the base class.
     *
     * @param {array} emails - list of email addresses
     * @param {object} options - to send log id to vault
     * @param {function} callback - callback to calling function
     * @returns {function} callback with either error or an object with
     * email addresses as keys and canonical IDs as values
     */
    getCanonicalIds(emails, options, callback) {
        return callback(errors.AuthMethodNotImplemented);
    }

    /**
     * Look up email addresses (used as display names for getACLs) for
     * accounts from their canonical IDs. Not implemented in the base class.
     *
     * @param {array} canonicalIDs - list of canonicalIDs
     * @param {object} options - to send log id to vault
     * @param {function} callback - callback to calling function
     * @returns {function} callback with either error or an object keyed by
     * account canonicalID whose value is an email address (or "NotFound")
     */
    getEmailAddresses(canonicalIDs, options, callback) {
        return callback(errors.AuthMethodNotImplemented);
    }

    /**
     * Evaluate IAM policies for a request. The base implementation returns
     * an empty policy list (no verdicts) rather than an error.
     *
     * @param {object} requestContextParams - request context parameters
     * @param {string} userArn - ARN of the requesting user
     * @param {object} options - to send log id to vault
     * @param {function} callback - callback(null, { message: { body: [] } })
     * @returns {function} calls callback
     */
    checkPolicies(requestContextParams, userArn, options, callback) {
        return callback(null, { message: { body: [] } });
    }

    /**
     * Report backend health. The base implementation always reports healthy.
     *
     * @param {string} reqUid - request uid for logging
     * @param {function} callback - callback(null, { code: 200, message: 'OK' })
     * @returns {function} calls callback
     */
    healthcheck(reqUid, callback) {
        return callback(null, { code: 200, message: 'OK' });
    }
}
module.exports = BaseBackend;

View File

@ -3,7 +3,7 @@ const glob = require('simple-glob');
const joi = require('@hapi/joi');
const werelogs = require('werelogs');
const ARN = require('../../models/ARN');
const ARN = require('../../../models/ARN');
/**
* Load authentication information from files or pre-loaded account
@ -26,20 +26,20 @@ class AuthLoader {
.required();
const accountsJoi = joi.array()
.items({
name: joi.string().required(),
email: joi.string().email().required(),
arn: joi.string().required(),
canonicalID: joi.string().required(),
shortid: joi.string().regex(/^[0-9]{12}$/).required(),
keys: this._joiKeysValidator,
// backward-compat
users: joi.array(),
})
.required()
.unique('arn')
.unique('email')
.unique('canonicalID');
.items({
name: joi.string().required(),
email: joi.string().email().required(),
arn: joi.string().required(),
canonicalID: joi.string().required(),
shortid: joi.string().regex(/^[0-9]{12}$/).required(),
keys: this._joiKeysValidator,
// backward-compat
users: joi.array(),
})
.required()
.unique('arn')
.unique('email')
.unique('canonicalID');
this._joiValidator = joi.object({ accounts: accountsJoi });
}
@ -136,7 +136,7 @@ class AuthLoader {
_validateData(authData, filePath) {
const res = joi.validate(authData, this._joiValidator,
{ abortEarly: false });
{ abortEarly: false });
if (res.error) {
this._dumpJoiErrors(res.error.details, filePath);
return false;
@ -156,7 +156,7 @@ class AuthLoader {
'master/conf/authdata.json). Also note that support ' +
'for account users has been dropped.',
{ accountName: account.name, accountArn: account.arn,
filePath });
filePath });
arnError = true;
return;
}
@ -167,7 +167,7 @@ class AuthLoader {
'https://github.com/scality/S3/blob/master/conf/' +
'authdata.json)',
{ accountName: account.name, accountArn: account.arn,
filePath });
filePath });
arnError = true;
return;
}
@ -176,8 +176,8 @@ class AuthLoader {
this._log.error(
'authentication config validation error',
{ reason: arnObj.error.description,
accountName: account.name, accountArn: account.arn,
filePath });
accountName: account.name, accountArn: account.arn,
filePath });
arnError = true;
return;
}
@ -185,8 +185,8 @@ class AuthLoader {
this._log.error(
'authentication config validation error',
{ reason: 'not an IAM account ARN',
accountName: account.name, accountArn: account.arn,
filePath });
accountName: account.name, accountArn: account.arn,
filePath });
arnError = true;
return;
}
@ -215,7 +215,7 @@ class AuthLoader {
logInfo.context = err.context;
}
this._log.error('authentication config validation error',
logInfo);
logInfo);
});
}
}

View File

@ -2,10 +2,11 @@
const crypto = require('crypto');
const errors = require('../../errors');
const errors = require('../../../errors');
const calculateSigningKey = require('./vaultUtilities').calculateSigningKey;
const hashSignature = require('./vaultUtilities').hashSignature;
const Indexer = require('./Indexer');
const BaseBackend = require('../base');
function _formatResponse(userInfoToSend) {
return {
@ -19,9 +20,9 @@ function _formatResponse(userInfoToSend) {
* Class that provides a memory backend for verifying signatures and getting
* emails and canonical ids associated with an account.
*
* @class Backend
* @class InMemoryBackend
*/
class Backend {
class InMemoryBackend extends BaseBackend {
/**
* @constructor
* @param {string} service - service identifer for construction arn
@ -30,19 +31,11 @@ class Backend {
* back and returns it in an object
*/
constructor(service, indexer, formatter) {
this.service = service;
super(service);
this.indexer = indexer;
this.formatResponse = formatter;
}
/** verifySignatureV2
* @param {string} stringToSign - string to sign built per AWS rules
* @param {string} signatureFromRequest - signature sent with request
* @param {string} accessKey - account accessKey
* @param {object} options - contains algorithm (SHA1 or SHA256)
* @param {function} callback - callback with either error or user info
* @return {function} calls callback
*/
verifySignatureV2(stringToSign, signatureFromRequest,
accessKey, options, callback) {
const entity = this.indexer.getEntityByKey(accessKey);
@ -65,18 +58,6 @@ class Backend {
return callback(null, vaultReturnObject);
}
/** verifySignatureV4
* @param {string} stringToSign - string to sign built per AWS rules
* @param {string} signatureFromRequest - signature sent with request
* @param {string} accessKey - account accessKey
* @param {string} region - region specified in request credential
* @param {string} scopeDate - date specified in request credential
* @param {object} options - options to send to Vault
* (just contains reqUid for logging in Vault)
* @param {function} callback - callback with either error or user info
* @return {function} calls callback
*/
verifySignatureV4(stringToSign, signatureFromRequest, accessKey,
region, scopeDate, options, callback) {
const entity = this.indexer.getEntityByKey(accessKey);
@ -100,16 +81,6 @@ class Backend {
return callback(null, vaultReturnObject);
}
/**
* Gets canonical ID's for a list of accounts
* based on email associated with account
* @param {array} emails - list of email addresses
* @param {object} log - log object
* @param {function} cb - callback to calling function
* @returns {function} callback with either error or
* object with email addresses as keys and canonical IDs
* as values
*/
getCanonicalIds(emails, log, cb) {
const results = {};
emails.forEach(email => {
@ -130,16 +101,6 @@ class Backend {
return cb(null, vaultReturnObject);
}
/**
* Gets email addresses (referred to as diplay names for getACL's)
* for a list of accounts based on canonical IDs associated with account
* @param {array} canonicalIDs - list of canonicalIDs
* @param {object} options - to send log id to vault
* @param {function} cb - callback to calling function
* @returns {function} callback with either error or
* an object from Vault containing account canonicalID
* as each object key and an email address as the value (or "NotFound")
*/
getEmailAddresses(canonicalIDs, options, cb) {
const results = {};
canonicalIDs.forEach(canonicalId => {
@ -188,7 +149,7 @@ class Backend {
}
class S3AuthBackend extends Backend {
class S3AuthBackend extends InMemoryBackend {
/**
* @constructor
* @param {object} authdata - the authentication config file's data

View File

@ -41,7 +41,7 @@ function getCanonicalizedAmzHeaders(headers, clientType) {
// Build headerString
return amzHeaders.reduce((headerStr, current) =>
`${headerStr}${current[0]}:${current[1]}\n`,
'');
'');
}
module.exports = getCanonicalizedAmzHeaders;

View File

@ -22,9 +22,9 @@ function check(request, log, data) {
timestamp = Date.parse(timestamp);
if (!timestamp) {
log.debug('missing or invalid date header',
{ method: 'auth/v2/headerAuthCheck.check' });
{ method: 'auth/v2/headerAuthCheck.check' });
return { err: errors.AccessDenied.
customizeDescription('Authentication requires a valid Date or ' +
customizeDescription('Authentication requires a valid Date or ' +
'x-amz-date header') };
}

View File

@ -42,12 +42,12 @@ function check(request, log, data) {
if (expirationTime > currentTime + preSignedURLExpiry) {
log.debug('expires parameter too far in future',
{ expires: request.query.Expires });
{ expires: request.query.Expires });
return { err: errors.AccessDenied };
}
if (currentTime > expirationTime) {
log.debug('current time exceeds expires time',
{ expires: request.query.Expires });
{ expires: request.query.Expires });
return { err: errors.RequestTimeTooSkewed };
}
const accessKey = data.AWSAccessKeyId;

View File

@ -43,7 +43,7 @@ function awsURIencode(input, encodeSlash, noEncodeStar) {
return encoded;
}
for (let i = 0; i < input.length; i++) {
const ch = input.charAt(i);
let ch = input.charAt(i);
if ((ch >= 'A' && ch <= 'Z') ||
(ch >= 'a' && ch <= 'z') ||
(ch >= '0' && ch <= '9') ||
@ -57,6 +57,20 @@ function awsURIencode(input, encodeSlash, noEncodeStar) {
} else if (ch === '*') {
encoded = encoded.concat(noEncodeStar ? '*' : '%2A');
} else {
if (ch >= '\uD800' && ch <= '\uDBFF') {
// If this character is a high surrogate peek the next character
// and join it with this one if the next character is a low
// surrogate.
// Otherwise the encoded URI will contain the two surrogates as
// two distinct UTF-8 sequences which is not valid UTF-8.
if (i + 1 < input.length) {
const ch2 = input.charAt(i + 1);
if (ch2 >= '\uDC00' && ch2 <= '\uDFFF') {
i++;
ch += ch2;
}
}
}
encoded = encoded.concat(_toHexUTF8(ch));
}
}

View File

@ -88,14 +88,14 @@ function check(request, log, data, awsService) {
}
if (!timestamp) {
log.debug('missing or invalid date header',
{ method: 'auth/v4/headerAuthCheck.check' });
{ method: 'auth/v4/headerAuthCheck.check' });
return { err: errors.AccessDenied.
customizeDescription('Authentication requires a valid Date or ' +
customizeDescription('Authentication requires a valid Date or ' +
'x-amz-date header') };
}
const validationResult = validateCredentials(credentialsArr, timestamp,
log);
log);
if (validationResult instanceof Error) {
log.debug('credentials in improper format', { credentialsArr,
timestamp, validationResult });
@ -127,6 +127,17 @@ function check(request, log, data, awsService) {
return { err: errors.RequestTimeTooSkewed };
}
let proxyPath = null;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
} catch (err) {
log.debug('invalid proxy_path header', { proxyPath, err });
return { err: errors.InvalidArgument.customizeDescription(
'invalid proxy_path header') };
}
}
const stringToSign = constructStringToSign({
log,
request,
@ -136,6 +147,7 @@ function check(request, log, data, awsService) {
timestamp,
payloadChecksum,
awsService: service,
proxyPath,
});
log.trace('constructed stringToSign', { stringToSign });
if (stringToSign instanceof Error) {

View File

@ -45,7 +45,7 @@ function check(request, log, data) {
}
const validationResult = validateCredentials(credential, timestamp,
log);
log);
if (validationResult instanceof Error) {
log.debug('credentials in improper format', { credential,
timestamp, validationResult });
@ -62,6 +62,17 @@ function check(request, log, data) {
return { err: errors.RequestTimeTooSkewed };
}
let proxyPath = null;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
} catch (err) {
log.debug('invalid proxy_path header', { proxyPath });
return { err: errors.InvalidArgument.customizeDescription(
'invalid proxy_path header') };
}
}
// In query v4 auth, the canonical request needs
// to include the query params OTHER THAN
// the signature so create a
@ -87,6 +98,7 @@ function check(request, log, data) {
credentialScope:
`${scopeDate}/${region}/${service}/${requestType}`,
awsService: service,
proxyPath,
});
if (stringToSign instanceof Error) {
return { err: stringToSign };

View File

@ -25,20 +25,20 @@ function validateCredentials(credentials, timestamp, log) {
log.warn('accessKey provided is wrong format', { accessKey });
return errors.InvalidArgument;
}
// The scope date (format YYYYMMDD) must be same date as the timestamp
// on the request from the x-amz-date param (if queryAuthCheck)
// or from the x-amz-date header or date header (if headerAuthCheck)
// Format of timestamp is ISO 8601: YYYYMMDDTHHMMSSZ.
// http://docs.aws.amazon.com/AmazonS3/latest/API/
// sigv4-query-string-auth.html
// http://docs.aws.amazon.com/general/latest/gr/
// sigv4-date-handling.html
// The scope date (format YYYYMMDD) must be same date as the timestamp
// on the request from the x-amz-date param (if queryAuthCheck)
// or from the x-amz-date header or date header (if headerAuthCheck)
// Format of timestamp is ISO 8601: YYYYMMDDTHHMMSSZ.
// http://docs.aws.amazon.com/AmazonS3/latest/API/
// sigv4-query-string-auth.html
// http://docs.aws.amazon.com/general/latest/gr/
// sigv4-date-handling.html
// convert timestamp to format of scopeDate YYYYMMDD
// convert timestamp to format of scopeDate YYYYMMDD
const timestampDate = timestamp.split('T')[0];
if (scopeDate.length !== 8 || scopeDate !== timestampDate) {
log.warn('scope date must be the same date as the timestamp date',
{ scopeDate, timestampDate });
{ scopeDate, timestampDate });
return errors.RequestTimeTooSkewed;
}
if (service !== 's3' && service !== 'iam' && service !== 'ring' &&
@ -50,7 +50,7 @@ function validateCredentials(credentials, timestamp, log) {
}
if (requestType !== 'aws4_request') {
log.warn('requestType contained in params is not aws4_request',
{ requestType });
{ requestType });
return errors.InvalidArgument;
}
return {};
@ -68,7 +68,7 @@ function extractQueryParams(queryObj, log) {
// Do not need the algorithm sent back
if (queryObj['X-Amz-Algorithm'] !== 'AWS4-HMAC-SHA256') {
log.warn('algorithm param incorrect',
{ algo: queryObj['X-Amz-Algorithm'] });
{ algo: queryObj['X-Amz-Algorithm'] });
return authParams;
}

View File

@ -1,20 +1,21 @@
'use strict'; // eslint-disable-line strict
const crypto = require('crypto');
// The min value here is to manage further backward compat if we
// need it
const iamSecurityTokenSizeMin = 128;
const iamSecurityTokenSizeMax = 128;
// Security token is an hex string (no real format from amazon)
const iamSecurityTokenPattern =
new RegExp(`^[a-f0-9]{${iamSecurityTokenSizeMin},` +
`${iamSecurityTokenSizeMax}}$`);
// Default value
const vaultGeneratedIamSecurityTokenSizeMin = 128;
// Safe to assume that a typical token size is less than 8192 bytes
const vaultGeneratedIamSecurityTokenSizeMax = 8192;
// Base-64
const vaultGeneratedIamSecurityTokenPattern = /^[A-Za-z0-9/+=]*$/;
module.exports = {
// info about the iam security token
iamSecurityToken: {
min: iamSecurityTokenSizeMin,
max: iamSecurityTokenSizeMax,
pattern: iamSecurityTokenPattern,
min: vaultGeneratedIamSecurityTokenSizeMin,
max: vaultGeneratedIamSecurityTokenSizeMax,
pattern: vaultGeneratedIamSecurityTokenPattern,
},
// PublicId is used as the canonicalID for a request that contains
// no authentication information. Requestor can access
@ -23,6 +24,7 @@ module.exports = {
zenkoServiceAccount: 'http://acs.zenko.io/accounts/service',
metadataFileNamespace: '/MDFile',
dataFileURL: '/DataFile',
passthroughFileURL: '/PassthroughFile',
// AWS states max size for user-defined metadata
// (x-amz-meta- headers) is 2 KB:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
@ -32,7 +34,10 @@ module.exports = {
emptyFileMd5: 'd41d8cd98f00b204e9800998ecf8427e',
// Version 2 changes the format of the data location property
// Version 3 adds the dataStoreName attribute
mdModelVersion: 3,
// Version 4 add the Creation-Time and Content-Language attributes,
// and add support for x-ms-meta-* headers in UserMetadata
// Version 5 adds the azureInfo structure
mdModelVersion: 5,
/*
* Splitter is used to build the object name for the overview of a
* multipart upload and to build the object names for each part of a
@ -72,9 +77,44 @@ module.exports = {
permittedCapitalizedBuckets: {
METADATA: true,
},
// Setting a lower object key limit to account for:
// - Mongo key limit of 1012 bytes
// - Version ID in Mongo Key if versioned of 33
// - Max bucket name length if bucket match false of 63
// - Extra prefix slash for bucket prefix if bucket match of 1
objectKeyByteLimit: 915,
/* delimiter for location-constraint. The location constraint will be able
* to include the ingestion flag
*/
zenkoSeparator: ':',
/* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true },
/* eslint-enable camelcase */
replicationBackends: { aws_s3: true, azure: true, gcp: true },
// hex digest of sha256 hash of empty string:
emptyStringHash: crypto.createHash('sha256')
.update('', 'binary').digest('hex'),
mpuMDStoredExternallyBackend: { aws_s3: true, gcp: true },
// AWS sets a minimum size limit for parts except for the last part.
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
minimumAllowedPartSize: 5242880,
gcpMaximumAllowedPartCount: 1024,
// GCP Object Tagging Prefix
gcpTaggingPrefix: 'aws-tag-',
productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
legacyLocations: ['sproxyd', 'legacy'],
// healthcheck default call from nginx is every 2 seconds
// for external backends, don't call unless at least 1 minute
// (60,000 milliseconds) since last call
externalBackendHealthCheckInterval: 60000,
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
clientsRequireStringKey: { sproxyd: true, cdmi: true },
hasCopyPartBackends: { aws_s3: true, gcp: true },
versioningNotImplBackends: { azure: true, gcp: true },
// user metadata applied on zenko-created objects
zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
// Default expiration value of the S3 pre-signed URL duration
// 604800 seconds (seven days).
defaultPreSignedURLExpiry: 7 * 24 * 60 * 60,
@ -91,10 +131,6 @@ module.exports = {
's3:ObjectRemoved:DeleteMarkerCreated',
]),
notificationArnPrefix: 'arn:scality:bucketnotif',
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
clientsRequireStringKey: { sproxyd: true, cdmi: true },
// HTTP server keep-alive timeout is set to a higher value than
// client's free sockets timeout to avoid the risk of triggering
// ECONNRESET errors if the server closes the connection at the

View File

@ -64,12 +64,12 @@ class IndexTransaction {
push(op) {
if (this.closed) {
throw propError('pushOnCommittedTransaction',
'can not add ops to already committed transaction');
'can not add ops to already committed transaction');
}
if (op.type !== 'put' && op.type !== 'del') {
throw propError('invalidTransactionVerb',
`unknown action type: ${op.type}`);
`unknown action type: ${op.type}`);
}
if (op.key === undefined) {
@ -137,7 +137,7 @@ class IndexTransaction {
addCondition(condition) {
if (this.closed) {
throw propError('pushOnCommittedTransaction',
'can not add conditions to already committed transaction');
'can not add conditions to already committed transaction');
}
if (condition === undefined || Object.keys(condition).length === 0) {
throw propError('missingCondition', 'missing condition for conditional put');
@ -159,12 +159,12 @@ class IndexTransaction {
commit(cb) {
if (this.closed) {
return cb(propError('alreadyCommitted',
'transaction was already committed'));
'transaction was already committed'));
}
if (this.operations.length === 0) {
return cb(propError('emptyTransaction',
'tried to commit an empty transaction'));
'tried to commit an empty transaction'));
}
this.closed = true;

View File

@ -76,11 +76,11 @@ function errorsGen() {
const errorsObj = require('../errors/arsenalErrors.json');
Object.keys(errorsObj)
.filter(index => index !== '_comment')
.forEach(index => {
errors[index] = new ArsenalError(index, errorsObj[index].code,
errorsObj[index].description);
});
.filter(index => index !== '_comment')
.forEach(index => {
errors[index] = new ArsenalError(index, errorsObj[index].code,
errorsObj[index].description);
});
return errors;
}

View File

@ -7,8 +7,8 @@
"test": "mocha --recursive --timeout 5500 tests/unit"
},
"dependencies": {
"mocha": "2.5.3",
"async": "^2.6.0",
"mocha": "5.2.0",
"async": "~2.6.1",
"node-forge": "^0.7.1"
}
}

View File

@ -17,9 +17,9 @@ describe('decyrptSecret', () => {
describe('parseServiceCredentials', () => {
const conf = {
users: [{ accessKey,
accountType: 'service-clueso',
secretKey,
userName: 'Search Service Account' }],
accountType: 'service-clueso',
secretKey,
userName: 'Search Service Account' }],
};
const auth = JSON.stringify({ privateKey });

View File

@ -25,7 +25,7 @@ module.exports.once = function once(func) {
state.res = func.apply(func, args);
} else {
debug('function already called:', func,
'returning cached result:', state.res);
'returning cached result:', state.res);
}
return state.res;
};

View File

@ -17,11 +17,33 @@ class RedisClient {
method: 'RedisClient.constructor',
redisHost: config.host,
redisPort: config.port,
})
}),
);
return this;
}
/**
* scan a pattern and return matching keys
* @param {string} pattern - string pattern to match with all existing keys
* @param {number} [count=10] - scan count
* @param {callback} cb - callback (error, result)
* @return {undefined}
*/
scan(pattern, count = 10, cb) {
const params = { match: pattern, count };
const keys = [];
const stream = this._client.scanStream(params);
stream.on('data', resultKeys => {
for (let i = 0; i < resultKeys.length; i++) {
keys.push(resultKeys[i]);
}
});
stream.on('end', () => {
cb(null, keys);
});
}
/**
* increment value of a key by 1 and set a ttl
* @param {string} key - key holding the value
@ -35,6 +57,17 @@ class RedisClient {
.exec(cb);
}
/**
 * increment value of a key by a given amount (Redis INCRBY)
 * @param {string} key - key holding the value
 * @param {number} amount - amount to increase by
 * @param {callback} cb - callback invoked by the underlying client with
 * (error, result) — result is the new value per Redis INCRBY semantics
 * @return {undefined}
 */
incrby(key, amount, cb) {
    return this._client.incrby(key, amount, cb);
}
/**
* increment value of a key by a given amount and set a ttl
* @param {string} key - key holding the value
@ -50,13 +83,24 @@ class RedisClient {
}
/**
* execute a batch of commands
* @param {string[]} cmds - list of commands
* @param {callback} cb - callback
* @return {undefined}
*/
batch(cmds, cb) {
return this._client.pipeline(cmds).exec(cb);
* decrement value of a key by a given amount
* @param {string} key - key holding the value
* @param {number} amount - amount to increase by
* @param {callback} cb - callback
* @return {undefined}
*/
decrby(key, amount, cb) {
return this._client.decrby(key, amount, cb);
}
/**
 * get value stored at key (Redis GET)
 * @param {string} key - key holding the value
 * @param {callback} cb - callback invoked by the underlying client with
 * (error, value); per Redis GET semantics, value is null when the key
 * does not exist
 * @return {undefined}
 */
get(key, cb) {
    return this._client.get(key, cb);
}
/**
@ -71,6 +115,16 @@ class RedisClient {
return this._client.exists(key, cb);
}
/**
 * execute a batch of commands in one round trip via a client pipeline
 * @param {string[]} cmds - list of commands, each an array of the form
 * [commandName, ...args] (e.g. ['get', key], as built by callers)
 * @param {callback} cb - callback invoked with (error, results); each
 * entry of results is a per-command [error, value] pair
 * @return {undefined}
 */
batch(cmds, cb) {
    return this._client.pipeline(cmds).exec(cb);
}
/**
* Add a value and its score to a sorted set. If no sorted set exists, this
* will create a new one for the given key.
@ -150,12 +204,26 @@ class RedisClient {
return this._client.zrangebyscore(key, min, max, cb);
}
/**
 * get TTL or expiration in seconds (Redis TTL)
 * @param {string} key - name of key
 * @param {function} cb - callback invoked with (error, ttl); per Redis
 * TTL semantics a negative result flags a missing key or a key with no
 * expiry set
 * @return {undefined}
 */
ttl(key, cb) {
    return this._client.ttl(key, cb);
}
clear(cb) {
return this._client.flushdb(cb);
}
disconnect() {
this._client.disconnect();
disconnect(cb) {
return this._client.quit(cb);
}
listClients(cb) {
return this._client.client('list', cb);
}
}

View File

@ -41,11 +41,11 @@ class StatsClient {
/**
* build redis key to get total number of occurrences on the server
* @param {string} name - key name identifier
* @param {object} d - Date instance
* @param {Date} date - Date instance
* @return {string} key - key for redis
*/
_buildKey(name, d) {
return `${name}:${this._normalizeTimestamp(d)}`;
buildKey(name, date) {
return `${name}:${this._normalizeTimestamp(date)}`;
}
/**
@ -85,11 +85,35 @@ class StatsClient {
amount = (typeof incr === 'number') ? incr : 1;
}
const key = this._buildKey(`${id}:requests`, new Date());
const key = this.buildKey(`${id}:requests`, new Date());
return this._redis.incrbyEx(key, amount, this._expiry, callback);
}
/**
* Increment the given key by the given value.
* @param {String} key - The Redis key to increment
* @param {Number} incr - The value to increment by
* @param {function} [cb] - callback
* @return {undefined}
*/
incrementKey(key, incr, cb) {
const callback = cb || this._noop;
return this._redis.incrby(key, incr, callback);
}
/**
* Decrement the given key by the given value.
* @param {String} key - The Redis key to decrement
* @param {Number} decr - The value to decrement by
* @param {function} [cb] - callback
* @return {undefined}
*/
decrementKey(key, decr, cb) {
const callback = cb || this._noop;
return this._redis.decrby(key, decr, callback);
}
/**
* report/record a request that ended up being a 500 on the server
* @param {string} id - service identifier
@ -101,10 +125,54 @@ class StatsClient {
return undefined;
}
const callback = cb || this._noop;
const key = this._buildKey(`${id}:500s`, new Date());
const key = this.buildKey(`${id}:500s`, new Date());
return this._redis.incrEx(key, this._expiry, callback);
}
/**
 * wrapper on `getStats` that handles a list of keys
 *
 * Aggregates per-service request and 500 counters into a single total.
 * Best-effort: if any per-id lookup fails, the error is logged and the
 * zeroed result object is returned (no error is propagated to `cb`).
 *
 * @param {object} log - Werelogs request logger
 * @param {array} ids - service identifiers
 * @param {callback} cb - callback to call with the err/result
 * @return {undefined}
 */
getAllStats(log, ids, cb) {
    if (!this._redis) {
        return cb(null, {});
    }
    const statsRes = {
        'requests': 0,
        '500s': 0,
        'sampleDuration': this._expiry,
    };
    let requests = 0;
    let errors = 0;
    // for now set concurrency to default of 10
    return async.eachLimit(ids, 10, (id, done) => {
        this.getStats(log, id, (err, res) => {
            if (err) {
                return done(err);
            }
            requests += res.requests;
            errors += res['500s'];
            return done();
        });
    }, error => {
        if (error) {
            // best-effort: report zeros rather than failing the caller
            log.error('error getting stats', {
                error,
                method: 'StatsClient.getAllStats',
            });
            return cb(null, statsRes);
        }
        statsRes.requests = requests;
        statsRes['500s'] = errors;
        return cb(null, statsRes);
    });
}
/**
* get stats for the last x seconds, x being the sampling duration
* @param {object} log - Werelogs request logger
@ -121,8 +189,8 @@ class StatsClient {
const reqsKeys = [];
const req500sKeys = [];
for (let i = 0; i < totalKeys; i++) {
reqsKeys.push(['get', this._buildKey(`${id}:requests`, d)]);
req500sKeys.push(['get', this._buildKey(`${id}:500s`, d)]);
reqsKeys.push(['get', this.buildKey(`${id}:requests`, d)]);
req500sKeys.push(['get', this.buildKey(`${id}:500s`, d)]);
this._setPrevInterval(d);
}
return async.parallel([

View File

@ -1,11 +1,148 @@
const async = require('async');
const StatsClient = require('./StatsClient');
/**
/**
* @class StatsModel
*
* @classdesc Extend and overwrite how timestamps are normalized by minutes
* rather than by seconds
*/
class StatsModel extends StatsClient {
/**
* Utility method to convert 2d array rows to columns, and vice versa
* See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
* @param {array} arrays - 2d array of integers
* @return {array} converted array
*/
_zip(arrays) {
if (arrays.length > 0 && arrays.every(a => Array.isArray(a))) {
return arrays[0].map((_, i) => arrays.map(a => a[i]));
}
return [];
}
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
/**
* override the method to get the count as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return {array} array of integers, ordered from most recent interval to
* oldest interval with length of (expiry / interval)
*/
_getCount(arr) {
const size = Math.floor(this._expiry / this._interval);
const array = arr.reduce((store, i) => {
let num = parseInt(i[1], 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, []);
if (array.length < size) {
array.push(...Array(size - array.length).fill(0));
}
return array;
}
/**
* wrapper on `getStats` that handles a list of keys
* override the method to reduce the returned 2d array from `_getCount`
* @param {object} log - Werelogs request logger
* @param {array} ids - service identifiers
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
*/
getAllStats(log, ids, cb) {
if (!this._redis) {
return cb(null, {});
}
const size = Math.floor(this._expiry / this._interval);
const statsRes = {
'requests': Array(size).fill(0),
'500s': Array(size).fill(0),
'sampleDuration': this._expiry,
};
const requests = [];
const errors = [];
if (ids.length === 0) {
return cb(null, statsRes);
}
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
}
requests.push(res.requests);
errors.push(res['500s']);
return done();
});
}, error => {
if (error) {
log.error('error getting stats', {
error,
method: 'StatsModel.getAllStats',
});
return cb(null, statsRes);
}
statsRes.requests = this._zip(requests).map(arr =>
arr.reduce((acc, i) => acc + i), 0);
statsRes['500s'] = this._zip(errors).map(arr =>
arr.reduce((acc, i) => acc + i), 0);
return cb(null, statsRes);
});
}
/**
* Handles getting a list of global keys.
* @param {array} ids - Service identifiers
* @param {object} log - Werelogs request logger
* @param {function} cb - Callback
* @return {undefined}
*/
getAllGlobalStats(ids, log, cb) {
const reqsKeys = ids.map(key => (['get', key]));
return this._redis.batch(reqsKeys, (err, res) => {
const statsRes = { requests: 0 };
if (err) {
log.error('error getting metrics', {
error: err,
method: 'StatsClient.getAllGlobalStats',
});
return cb(null, statsRes);
}
statsRes.requests = res.reduce((sum, curr) => {
const [cmdErr, val] = curr;
if (cmdErr) {
// Log any individual request errors from the batch request.
log.error('error getting metrics', {
error: cmdErr,
method: 'StatsClient.getAllGlobalStats',
});
}
return sum + (Number.parseInt(val, 10) || 0);
}, 0);
return cb(null, statsRes);
});
}
/**
* normalize date timestamp to the nearest hour
* @param {Date} d - Date instance
@ -24,34 +161,6 @@ class StatsModel extends StatsClient {
return d.setHours(d.getHours() - 1);
}
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
/**
* override the method to get the result as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return {array} array of integers, ordered from most recent interval to
* oldest interval
*/
_getCount(arr) {
return arr.reduce((store, i) => {
let num = parseInt(i[1], 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, []);
}
/**
* get list of sorted set key timestamps
* @param {number} epoch - epoch time

View File

@ -2,8 +2,8 @@ const promClient = require('prom-client');
const collectDefaultMetricsIntervalMs =
process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS !== undefined ?
Number.parseInt(process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS, 10) :
10000;
Number.parseInt(process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS, 10) :
10000;
promClient.collectDefaultMetrics({ timeout: collectDefaultMetricsIntervalMs });

View File

@ -27,7 +27,7 @@ class ARN {
static createFromString(arnStr) {
const [arn, partition, service, region, accountId,
resourceType, resource] = arnStr.split(':');
resourceType, resource] = arnStr.split(':');
if (arn !== 'arn') {
return { error: errors.InvalidArgument.customizeDescription(
@ -58,7 +58,7 @@ class ARN {
'must be a 12-digit number or "*"') };
}
const fullResource = (resource !== undefined ?
`${resourceType}:${resource}` : resourceType);
`${resourceType}:${resource}` : resourceType);
return new ARN(partition, service, region, accountId, fullResource);
}
@ -98,7 +98,7 @@ class ARN {
toString() {
return ['arn', this.getPartition(), this.getService(),
this.getRegion(), this.getAccountId(), this.getResource()]
this.getRegion(), this.getAccountId(), this.getResource()]
.join(':');
}
}

View File

@ -0,0 +1,237 @@
/**
 * Helper class to ease access to the Azure specific information for
 * storage accounts mapped to buckets.
 *
 * Each field listed in `azureInfoFields` gets a chainable
 * get<Field>()/set<Field>() accessor pair, generated after the class
 * definition; the public interface is identical to a hand-written
 * implementation.
 */

// Ordered list of every Azure storage-account attribute carried by this
// model; also fixes the enumeration order of the raw structure.
const azureInfoFields = [
    'sku',
    'accessTier',
    'kind',
    'systemKeys',
    'tenantKeys',
    'subscriptionId',
    'resourceGroup',
    'deleteRetentionPolicy',
    'managementPolicies',
    'httpsOnly',
    'tags',
    'networkACL',
    'cname',
    'azureFilesAADIntegration',
    'hnsEnabled',
    'logging',
    'hourMetrics',
    'minuteMetrics',
    'serviceVersion',
];

class BucketAzureInfo {
    /**
     * @constructor
     * @param {object} obj - Raw structure for the Azure info on storage
     * account; every key of `azureInfoFields` is copied as-is:
     * @param {string} obj.sku - SKU name of this storage account
     * @param {string} obj.accessTier - Access Tier name of this storage
     * account
     * @param {string} obj.kind - Kind name of this storage account
     * @param {string[]} obj.systemKeys - pair of shared keys for the system
     * @param {string[]} obj.tenantKeys - pair of shared keys for the tenant
     * @param {string} obj.subscriptionId - subscription ID the storage
     * account belongs to
     * @param {string} obj.resourceGroup - Resource group name the storage
     * account belongs to
     * @param {object} obj.deleteRetentionPolicy - Delete retention policy
     * @param {boolean} obj.deleteRetentionPolicy.enabled -
     * @param {number} obj.deleteRetentionPolicy.days -
     * @param {object[]} obj.managementPolicies - Management policies for
     * this storage account
     * @param {boolean} obj.httpsOnly - Serve the content of this storage
     * account through HTTPS only
     * @param {object} obj.tags - Set of tags applied on this storage account
     * @param {object[]} obj.networkACL - Network ACL of this storage account
     * @param {string} obj.cname - CNAME of this storage account
     * @param {boolean} obj.azureFilesAADIntegration - whether or not Azure
     * Files AAD Integration is enabled for this storage account
     * @param {boolean} obj.hnsEnabled - whether or not a hierarchical
     * namespace is enabled for this storage account
     * @param {object} obj.logging - service properties: logging
     * @param {object} obj.hourMetrics - service properties: hourMetrics
     * @param {object} obj.minuteMetrics - service properties: minuteMetrics
     * @param {string} obj.serviceVersion - service properties:
     * serviceVersion
     */
    constructor(obj) {
        this._data = {};
        azureInfoFields.forEach(field => {
            this._data[field] = obj[field];
        });
    }

    /**
     * Return the raw data structure
     * @return {object} - the raw Azure info structure
     */
    getValue() {
        return this._data;
    }
}

// Generate the accessor pairs. Object.defineProperty is used with the
// same attribute flags as class methods (writable, configurable,
// non-enumerable) so the generated accessors are indistinguishable from
// hand-written ones; setters return `this` to allow chaining.
azureInfoFields.forEach(field => {
    const suffix = field.charAt(0).toUpperCase() + field.slice(1);
    Object.defineProperty(BucketAzureInfo.prototype, `get${suffix}`, {
        value: function get() {
            return this._data[field];
        },
        writable: true,
        configurable: true,
        enumerable: false,
    });
    Object.defineProperty(BucketAzureInfo.prototype, `set${suffix}`, {
        value: function set(value) {
            this._data[field] = value;
            return this;
        },
        writable: true,
        configurable: true,
        enumerable: false,
    });
});
module.exports = BucketAzureInfo;

View File

@ -9,8 +9,9 @@ const BucketPolicy = require('./BucketPolicy');
const NotificationConfiguration = require('./NotificationConfiguration');
// WHEN UPDATING THIS NUMBER, UPDATE BucketInfoModelVersion.md CHANGELOG
// BucketInfoModelVersion.md can be found in the root of this repository
const modelVersion = 10;
// BucketInfoModelVersion.md can be found in documentation/ at the root
// of this repository
const modelVersion = 14;
class BucketInfo {
/**
@ -41,7 +42,8 @@ class BucketInfo {
* @param {object} versioningConfiguration - versioning configuration
* @param {string} versioningConfiguration.Status - versioning status
* @param {object} versioningConfiguration.MfaDelete - versioning mfa delete
* @param {string} locationConstraint - locationConstraint for bucket
* @param {string} locationConstraint - locationConstraint for bucket that
* also includes the ingestion flag
* @param {WebsiteConfiguration} [websiteConfiguration] - website
* configuration
* @param {object[]} [cors] - collection of CORS rules to apply
@ -57,17 +59,23 @@ class BucketInfo {
* @param {object} [lifecycleConfiguration] - lifecycle configuration
* @param {object} [bucketPolicy] - bucket policy
* @param {string} [uid] - unique identifier for the bucket, necessary
* @param {string} readLocationConstraint - readLocationConstraint for bucket
* addition for use with lifecycle operations
* @param {boolean} [isNFS] - whether the bucket is on NFS
* @param {object} [ingestionConfig] - object for ingestion status: en/dis
* @param {object} [azureInfo] - Azure storage account specific info
* @param {boolean} [objectLockEnabled] - true when object lock enabled
* @param {object} [objectLockConfiguration] - object lock configuration
* @param {object} [notificationConfiguration] - bucket notification configuration
*/
constructor(name, owner, ownerDisplayName, creationDate,
mdBucketModelVersion, acl, transient, deleted,
serverSideEncryption, versioningConfiguration,
locationConstraint, websiteConfiguration, cors,
replicationConfiguration, lifecycleConfiguration,
bucketPolicy, uid, objectLockEnabled, objectLockConfiguration,
notificationConfiguration) {
mdBucketModelVersion, acl, transient, deleted,
serverSideEncryption, versioningConfiguration,
locationConstraint, websiteConfiguration, cors,
replicationConfiguration, lifecycleConfiguration,
bucketPolicy, uid, readLocationConstraint, isNFS,
ingestionConfig, azureInfo, objectLockEnabled,
objectLockConfiguration, notificationConfiguration) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof owner, 'string');
assert.strictEqual(typeof ownerDisplayName, 'string');
@ -86,7 +94,7 @@ class BucketInfo {
if (serverSideEncryption) {
assert.strictEqual(typeof serverSideEncryption, 'object');
const { cryptoScheme, algorithm, masterKeyId,
configuredMasterKeyId, mandatory } = serverSideEncryption;
configuredMasterKeyId, mandatory } = serverSideEncryption;
assert.strictEqual(typeof cryptoScheme, 'number');
assert.strictEqual(typeof algorithm, 'string');
assert.strictEqual(typeof masterKeyId, 'string');
@ -108,6 +116,15 @@ class BucketInfo {
if (locationConstraint) {
assert.strictEqual(typeof locationConstraint, 'string');
}
if (ingestionConfig) {
assert.strictEqual(typeof ingestionConfig, 'object');
}
if (azureInfo) {
assert.strictEqual(typeof azureInfo, 'object');
}
if (readLocationConstraint) {
assert.strictEqual(typeof readLocationConstraint, 'string');
}
if (websiteConfiguration) {
assert(websiteConfiguration instanceof WebsiteConfiguration);
const { indexDocument, errorDocument, redirectAllRequestsTo,
@ -164,12 +181,16 @@ class BucketInfo {
this._serverSideEncryption = serverSideEncryption || null;
this._versioningConfiguration = versioningConfiguration || null;
this._locationConstraint = locationConstraint || null;
this._readLocationConstraint = readLocationConstraint || null;
this._websiteConfiguration = websiteConfiguration || null;
this._replicationConfiguration = replicationConfiguration || null;
this._cors = cors || null;
this._lifecycleConfiguration = lifecycleConfiguration || null;
this._bucketPolicy = bucketPolicy || null;
this._uid = uid || uuid();
this._isNFS = isNFS || null;
this._ingestion = ingestionConfig || null;
this._azureInfo = azureInfo || null;
this._objectLockEnabled = objectLockEnabled || false;
this._objectLockConfiguration = objectLockConfiguration || null;
this._notificationConfiguration = notificationConfiguration || null;
@ -192,12 +213,16 @@ class BucketInfo {
serverSideEncryption: this._serverSideEncryption,
versioningConfiguration: this._versioningConfiguration,
locationConstraint: this._locationConstraint,
readLocationConstraint: this._readLocationConstraint,
websiteConfiguration: undefined,
cors: this._cors,
replicationConfiguration: this._replicationConfiguration,
lifecycleConfiguration: this._lifecycleConfiguration,
bucketPolicy: this._bucketPolicy,
uid: this._uid,
isNFS: this._isNFS,
ingestion: this._ingestion,
azureInfo: this._azureInfo,
objectLockEnabled: this._objectLockEnabled,
objectLockConfiguration: this._objectLockConfiguration,
notificationConfiguration: this._notificationConfiguration,
@ -222,7 +247,8 @@ class BucketInfo {
obj.transient, obj.deleted, obj.serverSideEncryption,
obj.versioningConfiguration, obj.locationConstraint, websiteConfig,
obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration,
obj.bucketPolicy, obj.uid, obj.objectLockEnabled,
obj.bucketPolicy, obj.uid, obj.readLocationConstraint, obj.isNFS,
obj.ingestion, obj.azureInfo, obj.objectLockEnabled,
obj.objectLockConfiguration, obj.notificationConfiguration);
}
@ -247,8 +273,10 @@ class BucketInfo {
data._versioningConfiguration, data._locationConstraint,
data._websiteConfiguration, data._cors,
data._replicationConfiguration, data._lifecycleConfiguration,
data._bucketPolicy, data._uid, data._objectLockEnabled,
data._objectLockConfiguration, data._notificationConfiguration);
data._bucketPolicy, data._uid, data._readLocationConstraint,
data._isNFS, data._ingestion, data._azureInfo,
data._objectLockEnabled, data._objectLockConfiguration,
data._notificationConfiguration);
}
/**
@ -545,6 +573,17 @@ class BucketInfo {
return this._locationConstraint;
}
/**
* Get read location constraint.
* @return {string} - bucket read location constraint
*/
getReadLocationConstraint() {
if (this._readLocationConstraint) {
return this._readLocationConstraint;
}
return this._locationConstraint;
}
/**
* Set Bucket model version
*
@ -633,6 +672,85 @@ class BucketInfo {
this._uid = uid;
return this;
}
/**
 * Check if the bucket is an NFS bucket.
 * @return {boolean} - Whether the bucket is NFS or not
 */
isNFS() {
    return this._isNFS;
}
/**
 * Set whether the bucket is an NFS bucket.
 * @param {boolean} isNFS - Whether the bucket is NFS or not
 * @return {BucketInfo} - bucket info instance
 */
setIsNFS(isNFS) {
    this._isNFS = isNFS;
    return this;
}
/**
* enable ingestion, set 'this._ingestion' to { status: 'enabled' }
* @return {BucketInfo} - bucket info instance
*/
enableIngestion() {
this._ingestion = { status: 'enabled' };
return this;
}
/**
* disable ingestion, set 'this._ingestion' to { status: 'disabled' }
* @return {BucketInfo} - bucket info instance
*/
disableIngestion() {
this._ingestion = { status: 'disabled' };
return this;
}
/**
* Get ingestion configuration
* @return {object} - bucket ingestion configuration: Enabled or Disabled
*/
getIngestion() {
return this._ingestion;
}
/**
** Check if bucket is an ingestion bucket
* @return {boolean} - 'true' if bucket is ingestion bucket, 'false' if
* otherwise
*/
isIngestionBucket() {
const ingestionConfig = this.getIngestion();
if (ingestionConfig) {
return true;
}
return false;
}
/**
* Check if ingestion is enabled
* @return {boolean} - 'true' if ingestion is enabled, otherwise 'false'
*/
isIngestionEnabled() {
const ingestionConfig = this.getIngestion();
return ingestionConfig ? ingestionConfig.status === 'enabled' : false;
}
/**
 * Return the Azure specific storage account information for this bucket
 * @return {object} - a structure suitable for {@link BucketAzureInfo}
 * constructor
 */
getAzureInfo() {
    return this._azureInfo;
}
/**
* Set the Azure specific storage account information for this bucket
* @param {object} azureInfo - a structure suitable for
* {@link BucketAzureInfo} construction
* @return {BucketInfo} - bucket info instance
*/
setAzureInfo(azureInfo) {
this._azureInfo = azureInfo;
return this;
}
/**
* Check if object lock is enabled.
* @return {boolean} - depending on whether object lock is enabled

View File

@ -5,6 +5,8 @@ const errors = require('../errors');
const LifecycleRule = require('./LifecycleRule');
const escapeForXml = require('../s3middleware/escapeForXml');
const MAX_DAYS = 2147483647; // Max 32-bit signed binary integer.
/**
* Format of xml request:
@ -85,10 +87,13 @@ class LifecycleConfiguration {
/**
* Create a Lifecycle Configuration instance
* @param {string} xml - the parsed xml
* @param {object} config - the CloudServer config
* @return {object} - LifecycleConfiguration instance
*/
constructor(xml) {
constructor(xml, config) {
this._parsedXML = xml;
this._storageClasses =
config.replicationEndpoints.map(endpoint => endpoint.site);
this._ruleIDs = [];
this._tagKeys = [];
this._config = {};
@ -211,9 +216,10 @@ class LifecycleConfiguration {
*/
_parseRule(rule) {
const ruleObj = {};
if (rule.Transition || rule.NoncurrentVersionTransition) {
if (rule.NoncurrentVersionTransition) {
ruleObj.error = errors.NotImplemented.customizeDescription(
'Transition lifecycle action not yet implemented');
'NoncurrentVersionTransition lifecycle action not yet ' +
'implemented');
return ruleObj;
}
// Either Prefix or Filter must be included, but can be empty string
@ -375,7 +381,7 @@ class LifecycleConfiguration {
if (!tags[i].Key || !tags[i].Value) {
tagObj.error =
errors.MissingRequiredParameter.customizeDescription(
'Tag XML does not contain both Key and Value');
'Tag XML does not contain both Key and Value');
break;
}
@ -468,6 +474,315 @@ class LifecycleConfiguration {
return statusObj;
}
/**
* Finds the prefix and/or tags of the given rule and gets the error message
* @param {object} rule - The rule to find the prefix in
* @return {string} - The prefix of filter information
*/
_getRuleFilterDesc(rule) {
if (rule.Prefix) {
return `prefix '${rule.Prefix[0]}'`;
}
// There must be a filter if no top-level prefix is provided. First
// check if there are multiple filters (i.e. `Filter.And`).
if (rule.Filter[0] === undefined || rule.Filter[0].And === undefined) {
const { Prefix, Tag } = rule.Filter[0] || {};
if (Prefix) {
return `filter '(prefix=${Prefix[0]})'`;
}
if (Tag) {
const { Key, Value } = Tag[0];
return `filter '(tag: key=${Key[0]}, value=${Value[0]})'`;
}
return 'filter (all)';
}
const filters = [];
const { Prefix, Tag } = rule.Filter[0].And[0];
if (Prefix) {
filters.push(`prefix=${Prefix[0]}`);
}
Tag.forEach(tag => {
const { Key, Value } = tag;
filters.push(`tag: key=${Key[0]}, value=${Value[0]}`);
});
const joinedFilters = filters.join(' and ');
return `filter '(${joinedFilters})'`;
}
/**
* Checks the validity of the given field
* @param {object} params - Given function parameters
* @param {string} params.days - The value of the field to check
* @param {string} params.field - The field name with the value
* @param {string} params.ancestor - The immediate ancestor field
* @return {object|null} Returns an error object or `null`
*/
_checkDays(params) {
const { days, field, ancestor } = params;
if (days < 0) {
const msg = `'${field}' in ${ancestor} action must be nonnegative`;
return errors.InvalidArgument.customizeDescription(msg);
}
if (days > MAX_DAYS) {
return errors.MalformedXML.customizeDescription(
`'${field}' in ${ancestor} action must not exceed ${MAX_DAYS}`);
}
return null;
}
/**
* Checks the validity of the given storage class
* @param {object} params - Given function parameters
* @param {array} params.usedStorageClasses - Storage classes used in other
* rules
* @param {string} params.storageClass - The storage class of the current
* rule
* @param {string} params.ancestor - The immediate ancestor field
* @param {string} params.prefix - The prefix of the rule
* @return {object|null} Returns an error object or `null`
*/
_checkStorageClasses(params) {
const { usedStorageClasses, storageClass, ancestor, rule } = params;
if (!this._storageClasses.includes(storageClass)) {
// This differs from the AWS message. This will help the user since
// the StorageClass does not conform to AWS specs.
const list = `'${this._storageClasses.join("', '")}'`;
const msg = `'StorageClass' must be one of ${list}`;
return errors.MalformedXML.customizeDescription(msg);
}
if (usedStorageClasses.includes(storageClass)) {
const msg = `'StorageClass' must be different for '${ancestor}' ` +
`actions in same 'Rule' with ${this._getRuleFilterDesc(rule)}`;
return errors.InvalidRequest.customizeDescription(msg);
}
return null;
}
/**
 * Ensure that transition rules are at least a day apart from each other.
 * @param {object} params - Given function parameters
 * @param {string} [params.days] - The days of the current transition
 * @param {string} [params.date] - The date of the current transition
 * @param {string} params.storageClass - The storage class of the current
 * rule
 * @param {string} params.rule - The current rule
 * @return {object|undefined} Returns an error object when two transitions
 * of the rule are less than one day apart, `undefined` otherwise
 */
_checkTimeGap(params) {
    const { days, date, storageClass, rule } = params;
    const invalidTransition = rule.Transition.find(transition => {
        // Only compare against transitions targeting a different
        // storage class.
        if (storageClass === transition.StorageClass[0]) {
            return false;
        }
        if (days !== undefined) {
            // Days-based transitions conflict when they use the exact
            // same day count.
            return Number.parseInt(transition.Days[0], 10) === days;
        }
        if (date !== undefined) {
            // Date-based transitions conflict when they are less than
            // 24 hours apart.
            const timestamp = new Date(date).getTime();
            const compareTimestamp = new Date(transition.Date[0]).getTime();
            const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
            return Math.abs(timestamp - compareTimestamp) < oneDay;
        }
        return false;
    });
    if (invalidTransition) {
        const timeType = days !== undefined ? 'Days' : 'Date';
        const filterMsg = this._getRuleFilterDesc(rule);
        const compareStorageClass = invalidTransition.StorageClass[0];
        const msg = `'${timeType}' in the 'Transition' action for ` +
            `StorageClass '${storageClass}' for ${filterMsg} must be at ` +
            `least one day apart from ${filterMsg} in the 'Transition' ` +
            `action for StorageClass '${compareStorageClass}'`;
        return errors.InvalidArgument.customizeDescription(msg);
    }
    return undefined;
}
/**
* Checks transition time type (i.e. 'Date' or 'Days') only occurs once
* across transitions and across transitions and expiration policies
* @param {object} params - Given function parameters
* @param {string} params.usedTimeType - The time type that has been used by
* another rule
* @param {string} params.currentTimeType - the time type used by the
* current rule
* @param {string} params.rule - The current rule
* @return {object|null} Returns an error object or `null`
*/
_checkTimeType(params) {
const { usedTimeType, currentTimeType, rule } = params;
if (usedTimeType && usedTimeType !== currentTimeType) {
const msg = "Found mixed 'Date' and 'Days' based Transition " +
'actions in lifecycle rule for ' +
`${this._getRuleFilterDesc(rule)}`;
return errors.InvalidRequest.customizeDescription(msg);
}
// Transition time type cannot differ from the expiration, if provided.
if (rule.Expiration &&
rule.Expiration[0][currentTimeType] === undefined) {
const msg = "Found mixed 'Date' and 'Days' based Expiration and " +
'Transition actions in lifecycle rule for ' +
`${this._getRuleFilterDesc(rule)}`;
return errors.InvalidRequest.customizeDescription(msg);
}
return null;
}
/**
* Checks the validity of the given date
* @param {string} date - The date the check
* @return {object|null} Returns an error object or `null`
*/
_checkDate(date) {
const isoRegex = new RegExp('^(-?(?:[1-9][0-9]*)?[0-9]{4})-' +
'(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9])' +
':([0-5][0-9]):([0-5][0-9])(.[0-9]+)?(Z)?$');
if (!isoRegex.test(date)) {
const msg = 'Date must be in ISO 8601 format';
return errors.InvalidArgument.customizeDescription(msg);
}
return null;
}
/**
* Parses the NonCurrentVersionTransition value
* @param {object} rule - Rule object from Rule array from this._parsedXml
* @return {object} - Contains error if parsing failed, otherwise contains
* the parsed nonCurrentVersionTransition array
*
* Format of result:
* result = {
* error: <error>,
* nonCurrentVersionTransition: [
* {
* noncurrentDays: <non-current-days>,
* storageClass: <storage-class>,
* },
* ...
* ]
* }
*/
_parseNoncurrentVersionTransition(rule) {
const nonCurrentVersionTransition = [];
const usedStorageClasses = [];
for (let i = 0; i < rule.NoncurrentVersionTransition.length; i++) {
const t = rule.NoncurrentVersionTransition[i]; // Transition object
const noncurrentDays =
t.NoncurrentDays && Number.parseInt(t.NoncurrentDays[0], 10);
const storageClass = t.StorageClass && t.StorageClass[0];
if (noncurrentDays === undefined || storageClass === undefined) {
return { error: errors.MalformedXML };
}
let error = this._checkDays({
days: noncurrentDays,
field: 'NoncurrentDays',
ancestor: 'NoncurrentVersionTransition',
});
if (error) {
return { error };
}
error = this._checkStorageClasses({
storageClass,
usedStorageClasses,
ancestor: 'NoncurrentVersionTransition',
rule,
});
if (error) {
return { error };
}
nonCurrentVersionTransition.push({ noncurrentDays, storageClass });
usedStorageClasses.push(storageClass);
}
return { nonCurrentVersionTransition };
}
/**
 * Parses the Transition value
 * @param {object} rule - Rule object from Rule array from this._parsedXml
 * @return {object} - Contains error if parsing failed, otherwise contains
 * the parsed transition array
 *
 * Format of result:
 * result = {
 *     error: <error>,
 *     transition: [
 *         {
 *             days: <days>,
 *             date: <date>,
 *             storageClass: <storage-class>,
 *         },
 *         ...
 *     ]
 * }
 */
_parseTransition(rule) {
    const transition = [];
    const usedStorageClasses = [];
    // Tracks whether previous transitions of this rule used 'Days' or
    // 'Date', so that mixed time types can be rejected.
    let usedTimeType = null;
    for (let i = 0; i < rule.Transition.length; i++) {
        const t = rule.Transition[i]; // Transition object
        const days = t.Days && Number.parseInt(t.Days[0], 10);
        const date = t.Date && t.Date[0];
        const storageClass = t.StorageClass && t.StorageClass[0];
        // Exactly one of Days/Date must be provided, and StorageClass
        // is mandatory.
        if ((days === undefined && date === undefined) ||
            (days !== undefined && date !== undefined) ||
            (storageClass === undefined)) {
            return { error: errors.MalformedXML };
        }
        let error = this._checkStorageClasses({
            storageClass,
            usedStorageClasses,
            ancestor: 'Transition',
            rule,
        });
        if (error) {
            return { error };
        }
        usedStorageClasses.push(storageClass);
        if (days !== undefined) {
            error = this._checkTimeType({
                usedTimeType,
                currentTimeType: 'Days',
                rule,
            });
            if (error) {
                return { error };
            }
            usedTimeType = 'Days';
            error = this._checkDays({
                days,
                field: 'Days',
                ancestor: 'Transition',
            });
            if (error) {
                return { error };
            }
            transition.push({ days, storageClass });
        }
        if (date !== undefined) {
            error = this._checkTimeType({
                usedTimeType,
                currentTimeType: 'Date',
                rule,
            });
            if (error) {
                return { error };
            }
            usedTimeType = 'Date';
            error = this._checkDate(date);
            if (error) {
                return { error };
            }
            transition.push({ date, storageClass });
        }
        // Ensure this transition is at least one day apart from every
        // other transition of the rule.
        error = this._checkTimeGap({ days, date, storageClass, rule });
        if (error) {
            return { error };
        }
    }
    return { transition };
}
/**
* Check that action component of rule is valid
* @param {object} rule - a rule object from Rule array from this._parsedXml
@ -492,8 +807,13 @@ class LifecycleConfiguration {
const actionsObj = {};
actionsObj.propName = 'actions';
actionsObj.actions = [];
const validActions = ['AbortIncompleteMultipartUpload',
'Expiration', 'NoncurrentVersionExpiration'];
const validActions = [
'AbortIncompleteMultipartUpload',
'Expiration',
'NoncurrentVersionExpiration',
'NoncurrentVersionTransition',
'Transition',
];
validActions.forEach(a => {
if (rule[a]) {
actionsObj.actions.push({ actionName: `${a}` });
@ -510,7 +830,8 @@ class LifecycleConfiguration {
if (action.error) {
actionsObj.error = action.error;
} else {
const actionTimes = ['days', 'date', 'deleteMarker'];
const actionTimes = ['days', 'date', 'deleteMarker',
'transition', 'nonCurrentVersionTransition'];
actionTimes.forEach(t => {
if (action[t]) {
// eslint-disable-next-line no-param-reassign
@ -597,12 +918,9 @@ class LifecycleConfiguration {
return expObj;
}
if (subExp.Date) {
const isoRegex = new RegExp('^(-?(?:[1-9][0-9]*)?[0-9]{4})-' +
'(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12][0-9])T(2[0-3]|[01][0-9])' +
':([0-5][0-9]):([0-5][0-9])(.[0-9]+)?(Z)?$');
if (!isoRegex.test(subExp.Date[0])) {
expObj.error = errors.InvalidArgument.customizeDescription(
'Date must be in ISO 8601 format');
const error = this._checkDate(subExp.Date[0]);
if (error) {
expObj.error = error;
} else {
expObj.date = subExp.Date[0];
}
@ -611,7 +929,7 @@ class LifecycleConfiguration {
const daysInt = parseInt(subExp.Days[0], 10);
if (daysInt < 1) {
expObj.error = errors.InvalidArgument.customizeDescription(
'Expiration days is not a positive integer');
'Expiration days is not a positive integer');
} else {
expObj.days = daysInt;
}
@ -714,6 +1032,26 @@ class LifecycleConfiguration {
if (a.deleteMarker) {
assert.strictEqual(typeof a.deleteMarker, 'string');
}
if (a.nonCurrentVersionTransition) {
assert.strictEqual(
typeof a.nonCurrentVersionTransition, 'object');
a.nonCurrentVersionTransition.forEach(t => {
assert.strictEqual(typeof t.noncurrentDays, 'number');
assert.strictEqual(typeof t.storageClass, 'string');
});
}
if (a.transition) {
assert.strictEqual(typeof a.transition, 'object');
a.transition.forEach(t => {
if (t.days || t.days === 0) {
assert.strictEqual(typeof t.days, 'number');
}
if (t.date !== undefined) {
assert.strictEqual(typeof t.date, 'string');
}
assert.strictEqual(typeof t.storageClass, 'string');
});
}
});
});
}
@ -763,7 +1101,8 @@ class LifecycleConfiguration {
}
const Actions = actions.map(action => {
const { actionName, days, date, deleteMarker } = action;
const { actionName, days, date, deleteMarker,
nonCurrentVersionTransition, transition } = action;
let Action;
if (actionName === 'AbortIncompleteMultipartUpload') {
Action = `<${actionName}><DaysAfterInitiation>${days}` +
@ -780,6 +1119,40 @@ class LifecycleConfiguration {
Action = `<${actionName}>${Days}${Date}${DelMarker}` +
`</${actionName}>`;
}
if (actionName === 'NoncurrentVersionTransition') {
const xml = [];
nonCurrentVersionTransition.forEach(transition => {
const { noncurrentDays, storageClass } = transition;
xml.push(
`<${actionName}>`,
`<NoncurrentDays>${noncurrentDays}` +
'</NoncurrentDays>',
`<StorageClass>${storageClass}</StorageClass>`,
`</${actionName}>`,
);
});
Action = xml.join('');
}
if (actionName === 'Transition') {
const xml = [];
transition.forEach(transition => {
const { days, date, storageClass } = transition;
let element;
if (days !== undefined) {
element = `<Days>${days}</Days>`;
}
if (date !== undefined) {
element = `<Date>${date}</Date>`;
}
xml.push(
`<${actionName}>`,
element,
`<StorageClass>${storageClass}</StorageClass>`,
`</${actionName}>`,
);
});
Action = xml.join('');
}
return Action;
}).join('');
return `<Rule>${ID}${Status}${Filter}${Actions}</Rule>`;

View File

@ -27,7 +27,7 @@ const errors = require('../errors');
* </NotificationConfiguration>
*/
/**
/**
* Format of config:
*
* config = {

View File

@ -17,7 +17,7 @@ const errors = require('../errors');
* </ObjectLockConfiguration>
*/
/**
/**
* Format of config:
*
* config = {

View File

@ -1,3 +1,5 @@
const crypto = require('crypto');
const constants = require('../constants');
const VersionIDUtils = require('../versioning/VersionID');
@ -8,7 +10,6 @@ const ObjectMDLocation = require('./ObjectMDLocation');
* mpuPart metadata for example)
*/
class ObjectMD {
/**
* Create a new instance of ObjectMD. Parameter <tt>objMd</tt> is
* reserved for internal use, users should call
@ -28,9 +29,14 @@ class ObjectMD {
} else {
this._updateFromParsedJSON(objMd);
}
if (!this._data['creation-time']) {
this.setCreationTime(this.getLastModified());
}
} else {
// set newly-created object md modified time to current time
this._data['last-modified'] = new Date().toJSON();
const dt = new Date().toJSON();
this.setLastModified(dt);
this.setCreationTime(dt);
}
// set latest md model version now that we ensured
// backward-compat conversion
@ -85,6 +91,8 @@ class ObjectMD {
'content-length': 0,
'content-type': '',
'content-md5': '',
'content-language': '',
'creation-time': undefined,
// simple/no version. will expand once object versioning is
// introduced
'x-amz-version-id': 'null',
@ -106,6 +114,7 @@ class ObjectMD {
},
'key': '',
'location': null,
'azureInfo': undefined,
// versionId, isNull, nullVersionId and isDeleteMarker
// should be undefined when not set explicitly
'isNull': undefined,
@ -124,6 +133,7 @@ class ObjectMD {
role: '',
storageType: '',
dataStoreVersionId: '',
isNFS: null,
},
'dataStoreName': '',
'originOp': '',
@ -138,7 +148,7 @@ class ObjectMD {
Object.assign(this._data, objMd._data);
Object.assign(this._data.replicationInfo,
objMd._data.replicationInfo);
objMd._data.replicationInfo);
}
_updateFromParsedJSON(objMd) {
@ -356,6 +366,50 @@ class ObjectMD {
return this._data['content-md5'];
}
/**
 * Set the `content-language` metadata attribute
 *
 * @param {string} contentLanguage - content-language header value
 * @return {ObjectMD} itself
 */
setContentLanguage(contentLanguage) {
    this._data['content-language'] = contentLanguage;
    return this;
}

/**
 * Returns the `content-language` metadata attribute
 *
 * @return {string} content-language (empty string when never set)
 */
getContentLanguage() {
    return this._data['content-language'];
}
/**
 * Set the creation date
 *
 * @param {string} creationTime - creation date, as a JSON date string
 * @return {ObjectMD} itself
 */
setCreationTime(creationTime) {
    this._data['creation-time'] = creationTime;
    return this;
}
/**
* Returns Creation Date
*
* @return {string} Creation Date
*/
getCreationTime() {
// If creation-time is not set fallback to LastModified
if (!this._data['creation-time']) {
return this.getLastModified();
}
return this._data['creation-time'];
}
/**
* Set version id
*
@ -599,6 +653,29 @@ class ObjectMD {
return reducedLocations;
}
/**
 * Set the Azure specific information
 * @param {object} azureInfo - a plain JS structure representing the
 * Azure specific information for a Blob or a Container (see constructor
 * of {@link ObjectMDAzureInfo} for a description of the fields of this
 * structure)
 * @return {ObjectMD} itself
 */
setAzureInfo(azureInfo) {
    this._data.azureInfo = azureInfo;
    return this;
}

/**
 * Get the Azure specific information
 * @return {object} a plain JS structure representing the Azure specific
 * information for a Blob or a Container and suitable for the constructor
 * of {@link ObjectMDAzureInfo}.
 */
getAzureInfo() {
    return this._data.azureInfo;
}
/**
* Set metadata isNull value
*
@ -680,6 +757,19 @@ class ObjectMD {
return this._data.isDeleteMarker || false;
}
/**
* Get if the object is a multipart upload (MPU)
*
* The function checks the "content-md5" field: if it contains a
* dash ('-') it is a MPU, as the content-md5 string ends with
* "-[nbparts]" for MPUs.
*
* @return {boolean} Whether object is a multipart upload
*/
isMultipartUpload() {
return this.getContentMd5().includes('-');
}
/**
* Set metadata versionId value
*
@ -707,7 +797,10 @@ class ObjectMD {
* @return {string|undefined} The encoded object versionId
*/
getEncodedVersionId() {
return VersionIDUtils.encode(this.getVersionId());
if (this.getVersionId()) {
return VersionIDUtils.encode(this.getVersionId());
}
return undefined;
}
/**
@ -750,6 +843,20 @@ class ObjectMD {
return this._data.tags;
}
getUserMetadata() {
const metaHeaders = {};
const data = this.getValue();
Object.keys(data).forEach(key => {
if (key.startsWith('x-amz-meta-')) {
metaHeaders[key] = data[key];
}
});
if (Object.keys(metaHeaders).length > 0) {
return JSON.stringify(metaHeaders);
}
return undefined;
}
/**
* Set replication information
*
@ -758,7 +865,7 @@ class ObjectMD {
*/
setReplicationInfo(replicationInfo) {
const { status, backends, content, destination, storageClass, role,
storageType, dataStoreVersionId } = replicationInfo;
storageType, dataStoreVersionId, isNFS } = replicationInfo;
this._data.replicationInfo = {
status,
backends,
@ -768,6 +875,7 @@ class ObjectMD {
role,
storageType: storageType || '',
dataStoreVersionId: dataStoreVersionId || '',
isNFS: isNFS || null,
};
return this;
}
@ -786,6 +894,24 @@ class ObjectMD {
return this;
}
/**
 * Set whether the replication is occurring from an NFS bucket.
 * @param {Boolean} isNFS - Whether replication from an NFS bucket
 * @return {ObjectMD} itself
 */
setReplicationIsNFS(isNFS) {
    this._data.replicationInfo.isNFS = isNFS;
    return this;
}

/**
 * Get whether the replication is occurring from an NFS bucket.
 * @return {Boolean} Whether replication from an NFS bucket
 * (`null` when never set)
 */
getReplicationIsNFS() {
    return this._data.replicationInfo.isNFS;
}
setReplicationSiteStatus(site, status) {
const backend = this._data.replicationInfo.backends
.find(o => o.site === site);
@ -832,6 +958,11 @@ class ObjectMD {
return this;
}
/**
 * Set the replication storage type
 * @param {string} storageType - the replication storage type
 * @return {ObjectMD} itself
 */
setReplicationStorageType(storageType) {
    this._data.replicationInfo.storageType = storageType;
    return this;
}
setReplicationStorageClass(storageClass) {
this._data.replicationInfo.storageClass = storageClass;
return this;
@ -913,6 +1044,9 @@ class ObjectMD {
Object.keys(metaHeaders).forEach(key => {
if (key.startsWith('x-amz-meta-')) {
this._data[key] = metaHeaders[key];
} else if (key.startsWith('x-ms-meta-')) {
const _key = key.replace('x-ms-meta-', 'x-amz-meta-');
this._data[_key] = metaHeaders[key];
}
});
// If a multipart object and the acl is already parsed, we update it
@ -922,6 +1056,20 @@ class ObjectMD {
return this;
}
/**
* Clear all existing meta headers (used for Azure)
*
* @return {ObjectMD} itself
*/
clearMetadataValues() {
Object.keys(this._data).forEach(key => {
if (key.startsWith('x-amz-meta')) {
delete this._data[key];
}
});
return this;
}
/**
* overrideMetadataValues (used for complete MPU and object copy)
*
@ -933,6 +1081,39 @@ class ObjectMD {
return this;
}
/**
* Create or update the microVersionId field
*
* This field can be used to force an update in MongoDB. This can
* be needed in the following cases:
*
* - in case no other metadata field changes
*
* - to detect a change when fields change but object version does
* not change e.g. when ingesting a putObjectTagging coming from
* S3C to Zenko
*
* - to manage conflicts during concurrent updates, using
* conditions on the microVersionId field.
*
* It's a field of 16 hexadecimal characters randomly generated
*
* @return {ObjectMD} itself
*/
updateMicroVersionId() {
this._data.microVersionId = crypto.randomBytes(8).toString('hex');
}
/**
 * Get the microVersionId field, or null if not set
 *
 * @return {string|null} the microVersionId field if it exists, or
 * `null` if it does not exist
 */
getMicroVersionId() {
    return this._data.microVersionId || null;
}
/**
* Set object legal hold status
* @param {boolean} legalHold - true if legal hold is 'ON' false if 'OFF'

View File

@ -0,0 +1,162 @@
/**
 * Helper class to ease access to the Azure specific information for
 * Blob and Container objects.
 */
class ObjectMDAzureInfo {
    /**
     * @constructor
     * @param {object} obj - Raw structure for the Azure info on Blob/Container
     * @param {string} obj.containerPublicAccess - Public access authorization
     * type
     * @param {object[]} obj.containerStoredAccessPolicies - Access policies
     * for Shared Access Signature bearer
     * @param {object} obj.containerImmutabilityPolicy - data immutability
     * policy for this container
     * @param {boolean} obj.containerLegalHoldStatus - legal hold status for
     * this container
     * @param {boolean} obj.containerDeletionInProgress - deletion in progress
     * indicator for this container
     * @param {string} obj.blobType - defines the type of blob for this object
     * @param {string} obj.blobContentMD5 - whole object MD5 sum set by the
     * client through the Azure API
     * @param {string} obj.blobIssuedETag - backup of the issued ETag on MD only
     * operations like Set Blob Properties and Set Blob Metadata
     * @param {object} obj.blobCopyInfo - information pertaining to past and
     * pending copy operation targeting this object
     * @param {number} obj.blobSequenceNumber - sequence number for a PageBlob
     * @param {Date} obj.blobAccessTierChangeTime - date of change of tier
     * @param {boolean} obj.blobUncommitted - A block has been put for a
     * nonexistent blob which is about to be created
     */
    constructor(obj) {
        // Copy the known Azure fields one by one (in a fixed order) so
        // that extraneous attributes of `obj` are never retained.
        const fields = [
            'containerPublicAccess',
            'containerStoredAccessPolicies',
            'containerImmutabilityPolicy',
            'containerLegalHoldStatus',
            'containerDeletionInProgress',
            'blobType',
            'blobContentMD5',
            'blobIssuedETag',
            'blobCopyInfo',
            'blobSequenceNumber',
            'blobAccessTierChangeTime',
            'blobUncommitted',
        ];
        this._data = {};
        fields.forEach(field => {
            this._data[field] = obj[field];
        });
    }

    getContainerPublicAccess() {
        return this._data.containerPublicAccess;
    }

    setContainerPublicAccess(value) {
        this._data.containerPublicAccess = value;
        return this;
    }

    getContainerStoredAccessPolicies() {
        return this._data.containerStoredAccessPolicies;
    }

    setContainerStoredAccessPolicies(value) {
        this._data.containerStoredAccessPolicies = value;
        return this;
    }

    getContainerImmutabilityPolicy() {
        return this._data.containerImmutabilityPolicy;
    }

    setContainerImmutabilityPolicy(value) {
        this._data.containerImmutabilityPolicy = value;
        return this;
    }

    getContainerLegalHoldStatus() {
        return this._data.containerLegalHoldStatus;
    }

    setContainerLegalHoldStatus(value) {
        this._data.containerLegalHoldStatus = value;
        return this;
    }

    getContainerDeletionInProgress() {
        return this._data.containerDeletionInProgress;
    }

    setContainerDeletionInProgress(value) {
        this._data.containerDeletionInProgress = value;
        return this;
    }

    getBlobType() {
        return this._data.blobType;
    }

    setBlobType(value) {
        this._data.blobType = value;
        return this;
    }

    getBlobContentMD5() {
        return this._data.blobContentMD5;
    }

    setBlobContentMD5(value) {
        this._data.blobContentMD5 = value;
        return this;
    }

    getBlobIssuedETag() {
        return this._data.blobIssuedETag;
    }

    setBlobIssuedETag(value) {
        this._data.blobIssuedETag = value;
        return this;
    }

    getBlobCopyInfo() {
        return this._data.blobCopyInfo;
    }

    setBlobCopyInfo(value) {
        this._data.blobCopyInfo = value;
        return this;
    }

    getBlobSequenceNumber() {
        return this._data.blobSequenceNumber;
    }

    setBlobSequenceNumber(value) {
        this._data.blobSequenceNumber = value;
        return this;
    }

    getBlobAccessTierChangeTime() {
        return this._data.blobAccessTierChangeTime;
    }

    setBlobAccessTierChangeTime(value) {
        this._data.blobAccessTierChangeTime = value;
        return this;
    }

    getBlobUncommitted() {
        return this._data.blobUncommitted;
    }

    setBlobUncommitted(value) {
        this._data.blobUncommitted = value;
        return this;
    }

    /** @return {object} the raw Azure info structure */
    getValue() {
        return this._data;
    }
}
module.exports = ObjectMDAzureInfo;

View File

@ -3,7 +3,6 @@
* 'location' array
*/
class ObjectMDLocation {
/**
* @constructor
* @param {object} locationObj - single data location info
@ -14,10 +13,14 @@ class ObjectMDLocation {
* @param {string} locationObj.dataStoreName - type of data store
* @param {string} locationObj.dataStoreETag - internal ETag of
* data part
* @param {string} [locationObj.dataStoreVersionId] - versionId,
* needed for cloud backends
* @param {number} [location.cryptoScheme] - if location data is
* encrypted: the encryption scheme version
* @param {string} [location.cipheredDataKey] - if location data
* is encrypted: the base64-encoded ciphered data key
* @param {string} [locationObj.blockId] - blockId of the part,
* set by the Azure Blob Service REST API frontend
*/
constructor(locationObj) {
this._data = {
@ -26,6 +29,8 @@ class ObjectMDLocation {
size: locationObj.size,
dataStoreName: locationObj.dataStoreName,
dataStoreETag: locationObj.dataStoreETag,
dataStoreVersionId: locationObj.dataStoreVersionId,
blockId: locationObj.blockId,
};
if (locationObj.cryptoScheme) {
this._data.cryptoScheme = locationObj.cryptoScheme;
@ -47,6 +52,7 @@ class ObjectMDLocation {
* @param {object} location - single data location info
* @param {string} location.key - data backend key
* @param {string} location.dataStoreName - type of data store
* @param {string} [location.dataStoreVersionId] - data backend version ID
* @param {number} [location.cryptoScheme] - if location data is
* encrypted: the encryption scheme version
* @param {string} [location.cipheredDataKey] - if location data
@ -57,6 +63,7 @@ class ObjectMDLocation {
[
'key',
'dataStoreName',
'dataStoreVersionId',
'cryptoScheme',
'cipheredDataKey',
].forEach(attrName => {
@ -73,6 +80,10 @@ class ObjectMDLocation {
return this._data.dataStoreETag;
}
/**
 * Get the data-backend version ID of this location, if any
 * @return {string|undefined} data backend version ID
 */
getDataStoreVersionId() {
    return this._data.dataStoreVersionId;
}

/**
 * Get the part number of this location
 *
 * The part number is the first colon-separated field of
 * `dataStoreETag` ('<partNumber>:<etag>').
 *
 * @return {number} part number parsed from dataStoreETag
 */
getPartNumber() {
    return Number.parseInt(this._data.dataStoreETag.split(':')[0], 10);
}
@ -107,6 +118,15 @@ class ObjectMDLocation {
return this._data.cipheredDataKey;
}
/**
 * Get the Azure block ID of this part, if set
 * @return {string|undefined} blockId set by the Azure Blob Service
 * REST API frontend
 */
getBlockId() {
    return this._data.blockId;
}

/**
 * Set the Azure block ID of this part
 * @param {string} blockId - blockId of the part
 * @return {ObjectMDLocation} itself
 */
setBlockId(blockId) {
    this._data.blockId = blockId;
    return this;
}

/**
 * Get the raw location attributes
 * @return {object} internal location data
 */
getValue() {
    return this._data;
}

View File

@ -59,6 +59,7 @@ class ReplicationConfiguration {
this._rules = null;
this._prevStorageClass = null;
this._hasScalityDestination = null;
this._preferredReadLocation = null;
}
/**
@ -85,6 +86,18 @@ class ReplicationConfiguration {
return this._rules;
}
/**
 * The preferred read location, parsed from the ':preferred_read'
 * suffix on one of the destination StorageClass names
 * @return {string|null} - The preferred read location if defined,
 * otherwise null
 *
 * FIXME ideally we should be able to specify one preferred read
 * location for each rule
 */
getPreferredReadLocation() {
    return this._preferredReadLocation;
}
/**
* Get the replication configuration
* @return {object} - The replication configuration
@ -94,6 +107,7 @@ class ReplicationConfiguration {
role: this.getRole(),
destination: this.getDestination(),
rules: this.getRules(),
preferredReadLocation: this.getPreferredReadLocation(),
};
}
@ -292,6 +306,14 @@ class ReplicationConfiguration {
return undefined;
}
const storageClasses = destination.StorageClass[0].split(',');
const prefReadIndex = storageClasses.findIndex(storageClass =>
storageClass.endsWith(':preferred_read'));
if (prefReadIndex !== -1) {
const prefRead = storageClasses[prefReadIndex].split(':')[0];
// remove :preferred_read tag from storage class name
storageClasses[prefReadIndex] = prefRead;
this._preferredReadLocation = prefRead;
}
const isValidStorageClass = storageClasses.every(storageClass => {
if (validStorageClasses.includes(storageClass)) {
this._hasScalityDestination =

View File

@ -111,7 +111,7 @@ class RoundRobin {
pickHost() {
if (this.logger) {
this.logger.debug('pick host',
{ host: this.getCurrentHost() });
{ host: this.getCurrentHost() });
}
const curHost = this.getCurrentHost();
++this.pickCount;
@ -163,7 +163,7 @@ class RoundRobin {
}
if (this.logger) {
this.logger.debug('round robin host',
{ newHost: this.getCurrentHost() });
{ newHost: this.getCurrentHost() });
}
}
}

View File

@ -10,7 +10,6 @@ const { checkSupportIPv6 } = require('./utils');
class Server {
/**
* @constructor
*
@ -369,6 +368,8 @@ class Server {
error: err.stack || err,
address: sock.address(),
});
// socket is not systematically destroyed
sock.destroy();
}
/**
@ -429,16 +430,16 @@ class Server {
// Setting no delay of the socket to the value configured
sock.setNoDelay(this.isNoDelay());
sock.on('error', err => this._logger.info(
'socket error - request rejected', { error: err }));
'socket error - request rejected', { error: err }));
});
this._server.on('tlsClientError', (err, sock) =>
this._onClientError(err, sock));
this._onClientError(err, sock));
this._server.on('clientError', (err, sock) =>
this._onClientError(err, sock));
this._onClientError(err, sock));
this._server.on('checkContinue', (req, res) =>
this._onCheckContinue(req, res));
this._onCheckContinue(req, res));
this._server.on('checkExpectation', (req, res) =>
this._onCheckExpectation(req, res));
this._onCheckExpectation(req, res));
this._server.on('listening', () => this._onListening());
}
this._server.listen(this._port, this._address);

View File

@ -72,8 +72,8 @@ function getByteRangeFromSpec(rangeSpec, objectSize) {
if (rangeSpec.start < objectSize) {
// test is false if end is undefined
return { range: [rangeSpec.start,
(rangeSpec.end < objectSize ?
rangeSpec.end : objectSize - 1)] };
(rangeSpec.end < objectSize ?
rangeSpec.end : objectSize - 1)] };
}
return { error: errors.InvalidRange };
}

View File

@ -95,8 +95,8 @@ function _negotiateProtocolVersion(client, logger, cb) {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::negotiateProtocolVersion',
{ error,
vendorIdentification: client.vendorIdentification });
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
const majorVersions =
@ -107,8 +107,8 @@ function _negotiateProtocolVersion(client, logger, cb) {
majorVersions.length !== minorVersions.length) {
const error = _arsenalError('No suitable protocol version');
logger.error('KMIP::negotiateProtocolVersion',
{ error,
vendorIdentification: client.vendorIdentification });
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
client.kmip.changeProtocolVersion(majorVersions[0], minorVersions[0]);
@ -131,8 +131,8 @@ function _mapExtensions(client, logger, cb) {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::mapExtensions',
{ error,
vendorIdentification: client.vendorIdentification });
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
const extensionNames = response.lookup(searchFilter.extensionName);
@ -140,8 +140,8 @@ function _mapExtensions(client, logger, cb) {
if (extensionNames.length !== extensionTags.length) {
const error = _arsenalError('Inconsistent extension list');
logger.error('KMIP::mapExtensions',
{ error,
vendorIdentification: client.vendorIdentification });
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
extensionNames.forEach((extensionName, idx) => {
@ -165,7 +165,7 @@ function _queryServerInformation(client, logger, cb) {
if (err) {
const error = _arsenalError(err);
logger.warn('KMIP::queryServerInformation',
{ error });
{ error });
/* no error returned, caller can keep going */
return cb();
}
@ -175,9 +175,9 @@ function _queryServerInformation(client, logger, cb) {
JSON.stringify(response.lookup(searchFilter.serverInformation)[0]));
logger.info('KMIP Server identified',
{ vendorIdentification: client.vendorIdentification,
serverInformation: client.serverInformation,
negotiatedProtocolVersion: client.kmip.protocolVersion });
{ vendorIdentification: client.vendorIdentification,
serverInformation: client.serverInformation,
negotiatedProtocolVersion: client.kmip.protocolVersion });
return cb();
});
}
@ -201,8 +201,8 @@ function _queryOperationsAndObjects(client, logger, cb) {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::queryOperationsAndObjects',
{ error,
vendorIdentification: client.vendorIdentification });
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
const supportedOperations = response.lookup(searchFilter.operation);
@ -227,15 +227,15 @@ function _queryOperationsAndObjects(client, logger, cb) {
logger.warn('KMIP::queryOperationsAndObjects: ' +
'The KMIP Server announces that it ' +
'does not support all of the required features',
{ vendorIdentification: client.vendorIdentification,
serverInformation: client.serverInformation,
supportsEncrypt, supportsDecrypt,
supportsActivate, supportsRevoke,
supportsCreate, supportsDestroy,
supportsQuery, supportsSymmetricKeys });
{ vendorIdentification: client.vendorIdentification,
serverInformation: client.serverInformation,
supportsEncrypt, supportsDecrypt,
supportsActivate, supportsRevoke,
supportsCreate, supportsDestroy,
supportsQuery, supportsSymmetricKeys });
} else {
logger.info('KMIP Server provides the necessary feature set',
{ vendorIdentification: client.vendorIdentification });
{ vendorIdentification: client.vendorIdentification });
}
return cb();
});
@ -269,8 +269,8 @@ class Client {
this.vendorIdentification = '';
this.serverInformation = [];
this.kmip = new KMIP(CodecClass || TTLVCodec,
TransportClass || TlsTransport,
options);
TransportClass || TlsTransport,
options);
this.kmip.registerHandshakeFunction((logger, cb) => {
this._kmipHandshake(logger, cb);
});
@ -327,8 +327,8 @@ class Client {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::_activateBucketKey',
{ error,
serverInformation: this.serverInformation });
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
@ -337,7 +337,7 @@ class Client {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::cipherDataKey',
{ error, uniqueIdentifier });
{ error, uniqueIdentifier });
return cb(error);
}
return cb(null, keyIdentifier);
@ -356,20 +356,20 @@ class Client {
const attributes = [];
if (!!this.options.bucketNameAttributeName) {
attributes.push(KMIP.Attribute('TextString',
this.options.bucketNameAttributeName,
bucketName));
this.options.bucketNameAttributeName,
bucketName));
}
attributes.push(...[
KMIP.Attribute('Enumeration', 'Cryptographic Algorithm',
CRYPTOGRAPHIC_ALGORITHM),
CRYPTOGRAPHIC_ALGORITHM),
KMIP.Attribute('Integer', 'Cryptographic Length',
CRYPTOGRAPHIC_LENGTH),
CRYPTOGRAPHIC_LENGTH),
KMIP.Attribute('Integer', 'Cryptographic Usage Mask',
this.kmip.encodeMask('Cryptographic Usage Mask',
CRYPTOGRAPHIC_USAGE_MASK))]);
this.kmip.encodeMask('Cryptographic Usage Mask',
CRYPTOGRAPHIC_USAGE_MASK))]);
if (this.options.compoundCreateActivate) {
attributes.push(KMIP.Attribute('Date-Time', 'Activation Date',
new Date(Date.UTC())));
new Date(Date.UTC())));
}
return this.kmip.request(logger, 'Create', [
@ -379,8 +379,8 @@ class Client {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::createBucketKey',
{ error,
serverInformation: this.serverInformation });
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const createdObjectType =
@ -391,7 +391,7 @@ class Client {
const error = _arsenalError(
'Server created an object of wrong type');
logger.error('KMIP::createBucketKey',
{ error, createdObjectType });
{ error, createdObjectType });
return cb(error);
}
if (!this.options.compoundCreateActivate) {
@ -416,16 +416,16 @@ class Client {
KMIP.TextString('Unique Identifier', bucketKeyId),
KMIP.Structure('Revocation Reason', [
KMIP.Enumeration('Revocation Reason Code',
'Cessation of Operation'),
'Cessation of Operation'),
KMIP.TextString('Revocation Message',
'About to be deleted'),
'About to be deleted'),
]),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::_revokeBucketKey',
{ error,
serverInformation: this.serverInformation });
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
@ -434,7 +434,7 @@ class Client {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::_revokeBucketKey',
{ error, uniqueIdentifier });
{ error, uniqueIdentifier });
return cb(error);
}
return cb();
@ -453,8 +453,8 @@ class Client {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::destroyBucketKey: revocation failed',
{ error,
serverInformation: this.serverInformation });
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
return this.kmip.request(logger, 'Destroy', [
@ -463,8 +463,8 @@ class Client {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::destroyBucketKey',
{ error,
serverInformation: this.serverInformation });
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
@ -473,7 +473,7 @@ class Client {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::destroyBucketKey',
{ error, uniqueIdentifier });
{ error, uniqueIdentifier });
return cb(error);
}
return cb();
@ -492,19 +492,19 @@ class Client {
* @callback called with (err, cipheredDataKey: Buffer)
*/
cipherDataKey(cryptoScheme,
masterKeyId,
plainTextDataKey,
logger,
cb) {
masterKeyId,
plainTextDataKey,
logger,
cb) {
return this.kmip.request(logger, 'Encrypt', [
KMIP.TextString('Unique Identifier', masterKeyId),
KMIP.Structure('Cryptographic Parameters', [
KMIP.Enumeration('Block Cipher Mode',
CRYPTOGRAPHIC_CIPHER_MODE),
CRYPTOGRAPHIC_CIPHER_MODE),
KMIP.Enumeration('Padding Method',
CRYPTOGRAPHIC_PADDING_METHOD),
CRYPTOGRAPHIC_PADDING_METHOD),
KMIP.Enumeration('Cryptographic Algorithm',
CRYPTOGRAPHIC_ALGORITHM),
CRYPTOGRAPHIC_ALGORITHM),
]),
KMIP.ByteString('Data', plainTextDataKey),
KMIP.ByteString('IV/Counter/Nonce', CRYPTOGRAPHIC_DEFAULT_IV),
@ -512,8 +512,8 @@ class Client {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::cipherDataKey',
{ error,
serverInformation: this.serverInformation });
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
@ -523,7 +523,7 @@ class Client {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::cipherDataKey',
{ error, uniqueIdentifier });
{ error, uniqueIdentifier });
return cb(error);
}
return cb(null, data);
@ -541,19 +541,19 @@ class Client {
* @callback called with (err, plainTextDataKey: Buffer)
*/
decipherDataKey(cryptoScheme,
masterKeyId,
cipheredDataKey,
logger,
cb) {
masterKeyId,
cipheredDataKey,
logger,
cb) {
return this.kmip.request(logger, 'Decrypt', [
KMIP.TextString('Unique Identifier', masterKeyId),
KMIP.Structure('Cryptographic Parameters', [
KMIP.Enumeration('Block Cipher Mode',
CRYPTOGRAPHIC_CIPHER_MODE),
CRYPTOGRAPHIC_CIPHER_MODE),
KMIP.Enumeration('Padding Method',
CRYPTOGRAPHIC_PADDING_METHOD),
CRYPTOGRAPHIC_PADDING_METHOD),
KMIP.Enumeration('Cryptographic Algorithm',
CRYPTOGRAPHIC_ALGORITHM),
CRYPTOGRAPHIC_ALGORITHM),
]),
KMIP.ByteString('Data', cipheredDataKey),
KMIP.ByteString('IV/Counter/Nonce', CRYPTOGRAPHIC_DEFAULT_IV),
@ -561,8 +561,8 @@ class Client {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::decipherDataKey',
{ error,
serverInformation: this.serverInformation });
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
@ -572,7 +572,7 @@ class Client {
const error = _arsenalError(
'Server did not return the right identifier');
logger.error('KMIP::decipherDataKey',
{ error, uniqueIdentifier });
{ error, uniqueIdentifier });
return cb(error);
}
return cb(null, data);

View File

@ -55,15 +55,15 @@ function TTLVCodec() {
const property = {};
if (!TypeDecoder[elementType]) {
_throwError(logger,
'Unknown element type',
{ funcName, elementTag, elementType });
'Unknown element type',
{ funcName, elementTag, elementType });
}
const elementValue = value.slice(i + 8,
i + 8 + elementLength);
i + 8 + elementLength);
if (elementValue.length !== elementLength) {
_throwError(logger, 'BUG: Wrong buffer size',
{ funcName, elementLength,
bufferLength: elementValue.length });
{ funcName, elementLength,
bufferLength: elementValue.length });
}
property.type = TypeDecoder[elementType].name;
property.value = TypeDecoder[elementType]
@ -75,7 +75,7 @@ function TTLVCodec() {
const tagInfo = TagDecoder[elementTag];
if (!tagInfo) {
logger.debug('Unknown element tag',
{ funcName, elementTag });
{ funcName, elementTag });
property.tag = elementTag;
element['Unknown Tag'] = property;
} else {
@ -83,8 +83,8 @@ function TTLVCodec() {
if (tagInfo.name === 'Attribute Name') {
if (property.type !== 'TextString') {
_throwError(logger,
'Invalide type',
{ funcName, type: property.type });
'Invalide type',
{ funcName, type: property.type });
}
diversion = property.value;
}
@ -114,8 +114,8 @@ function TTLVCodec() {
}
const itemResult =
TypeEncoder[itemType].encode(itemTagName,
itemValue,
itemDiversion);
itemValue,
itemDiversion);
encodedValue = encodedValue
.concat(_ttlvPadVector(itemResult));
});
@ -133,9 +133,9 @@ function TTLVCodec() {
const fixedLength = 4;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
return value.readUInt32BE(0);
},
@ -156,16 +156,16 @@ function TTLVCodec() {
const fixedLength = 8;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const longUInt = UINT32_MAX * value.readUInt32BE(0) +
value.readUInt32BE(4);
if (longUInt > Number.MAX_SAFE_INTEGER) {
_throwError(logger,
'53-bit overflow',
{ funcName, longUInt });
'53-bit overflow',
{ funcName, longUInt });
}
return longUInt;
},
@ -200,9 +200,9 @@ function TTLVCodec() {
const fixedLength = 4;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const enumValue = value.toString('hex');
const actualTag = diversion ? TagEncoder[diversion].value : tag;
@ -211,10 +211,10 @@ function TTLVCodec() {
!enumInfo.enumeration ||
!enumInfo.enumeration[enumValue]) {
return { tag,
value: enumValue,
message: 'Unknown enumeration value',
diversion,
};
value: enumValue,
message: 'Unknown enumeration value',
diversion,
};
}
return enumInfo.enumeration[enumValue];
},
@ -227,7 +227,7 @@ function TTLVCodec() {
const actualTag = diversion || tagName;
const encodedValue =
Buffer.from(TagEncoder[actualTag].enumeration[value],
'hex');
'hex');
return _ttlvPadVector([tag, type, length, encodedValue]);
},
},
@ -238,9 +238,9 @@ function TTLVCodec() {
const fixedLength = 8;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const msUInt = value.readUInt32BE(0);
const lsUInt = value.readUInt32BE(4);
@ -267,7 +267,7 @@ function TTLVCodec() {
const length = Buffer.alloc(4);
length.writeUInt32BE(value.length);
return _ttlvPadVector([tag, type, length,
Buffer.from(value, 'utf8')]);
Buffer.from(value, 'utf8')]);
},
},
'08': {
@ -289,17 +289,17 @@ function TTLVCodec() {
const fixedLength = 8;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const d = new Date(0);
const utcSeconds = UINT32_MAX * value.readUInt32BE(0) +
value.readUInt32BE(4);
if (utcSeconds > Number.MAX_SAFE_INTEGER) {
_throwError(logger,
'53-bit overflow',
{ funcName, utcSeconds });
'53-bit overflow',
{ funcName, utcSeconds });
}
d.setUTCSeconds(utcSeconds);
return d;
@ -323,9 +323,9 @@ function TTLVCodec() {
const fixedLength = 4;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
return value.readInt32BE(0);
},
@ -415,8 +415,8 @@ function TTLVCodec() {
throw Error(`Unknown Type '${type}'`);
}
const itemValue = TypeEncoder[type].encode(key,
item[key].value,
item[key].diversion);
item[key].value,
item[key].diversion);
result = result.concat(_ttlvPadVector(itemValue));
});
});

View File

@ -275,11 +275,11 @@ class KMIP {
KMIP.Structure('Request Header', [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major',
this.protocolVersion.major),
this.protocolVersion.major),
KMIP.Integer('Protocol Version Minor',
this.protocolVersion.minor)]),
this.protocolVersion.minor)]),
KMIP.Integer('Maximum Response Size',
this.maximumResponseSize),
this.maximumResponseSize),
KMIP.Integer('Batch Count', 1)]),
KMIP.Structure('Batch Item', [
KMIP.Enumeration('Operation', operation),
@ -292,7 +292,7 @@ class KMIP {
(err, conversation, rawResponse) => {
if (err) {
logger.error('KMIP::request: Failed to send message',
{ error: err });
{ error: err });
return cb(err);
}
const response = this._decodeMessage(logger, rawResponse);
@ -311,16 +311,16 @@ class KMIP {
this.transport.abortPipeline(conversation);
const error = Error('Invalid batch item ID returned');
logger.error('KMIP::request: failed',
{ resultUniqueBatchItemID, uuid, error });
{ resultUniqueBatchItemID, uuid, error });
return cb(error);
}
if (performedOperation !== operation) {
this.transport.abortPipeline(conversation);
const error = Error('Operation mismatch',
{ got: performedOperation,
expected: operation });
{ got: performedOperation,
expected: operation });
logger.error('KMIP::request: Operation mismatch',
{ error });
{ error });
return cb(error);
}
if (resultStatus !== 'Success') {
@ -331,19 +331,17 @@ class KMIP {
response.lookup(
'Response Message/Batch Item/Result Message')[0];
const error = Error('KMIP request failure',
{ resultStatus,
resultReason,
resultMessage });
{ resultStatus,
resultReason,
resultMessage });
logger.error('KMIP::request: request failed',
{ error, resultStatus,
resultReason, resultMessage });
{ error, resultStatus,
resultReason, resultMessage });
return cb(error);
}
return cb(null, response);
});
}
}

View File

@ -86,8 +86,8 @@ class TransportTemplate {
const deferedRequest = this.deferedRequests.shift();
process.nextTick(() => {
this.send(logger,
deferedRequest.encodedMessage,
deferedRequest.cb);
deferedRequest.encodedMessage,
deferedRequest.cb);
});
} else if (this.callbackPipeline.length === 0 &&
this.deferedRequests.length === 0 &&

View File

@ -0,0 +1,76 @@
const httpServer = require('../http/server');
const werelogs = require('werelogs');
const errors = require('../../errors');
const ZenkoMetrics = require('../../metrics/ZenkoMetrics');
const { sendSuccess, sendError } = require('./Utils');
// Default probe check: always reports healthy. Used as a fallback when the
// caller does not provide livenessCheck/readinessCheck in the constructor
// params of HealthProbeServer.
function checkStub(log) { // eslint-disable-line
    return true;
}
/**
 * HTTP server exposing health probes and Prometheus metrics.
 *
 * Routes served (GET only):
 * - `/_/health/liveness`   -> 200 when the liveness check passes, else 503
 * - `/_/health/readiness`  -> 200 when the readiness check passes, else 503
 * - `/_/monitoring/metrics` -> metrics in Prometheus exposition format
 */
class HealthProbeServer extends httpServer {
    /**
     * @constructor
     * @param {Object} params - constructor params
     * @param {number} params.port - TCP port to listen on
     * @param {string} [params.bindAddress] - bind address (default 'localhost')
     * @param {function} [params.livenessCheck] - liveness predicate taking a
     *   logger and returning a truthy value when the process is live
     * @param {function} [params.readinessCheck] - readiness predicate taking a
     *   logger and returning a truthy value when the process is ready
     */
    constructor(params) {
        const logging = new werelogs.Logger('HealthProbeServer');
        super(params.port, logging);
        this.logging = logging;
        this.setBindAddress(params.bindAddress || 'localhost');
        // hook our dispatcher into the parent http server
        this.onRequest(this._onRequest);
        // route table: exact request URL -> bound handler method
        this._reqHandlers = {
            '/_/health/liveness': this._onLiveness.bind(this),
            '/_/health/readiness': this._onReadiness.bind(this),
            '/_/monitoring/metrics': this._onMetrics.bind(this),
        };
        this._livenessCheck = params.livenessCheck || checkStub;
        this._readinessCheck = params.readinessCheck || checkStub;
    }

    /**
     * Replace the liveness predicate.
     * @param {function} f - new liveness check
     * @returns {undefined}
     */
    onLiveCheck(f) {
        this._livenessCheck = f;
    }

    /**
     * Replace the readiness predicate.
     * @param {function} f - new readiness check
     * @returns {undefined}
     */
    onReadyCheck(f) {
        this._readinessCheck = f;
    }

    /**
     * Dispatch an incoming request to the matching probe handler.
     * Non-GET methods get 405; unknown URLs get an InvalidURI error.
     * @param {http.IncomingMessage} req - incoming request
     * @param {http.ServerResponse} res - response to write to
     * @returns {undefined}
     */
    _onRequest(req, res) {
        const log = this.logging.newRequestLogger();
        log.debug('request received', { method: req.method,
            url: req.url });
        if (req.method !== 'GET') {
            sendError(res, log, errors.MethodNotAllowed);
            return;
        }
        if (!(req.url in this._reqHandlers)) {
            sendError(res, log, errors.InvalidURI);
            return;
        }
        this._reqHandlers[req.url](req, res, log);
    }

    /** Reply 200 when the liveness check passes, 503 otherwise. */
    _onLiveness(req, res, log) {
        if (!this._livenessCheck(log)) {
            sendError(res, log, errors.ServiceUnavailable);
            return;
        }
        sendSuccess(res, log);
    }

    /** Reply 200 when the readiness check passes, 503 otherwise. */
    _onReadiness(req, res, log) {
        if (!this._readinessCheck(log)) {
            sendError(res, log, errors.ServiceUnavailable);
            return;
        }
        sendSuccess(res, log);
    }

    /** Expose metrics to Prometheus scrapers. */
    _onMetrics(req, res) {
        res.writeHead(200, {
            'Content-Type': ZenkoMetrics.asPrometheusContentType(),
        });
        res.end(ZenkoMetrics.asPrometheus());
    }
}
module.exports = HealthProbeServer;

View File

@ -3,19 +3,17 @@ const werelogs = require('werelogs');
const errors = require('../../errors');
const DEFAULT_LIVE_ROUTE = '/_/live';
const DEFAULT_READY_ROUTE = '/_/live';
const DEFAULT_METRICS_ROUTE = '/_/metrics';
const DEFAULT_READY_ROUTE = '/_/ready';
const DEFAULT_METRICS_ROUTE = '/metrics';
/**
* ProbeDelegate is used to determine if a probe is successful or
* if any errors are present.
* If everything is working as intended, it is a no-op.
* Otherwise, return a string representing what is failing.
* ProbeDelegate is used to handle probe checks.
* You can sendSuccess and sendError from Utils to handle success
* and failure conditions.
* @callback ProbeDelegate
* @param { import('http').ServerResponse } res - HTTP response for writing
* @param {werelogs.Logger} log - Werelogs instance for logging if you choose to
* @return {(string|undefined)} String representing issues to report. An empty
* string or undefined is used to represent no issues.
* @return {undefined}
*/
/**
@ -91,13 +89,7 @@ class ProbeServer extends httpServer {
return;
}
const probeResponse = this._handlers.get(req.url)(res, log);
if (probeResponse !== undefined && probeResponse !== '') {
// Return an internal error with the response
errors.InternalError
.customizeDescription(probeResponse)
.writeResponse(res);
}
this._handlers.get(req.url)(res, log);
}
}

View File

@ -0,0 +1,41 @@
/**
 * Reply to the client with 200 OK and an optional body.
 * @param {http.ServerResponse} res - HTTP response for writing
 * @param {werelogs.Logger} log - Werelogs instance for logging if you choose to
 * @param {string} [message] - body to send back; defaults to 'OK'
 * @returns {undefined}
 */
function sendSuccess(res, log, message = 'OK') {
    log.debug('replying with success');
    res.writeHead(200);
    res.end(message);
}
/**
 * Reply to the client with an Arsenal error as a JSON body, using the
 * error's HTTP code as the status.
 * @param {http.ServerResponse} res - HTTP response for writing
 * @param {werelogs.Logger} log - Werelogs instance for logging if you choose to
 * @param {ArsenalError} error - error to send back to the user
 * @param {string} [optMessage] - overrides the error's own description
 * @returns {undefined}
 */
function sendError(res, log, error, optMessage) {
    const errorMessage = optMessage || error.description || '';
    log.debug('sending back error response', {
        httpCode: error.code,
        errorType: error.message,
        error: errorMessage,
    });
    res.writeHead(error.code);
    const body = JSON.stringify({
        errorType: error.message,
        errorMessage,
    });
    res.end(body);
}
module.exports = {
sendSuccess,
sendError,
};

View File

@ -81,6 +81,7 @@ class RESTClient {
this.host = params.host;
this.port = params.port;
this.isPassthrough = params.isPassthrough || false;
this.setupLogging(params.logApi);
this.httpAgent = new HttpAgent({
keepAlive: true,
@ -119,11 +120,13 @@ class RESTClient {
doRequest(method, headers, key, log, responseCb) {
const reqHeaders = headers || {};
const urlKey = key || '';
const prefix = this.isPassthrough ?
constants.passthroughFileURL : constants.dataFileURL;
const reqParams = {
hostname: this.host,
port: this.port,
method,
path: `${constants.dataFileURL}/${urlKey}`,
path: encodeURI(`${prefix}/${urlKey}`),
headers: reqHeaders,
agent: this.httpAgent,
};

View File

@ -7,7 +7,7 @@ const werelogs = require('werelogs');
const httpServer = require('../http/server');
const constants = require('../../constants');
const utils = require('./utils');
const { parseURL } = require('./utils');
const httpUtils = require('../http/utils');
const errors = require('../../errors');
@ -19,7 +19,7 @@ function setContentRange(response, byteRange, objectSize) {
const [start, end] = byteRange;
assert(start !== undefined && end !== undefined);
response.setHeader('Content-Range',
`bytes ${start}-${end}/${objectSize}`);
`bytes ${start}-${end}/${objectSize}`);
}
function sendError(res, log, error, optMessage) {
@ -37,42 +37,6 @@ function sendError(res, log, error, optMessage) {
errorMessage: message })}\n`);
}
/**
* Parse the given url and return a pathInfo object. Sanity checks are
* performed.
*
* @param {String} urlStr - URL to parse
* @param {Boolean} expectKey - whether the command expects to see a
* key in the URL
* @return {Object} a pathInfo object with URL items containing the
* following attributes:
* - pathInfo.service {String} - The name of REST service ("DataFile")
* - pathInfo.key {String} - The requested key
*/
function parseURL(urlStr, expectKey) {
const urlObj = url.parse(urlStr);
const pathInfo = utils.explodePath(urlObj.path);
if (pathInfo.service !== constants.dataFileURL) {
throw errors.InvalidAction.customizeDescription(
`unsupported service '${pathInfo.service}'`);
}
if (expectKey && pathInfo.key === undefined) {
throw errors.MissingParameter.customizeDescription(
'URL is missing key');
}
if (!expectKey && pathInfo.key !== undefined) {
// note: we may implement rewrite functionality by allowing a
// key in the URL, though we may still provide the new key in
// the Location header to keep immutability property and
// atomicity of the update (we would just remove the old
// object when the new one has been written entirely in this
// case, saving a request over an equivalent PUT + DELETE).
throw errors.InvalidURI.customizeDescription(
'PUT url cannot contain a key');
}
return pathInfo;
}
/**
* @class
* @classdesc REST Server interface
@ -81,7 +45,6 @@ function parseURL(urlStr, expectKey) {
* start() to start listening to the configured port.
*/
class RESTServer extends httpServer {
/**
* @constructor
* @param {Object} params - constructor params
@ -263,7 +226,7 @@ class RESTServer extends httpServer {
return sendError(res, log, err);
}
log.debug('sending back 200/206 response with contents',
{ key: pathInfo.key });
{ key: pathInfo.key });
setContentLength(res, contentLength);
res.setHeader('Accept-Ranges', 'bytes');
if (byteRange) {
@ -301,7 +264,7 @@ class RESTServer extends httpServer {
return sendError(res, log, err);
}
log.debug('sending back 204 response to DELETE',
{ key: pathInfo.key });
{ key: pathInfo.key });
res.writeHead(204);
return res.end(() => {
log.debug('DELETE response sent', { key: pathInfo.key });

View File

@ -1,15 +1,68 @@
'use strict'; // eslint-disable-line
const errors = require('../../errors');
const constants = require('../../constants');
const url = require('url');
module.exports.explodePath = function explodePath(path) {
const passthroughPrefixLength = constants.passthroughFileURL.length;
/**
 * Split a REST path into its service name and optional key.
 *
 * Fix: the previous text contained a duplicated ternary-tail line
 * (`pathMatch[3] : undefined),` appeared twice inside the returned object
 * literal), which is a syntax error; the duplicate is removed.
 *
 * @param {String} path - request path, e.g. '/DataFile/0123abcd'
 * @return {Object} object with:
 * - service {String} - the service component of the path
 * - key {String|undefined} - the key component, if present and non-empty
 * @throws {ArsenalError} InvalidURI when the path matches neither the
 *   passthrough prefix nor the strict service/key pattern
 */
function explodePath(path) {
    // Passthrough URLs may contain arbitrary key characters, so they are
    // matched by prefix instead of the strict regexp below.
    if (path.startsWith(constants.passthroughFileURL)) {
        const key = path.slice(passthroughPrefixLength + 1);
        return {
            service: constants.passthroughFileURL,
            key: key.length > 0 ? key : undefined,
        };
    }
    const pathMatch = /^(\/[a-zA-Z0-9]+)(\/([0-9a-f]*))?$/.exec(path);
    if (pathMatch) {
        return {
            service: pathMatch[1],
            key: (pathMatch[3] !== undefined && pathMatch[3].length > 0 ?
                pathMatch[3] : undefined),
        };
    }
    throw errors.InvalidURI.customizeDescription('malformed URI');
}
/**
* Parse the given url and return a pathInfo object. Sanity checks are
* performed.
*
* @param {String} urlStr - URL to parse
* @param {Boolean} expectKey - whether the command expects to see a
* key in the URL
* @return {Object} a pathInfo object with URL items containing the
* following attributes:
* - pathInfo.service {String} - The name of REST service ("DataFile")
* - pathInfo.key {String} - The requested key
*/
/**
 * Parse the given url and return a pathInfo object. Sanity checks are
 * performed.
 *
 * @param {String} urlStr - URL to parse
 * @param {Boolean} expectKey - whether the command expects to see a
 *   key in the URL
 * @return {Object} a pathInfo object with URL items containing the
 * following attributes:
 * - pathInfo.service {String} - The name of REST service ("DataFile")
 * - pathInfo.key {String} - The requested key
 * @throws {ArsenalError} InvalidAction for an unknown service,
 *   MissingParameter when a required key is absent, InvalidURI when a
 *   key is present but not allowed
 */
function parseURL(urlStr, expectKey) {
    const urlObj = url.parse(urlStr);
    const pathInfo = explodePath(decodeURI(urlObj.path));
    const knownService =
        pathInfo.service === constants.dataFileURL ||
        pathInfo.service === constants.passthroughFileURL;
    if (!knownService) {
        throw errors.InvalidAction.customizeDescription(
            `unsupported service '${pathInfo.service}'`);
    }
    if (expectKey && pathInfo.key === undefined) {
        throw errors.MissingParameter.customizeDescription(
            'URL is missing key');
    }
    if (!expectKey && pathInfo.key !== undefined) {
        // note: we may implement rewrite functionality by allowing a
        // key in the URL, though we may still provide the new key in
        // the Location header to keep immutability property and
        // atomicity of the update (we would just remove the old
        // object when the new one has been written entirely in this
        // case, saving a request over an equivalent PUT + DELETE).
        throw errors.InvalidURI.customizeDescription(
            'PUT url cannot contain a key');
    }
    return pathInfo;
}
module.exports = {
explodePath,
parseURL,
};

View File

@ -17,7 +17,6 @@ const rpc = require('./rpc.js');
* RPC client object accessing the sub-level transparently.
*/
class LevelDbClient extends rpc.BaseClient {
/**
* @constructor
*
@ -78,7 +77,6 @@ class LevelDbClient extends rpc.BaseClient {
* env.subDb (env is passed as first parameter of received RPC calls).
*/
class LevelDbService extends rpc.BaseService {
/**
* @constructor
*

View File

@ -37,7 +37,6 @@ let streamRPCJSONObj;
* an error occurred).
*/
class BaseClient extends EventEmitter {
/**
* @constructor
*
@ -54,7 +53,7 @@ class BaseClient extends EventEmitter {
*/
constructor(params) {
const { url, logger, callTimeoutMs,
streamMaxPendingAck, streamAckTimeoutMs } = params;
streamMaxPendingAck, streamAckTimeoutMs } = params;
assert(url);
assert(logger);
@ -82,11 +81,11 @@ class BaseClient extends EventEmitter {
_call(remoteCall, args, cb) {
const wrapCb = (err, data) => {
cb(reconstructError(err),
this.socketStreams.decodeStreams(data));
this.socketStreams.decodeStreams(data));
};
this.logger.debug('remote call', { remoteCall, args });
this.socket.emit('call', remoteCall,
this.socketStreams.encodeStreams(args), wrapCb);
this.socketStreams.encodeStreams(args), wrapCb);
return undefined;
}
@ -113,8 +112,8 @@ class BaseClient extends EventEmitter {
throw new Error(`argument cb=${cb} is not a callback`);
}
async.timeout(this._call.bind(this), timeoutMs,
`operation ${remoteCall} timed out`)(remoteCall,
args, cb);
`operation ${remoteCall} timed out`)(remoteCall,
args, cb);
return undefined;
}
@ -142,7 +141,7 @@ class BaseClient extends EventEmitter {
const url = this.url;
this.socket.on('error', err => {
this.logger.warn('connectivity error to the RPC service',
{ url, error: err });
{ url, error: err });
});
this.socket.on('connect', () => {
this.emit('connect');
@ -156,7 +155,7 @@ class BaseClient extends EventEmitter {
this.getManifest((err, manifest) => {
if (err) {
this.logger.error('Error fetching manifest from RPC server',
{ error: err });
{ error: err });
} else {
manifest.api.forEach(apiItem => {
this.createCall(apiItem.name);
@ -251,7 +250,6 @@ class BaseClient extends EventEmitter {
*
*/
class BaseService {
/**
* @constructor
*
@ -497,7 +495,7 @@ function RPCServer(params) {
conn.on('error', err => {
log.error('error on socket.io connection',
{ namespace: service.namespace, error: err });
{ namespace: service.namespace, error: err });
});
conn.on('call', (remoteCall, args, cb) => {
const decodedArgs = streamsSocket.decodeStreams(args);
@ -647,8 +645,8 @@ streamRPCJSONObj = function _streamRPCJSONObj(obj, wstream, cb) {
// primitive types
if (obj === undefined) {
wstream.write('null'); // if undefined elements are present in
// arrays, convert them to JSON null
// objects
// arrays, convert them to JSON null
// objects
} else {
wstream.write(JSON.stringify(obj));
}

View File

@ -16,7 +16,7 @@ class SIOOutputStream extends stream.Writable {
constructor(socket, streamId, maxPendingAck, ackTimeoutMs) {
super({ objectMode: true });
this._initOutputStream(socket, streamId, maxPendingAck,
ackTimeoutMs);
ackTimeoutMs);
}
_initOutputStream(socket, streamId, maxPendingAck, ackTimeoutMs) {
@ -194,7 +194,7 @@ class SIOStreamSocket {
this.socket.on('stream-data', (payload, cb) => {
const { streamId, data } = payload;
log.debug('received \'stream-data\' event',
{ streamId, size: data.length });
{ streamId, size: data.length });
const stream = this.remoteStreams[streamId];
if (!stream) {
log.debug('no such remote stream registered', { streamId });
@ -280,15 +280,15 @@ class SIOStreamSocket {
let transportStream;
if (isReadStream) {
transportStream = new SIOOutputStream(this, streamId,
this.maxPendingAck,
this.ackTimeoutMs);
this.maxPendingAck,
this.ackTimeoutMs);
} else {
transportStream = new SIOInputStream(this, streamId);
}
this.localStreams[streamId] = arg;
arg.once('close', () => {
log.debug('stream closed, removing from local streams',
{ streamId });
{ streamId });
delete this.localStreams[streamId];
});
arg.on('error', error => {
@ -350,8 +350,8 @@ class SIOStreamSocket {
stream = new SIOInputStream(this, streamId);
} else if (arg.writable) {
stream = new SIOOutputStream(this, streamId,
this.maxPendingAck,
this.ackTimeoutMs);
this.maxPendingAck,
this.ackTimeoutMs);
} else {
throw new Error('can\'t decode stream neither readable ' +
'nor writable');
@ -360,14 +360,14 @@ class SIOStreamSocket {
if (arg.readable) {
stream.once('close', () => {
log.debug('stream closed, removing from remote streams',
{ streamId });
{ streamId });
delete this.remoteStreams[streamId];
});
}
if (arg.writable) {
stream.once('finish', () => {
log.debug('stream finished, removing from remote streams',
{ streamId });
{ streamId });
delete this.remoteStreams[streamId];
});
}
@ -399,7 +399,7 @@ class SIOStreamSocket {
_write(streamId, data, cb) {
this.logger.debug('emit \'stream-data\' event',
{ streamId, size: data.length });
{ streamId, size: data.length });
this.socket.emit('stream-data', { streamId, data }, cb);
}

View File

@ -0,0 +1,159 @@
'use strict'; // eslint-disable-line strict
const { URL } = require('url');
const { decryptSecret } = require('../executables/pensieveCreds/utils');
/**
 * Convert overlay (Pensieve) location configurations into the internal
 * location-constraint format consumed by the backend.
 *
 * Unknown location types are logged and omitted from the result.
 *
 * @param {object} overlayLocations - overlay locations keyed by name
 * @param {object} creds - credentials used to decrypt stored secret keys
 * @param {object} log - logger (must expose info())
 * @return {object} patched locations keyed by name; {} when input is falsy
 */
function patchLocations(overlayLocations, creds, log) {
    if (!overlayLocations) {
        return {};
    }

    const locations = {};
    Object.entries(overlayLocations).forEach(([name, conf]) => {
        const details = conf.details || {};
        const location = {
            name,
            objectId: conf.objectId,
            details,
            locationType: conf.locationType,
        };
        let supportsVersioning = false;
        // CI Ceph runs force path-style S3 addressing globally.
        let pathStyle = process.env.CI_CEPH !== undefined;

        switch (conf.locationType) {
        case 'location-mem-v1':
            location.type = 'mem';
            location.details = { supportsVersioning: true };
            break;
        case 'location-file-v1':
            location.type = 'file';
            location.details = { supportsVersioning: true };
            break;
        case 'location-azure-v1':
            location.type = 'azure';
            if (details.secretKey && details.secretKey.length > 0) {
                location.details = {
                    bucketMatch: details.bucketMatch,
                    azureStorageEndpoint: details.endpoint,
                    azureStorageAccountName: details.accessKey,
                    azureStorageAccessKey: decryptSecret(creds,
                        details.secretKey),
                    azureContainerName: details.bucketName,
                };
            }
            break;
        case 'location-ceph-radosgw-s3-v1':
        case 'location-scality-ring-s3-v1':
        case 'location-aws-s3-v1':
        case 'location-wasabi-v1':
        case 'location-do-spaces-v1': {
            // Ceph RGW and RING S3 gateways require path-style addressing.
            if (conf.locationType === 'location-ceph-radosgw-s3-v1' ||
                conf.locationType === 'location-scality-ring-s3-v1') {
                pathStyle = true;
            }
            // Every S3-compatible backend here supports versioning except
            // DigitalOcean Spaces.
            if (conf.locationType !== 'location-do-spaces-v1') {
                supportsVersioning = true;
            }
            location.type = 'aws_s3';
            if (details.secretKey && details.secretKey.length > 0) {
                let https = true;
                let awsEndpoint = details.endpoint || 's3.amazonaws.com';
                if (awsEndpoint.includes('://')) {
                    // Endpoint was given as a full URL: keep only the host
                    // and derive the scheme from its protocol.
                    const parsed = new URL(awsEndpoint);
                    awsEndpoint = parsed.host;
                    https = parsed.protocol.includes('https');
                }
                location.details = {
                    credentials: {
                        accessKey: details.accessKey,
                        secretKey: decryptSecret(creds, details.secretKey),
                    },
                    bucketName: details.bucketName,
                    bucketMatch: details.bucketMatch,
                    serverSideEncryption:
                        Boolean(details.serverSideEncryption),
                    region: details.region,
                    awsEndpoint,
                    supportsVersioning,
                    pathStyle,
                    https,
                };
            }
            break;
        }
        case 'location-gcp-v1':
            location.type = 'gcp';
            if (details.secretKey && details.secretKey.length > 0) {
                location.details = {
                    credentials: {
                        accessKey: details.accessKey,
                        secretKey: decryptSecret(creds, details.secretKey),
                    },
                    bucketName: details.bucketName,
                    mpuBucketName: details.mpuBucketName,
                    bucketMatch: details.bucketMatch,
                    gcpEndpoint: details.endpoint ||
                        'storage.googleapis.com',
                    https: true,
                };
            }
            break;
        case 'location-scality-sproxyd-v1':
            location.type = 'scality';
            if (conf.details && details.bootstrapList && details.proxyPath) {
                location.details = {
                    supportsVersioning: true,
                    connector: {
                        sproxyd: {
                            chordCos: details.chordCos || null,
                            bootstrap: details.bootstrapList,
                            path: details.proxyPath,
                        },
                    },
                };
            }
            break;
        case 'location-nfs-mount-v1':
            location.type = 'pfs';
            if (conf.details) {
                location.details = {
                    supportsVersioning: true,
                    bucketMatch: true,
                    pfsDaemonEndpoint: {
                        // NOTE: host is derived from the overlay's own
                        // `name` property, not from the object key.
                        host: `${conf.name}-cosmos-pfsd`,
                        port: 80,
                    },
                };
            }
            break;
        case 'location-scality-hdclient-v2':
            location.type = 'scality';
            if (conf.details && details.bootstrapList) {
                location.details = {
                    supportsVersioning: true,
                    connector: {
                        hdclient: {
                            bootstrap: details.bootstrapList,
                        },
                    },
                };
            }
            break;
        default:
            log.info(
                'unknown location type',
                { locationType: conf.locationType },
            );
            return;
        }
        location.sizeLimitGB = conf.sizeLimitGB || null;
        location.isTransient = Boolean(conf.isTransient);
        location.legacyAwsBehavior = Boolean(conf.legacyAwsBehavior);
        locations[name] = location;
    });
    return locations;
}
module.exports = {
patchLocations,
};

View File

@ -38,6 +38,10 @@
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:saml-provider/[\\w._-]{1,128}$"
},
"principalFederatedOidcIdp": {
"type": "string",
"pattern": "^(?:http(s)?:\/\/)?[\\w.-]+(?:\\.[\\w\\.-]+)+[\\w\\-\\._~:/?#[\\]@!\\$&'\\(\\)\\*\\+,;=.]+$"
},
"principalAWSItem": {
"type": "object",
"properties": {
@ -98,6 +102,9 @@
"oneOf": [
{
"$ref": "#/definitions/principalFederatedSamlIdp"
},
{
"$ref": "#/definitions/principalFederatedOidcIdp"
}
]
}

View File

@ -50,7 +50,7 @@ evaluators.isResourceApplicable = (requestContext, statementResource, log) => {
requestResourceArr, true);
if (arnSegmentsMatch) {
log.trace('policy resource is applicable to request',
{ requestResource: resource, policyResource });
{ requestResource: resource, policyResource });
return true;
}
continue;
@ -224,21 +224,21 @@ evaluators.evaluatePolicy = (requestContext, policy, log) => {
// in policy, move on to next statement
if (currentStatement.NotResource &&
evaluators.isResourceApplicable(requestContext,
currentStatement.NotResource, log)) {
currentStatement.NotResource, log)) {
continue;
}
// If affirmative action is in policy and request action is not
// applicable, move on to next statement
if (currentStatement.Action &&
!evaluators.isActionApplicable(requestContext.getAction(),
currentStatement.Action, log)) {
currentStatement.Action, log)) {
continue;
}
// If NotAction is in policy and action matches NotAction in policy,
// move on to next statement
if (currentStatement.NotAction &&
evaluators.isActionApplicable(requestContext.getAction(),
currentStatement.NotAction, log)) {
currentStatement.NotAction, log)) {
continue;
}
const conditionEval = currentStatement.Condition ?

View File

@ -30,6 +30,7 @@ const sharedActionMap = {
bypassGovernanceRetention: 's3:BypassGovernanceRetention',
listMultipartUploads: 's3:ListBucketMultipartUploads',
listParts: 's3:ListMultipartUploadParts',
metadataSearch: 's3:MetadataSearch',
multipartDelete: 's3:AbortMultipartUpload',
objectDelete: 's3:DeleteObject',
objectDeleteTagging: 's3:DeleteObjectTagging',
@ -116,6 +117,7 @@ const actionMonitoringMapS3 = {
initiateMultipartUpload: 'CreateMultipartUpload',
listMultipartUploads: 'ListMultipartUploads',
listParts: 'ListParts',
metadataSearch: 'MetadataSearch',
multiObjectDelete: 'DeleteObjects',
multipartDelete: 'AbortMultipartUpload',
objectCopy: 'CopyObject',
@ -159,6 +161,7 @@ const actionMapIAM = {
getPolicyVersion: 'iam:GetPolicyVersion',
getUser: 'iam:GetUser',
listAccessKeys: 'iam:ListAccessKeys',
listEntitiesForPolicy: 'iam:ListEntitiesForPolicy',
listGroupPolicies: 'iam:ListGroupPolicies',
listGroups: 'iam:ListGroups',
listGroupsForUser: 'iam:ListGroupsForUser',

View File

@ -39,11 +39,11 @@ conditions.findConditionKey = (key, requestContext) => {
// (see Boolean Condition Operators).
// Note: This key is only present if MFA was used. So, the following
// will not work:
// "Condition" :
// { "Bool" : { "aws:MultiFactorAuthPresent" : false } }
// "Condition" :
// { "Bool" : { "aws:MultiFactorAuthPresent" : false } }
// Instead use:
// "Condition" :
// { "Null" : { "aws:MultiFactorAuthPresent" : true } }
// "Condition" :
// { "Null" : { "aws:MultiFactorAuthPresent" : true } }
map.set('aws:MultiFactorAuthPresent',
requestContext.getMultiFactorAuthPresent());
// aws:MultiFactorAuthAge Used to check how many seconds since
@ -146,6 +146,8 @@ conditions.findConditionKey = (key, requestContext) => {
map.set('s3:ObjLocationConstraint',
headers['x-amz-meta-scal-location-constraint']);
map.set('sts:ExternalId', requestContext.getRequesterExternalId());
map.set('keycloak:groups', requesterInfo.keycloakGroup);
map.set('keycloak:roles', requesterInfo.keycloakRole);
map.set('iam:PolicyArn', requestContext.getPolicyArn());
// s3:ExistingObjectTag - Used to check that existing object tag has
// specific tag key and value. Extraction of correct tag key is done in CloudServer.
@ -164,8 +166,8 @@ conditions.findConditionKey = (key, requestContext) => {
// so evaluation should be skipped
map.set('s3:RequestObjectTagKeys',
requestContext.getNeedTagEval() && requestContext.getRequestObjTags()
? getTagKeys(requestContext.getRequestObjTags())
: undefined);
? getTagKeys(requestContext.getRequestObjTags())
: undefined);
return map.get(key);
};
@ -189,7 +191,7 @@ function convertSpecialChars(string) {
return map[char];
}
return string.replace(/(\$\{\*\})|(\$\{\?\})|(\$\{\$\})/g,
characterMap);
characterMap);
}
/**
@ -423,10 +425,10 @@ conditions.convertConditionOperator = operator => {
return !operatorMap.ArnLike(key, value);
},
Null: function nullOperator(key, value) {
// Null is used to check if a condition key is present.
// The policy statement value should be either true (the key doesn't
// exist — it is null) or false (the key exists and its value is
// not null).
// Null is used to check if a condition key is present.
// The policy statement value should be either true (the key doesn't
// exist — it is null) or false (the key exists and its value is
// not null).
if ((key === undefined || key === null)
&& value[0] === 'true' ||
(key !== undefined && key !== null)

View File

@ -51,10 +51,10 @@ wildcards.handleWildcardInResource = arn => {
// Wildcards can be part of the resource ARN.
// Wildcards do NOT span segments of the ARN (separated by ":")
// Example: all elements in specific bucket:
// "Resource": "arn:aws:s3:::my_corporate_bucket/*"
// ARN format:
// arn:partition:service:region:namespace:relative-id
// Example: all elements in specific bucket:
// "Resource": "arn:aws:s3:::my_corporate_bucket/*"
// ARN format:
// arn:partition:service:region:namespace:relative-id
const arnArr = arn.split(':');
return arnArr.map(portion => wildcards.handleWildcards(portion));
};

View File

@ -6,7 +6,6 @@ const crypto = require('crypto');
* data through a stream
*/
class MD5Sum extends Transform {
/**
* @constructor
*/
@ -40,7 +39,6 @@ class MD5Sum extends Transform {
this.emit('hashed');
callback(null);
}
}
module.exports = MD5Sum;

View File

@ -73,7 +73,7 @@ class ResultsCollector extends EventEmitter {
* @property {Error} [results[].error] - error returned by Azure putting subpart
* @property {number} results[].subPartIndex - index of the subpart
*/
/**
/**
* "error" event
* @event ResultCollector#error
* @type {(Error|undefined)} error - error returned by Azure last subpart

View File

@ -94,7 +94,7 @@ azureMpuUtils.getSubPartIds = (part, uploadId) =>
azureMpuUtils.getBlockId(uploadId, part.partNumber, subPartIndex));
azureMpuUtils.putSinglePart = (errorWrapperFn, request, params, dataStoreName,
log, cb) => {
log, cb) => {
const { bucketName, partNumber, size, objectKey, contentMD5, uploadId }
= params;
const blockId = azureMpuUtils.getBlockId(uploadId, partNumber, 0);
@ -107,31 +107,31 @@ log, cb) => {
request.pipe(passThrough);
return errorWrapperFn('uploadPart', 'createBlockFromStream',
[blockId, bucketName, objectKey, passThrough, size, options,
(err, result) => {
if (err) {
log.error('Error from Azure data backend uploadPart',
{ error: err.message, dataStoreName });
if (err.code === 'ContainerNotFound') {
return cb(errors.NoSuchBucket);
(err, result) => {
if (err) {
log.error('Error from Azure data backend uploadPart',
{ error: err.message, dataStoreName });
if (err.code === 'ContainerNotFound') {
return cb(errors.NoSuchBucket);
}
if (err.code === 'InvalidMd5') {
return cb(errors.InvalidDigest);
}
if (err.code === 'Md5Mismatch') {
return cb(errors.BadDigest);
}
return cb(errors.InternalError.customizeDescription(
`Error returned from Azure: ${err.message}`),
);
}
if (err.code === 'InvalidMd5') {
return cb(errors.InvalidDigest);
}
if (err.code === 'Md5Mismatch') {
return cb(errors.BadDigest);
}
return cb(errors.InternalError.customizeDescription(
`Error returned from Azure: ${err.message}`)
);
}
const md5 = result.headers['content-md5'] || '';
const eTag = objectUtils.getHexMD5(md5);
return cb(null, eTag, size);
}], log, cb);
const md5 = result.headers['content-md5'] || '';
const eTag = objectUtils.getHexMD5(md5);
return cb(null, eTag, size);
}], log, cb);
};
azureMpuUtils.putNextSubPart = (errorWrapperFn, partParams, subPartInfo,
subPartStream, subPartIndex, resultsCollector, log, cb) => {
subPartStream, subPartIndex, resultsCollector, log, cb) => {
const { uploadId, partNumber, bucketName, objectKey } = partParams;
const subPartSize = azureMpuUtils.getSubPartSize(
subPartInfo, subPartIndex);
@ -140,11 +140,11 @@ subPartStream, subPartIndex, resultsCollector, log, cb) => {
resultsCollector.pushOp();
errorWrapperFn('uploadPart', 'createBlockFromStream',
[subPartId, bucketName, objectKey, subPartStream, subPartSize,
{}, err => resultsCollector.pushResult(err, subPartIndex)], log, cb);
{}, err => resultsCollector.pushResult(err, subPartIndex)], log, cb);
};
azureMpuUtils.putSubParts = (errorWrapperFn, request, params,
dataStoreName, log, cb) => {
dataStoreName, log, cb) => {
const subPartInfo = azureMpuUtils.getSubPartInfo(params.size);
const resultsCollector = new ResultsCollector();
const hashedStream = new MD5Sum();

View File

@ -31,9 +31,9 @@ convertMethods.listMultipartUploads = xmlParams => {
const l = xmlParams.list;
xml.push('<?xml version="1.0" encoding="UTF-8"?>',
'<ListMultipartUploadsResult ' +
'<ListMultipartUploadsResult ' +
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
`<Bucket>${escapeForXml(xmlParams.bucketName)}</Bucket>`
`<Bucket>${escapeForXml(xmlParams.bucketName)}</Bucket>`,
);
// For certain XML elements, if it is `undefined`, AWS returns either an
@ -58,7 +58,7 @@ convertMethods.listMultipartUploads = xmlParams => {
});
xml.push(`<MaxUploads>${escapeForXml(l.MaxKeys)}</MaxUploads>`,
`<IsTruncated>${escapeForXml(l.IsTruncated)}</IsTruncated>`
`<IsTruncated>${escapeForXml(l.IsTruncated)}</IsTruncated>`,
);
l.Uploads.forEach(upload => {
@ -69,29 +69,29 @@ convertMethods.listMultipartUploads = xmlParams => {
}
xml.push('<Upload>',
`<Key>${escapeForXml(key)}</Key>`,
`<UploadId>${escapeForXml(val.UploadId)}</UploadId>`,
'<Initiator>',
`<ID>${escapeForXml(val.Initiator.ID)}</ID>`,
`<DisplayName>${escapeForXml(val.Initiator.DisplayName)}` +
`<Key>${escapeForXml(key)}</Key>`,
`<UploadId>${escapeForXml(val.UploadId)}</UploadId>`,
'<Initiator>',
`<ID>${escapeForXml(val.Initiator.ID)}</ID>`,
`<DisplayName>${escapeForXml(val.Initiator.DisplayName)}` +
'</DisplayName>',
'</Initiator>',
'<Owner>',
`<ID>${escapeForXml(val.Owner.ID)}</ID>`,
`<DisplayName>${escapeForXml(val.Owner.DisplayName)}` +
'</Initiator>',
'<Owner>',
`<ID>${escapeForXml(val.Owner.ID)}</ID>`,
`<DisplayName>${escapeForXml(val.Owner.DisplayName)}` +
'</DisplayName>',
'</Owner>',
`<StorageClass>${escapeForXml(val.StorageClass)}` +
'</Owner>',
`<StorageClass>${escapeForXml(val.StorageClass)}` +
'</StorageClass>',
`<Initiated>${escapeForXml(val.Initiated)}</Initiated>`,
'</Upload>'
`<Initiated>${escapeForXml(val.Initiated)}</Initiated>`,
'</Upload>',
);
});
l.CommonPrefixes.forEach(prefix => {
xml.push('<CommonPrefixes>',
`<Prefix>${escapeForXml(prefix)}</Prefix>`,
'</CommonPrefixes>'
`<Prefix>${escapeForXml(prefix)}</Prefix>`,
'</CommonPrefixes>',
);
});

View File

@ -5,7 +5,6 @@ const Readable = require('stream').Readable;
* This class is used to produce zeros filled buffers for a reader consumption
*/
class NullStream extends Readable {
/**
* Construct a new zeros filled buffers producer that will
* produce as much bytes as specified by the range parameter, or the size
@ -32,8 +31,8 @@ class NullStream extends Readable {
_read(size) {
const toRead = Math.min(size, this.bytesToRead);
const buffer = toRead > 0
? Buffer.alloc(toRead, 0)
: null;
? Buffer.alloc(toRead, 0)
: null;
this.bytesToRead -= toRead;
this.push(buffer);
}

View File

@ -4,11 +4,11 @@ const errors = require('../errors');
const escapeForXml = require('./escapeForXml');
const errorInvalidArgument = errors.InvalidArgument
.customizeDescription('The header \'x-amz-tagging\' shall be ' +
.customizeDescription('The header \'x-amz-tagging\' shall be ' +
'encoded as UTF-8 then URLEncoded URL query parameters without ' +
'tag name duplicates.');
const errorBadRequestLimit50 = errors.BadRequest
.customizeDescription('Object tags cannot be greater than 50');
.customizeDescription('Object tags cannot be greater than 50');
/*
Format of xml request:
@ -38,7 +38,7 @@ const _validator = {
result.Tagging.TagSet &&
result.Tagging.TagSet.length === 1 &&
(
result.Tagging.TagSet[0] === '' ||
result.Tagging.TagSet[0] === '' ||
result.Tagging.TagSet[0] &&
Object.keys(result.Tagging.TagSet[0]).length === 1 &&
result.Tagging.TagSet[0].Tag &&
@ -155,7 +155,7 @@ function parseTagXml(xml, log, cb) {
function convertToXml(objectTags) {
const xml = [];
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
'<Tagging> <TagSet>');
'<Tagging> <TagSet>');
if (objectTags && Object.keys(objectTags).length > 0) {
Object.keys(objectTags).forEach(key => {
xml.push(`<Tag><Key>${escapeForXml(key)}</Key>` +

View File

@ -68,6 +68,31 @@ function _checkUnmodifiedSince(ifUnmodifiedSinceTime, lastModified) {
return res;
}
/**
 * Evaluate the date-based conditional headers ('if-modified-since' and
 * 'if-unmodified-since', including their 'x-amz-copy-source-*' variants)
 * against an object's last-modified date.
 * @param {object} headers - headers from request object
 * @param {string} lastModified - last modified date of object
 * @return {object} contains modifiedSince and unmodifiedSince res objects
 */
function checkDateModifiedHeaders(headers, lastModified) {
    // Compare at whole-second resolution: HTTP dates carry no milliseconds.
    const objectDate = new Date(lastModified);
    objectDate.setMilliseconds(0);
    const objectTime = objectDate.getTime();
    const modifiedHeader = headers['if-modified-since'] ||
        headers['x-amz-copy-source-if-modified-since'];
    const unmodifiedHeader = headers['if-unmodified-since'] ||
        headers['x-amz-copy-source-if-unmodified-since'];
    const modifiedSinceRes = _checkModifiedSince(modifiedHeader, objectTime);
    const unmodifiedSinceRes = _checkUnmodifiedSince(unmodifiedHeader,
        objectTime);
    return { modifiedSinceRes, unmodifiedSinceRes };
}
/**
* validateConditionalHeaders - validates 'if-modified-since',
* 'if-unmodified-since', 'if-match' or 'if-none-match' headers if included in
@ -79,23 +104,14 @@ function _checkUnmodifiedSince(ifUnmodifiedSinceTime, lastModified) {
* empty object if no error
*/
function validateConditionalHeaders(headers, lastModified, contentMD5) {
let lastModifiedDate = new Date(lastModified);
lastModifiedDate.setMilliseconds(0);
lastModifiedDate = lastModifiedDate.getTime();
const ifMatchHeader = headers['if-match'] ||
headers['x-amz-copy-source-if-match'];
const ifNoneMatchHeader = headers['if-none-match'] ||
headers['x-amz-copy-source-if-none-match'];
const ifModifiedSinceHeader = headers['if-modified-since'] ||
headers['x-amz-copy-source-if-modified-since'];
const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
headers['x-amz-copy-source-if-unmodified-since'];
const etagMatchRes = _checkEtagMatch(ifMatchHeader, contentMD5);
const etagNoneMatchRes = _checkEtagNoneMatch(ifNoneMatchHeader, contentMD5);
const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader,
lastModifiedDate);
const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader,
lastModifiedDate);
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(headers, lastModified);
// If-Unmodified-Since condition evaluates to false and If-Match
// is not present, then return the error. Otherwise, If-Unmodified-Since is
// silent when If-Match match, and when If-Match does not match, it's the
@ -120,5 +136,6 @@ module.exports = {
_checkEtagNoneMatch,
_checkModifiedSince,
_checkUnmodifiedSince,
checkDateModifiedHeaders,
validateConditionalHeaders,
};

View File

@ -10,6 +10,7 @@ const routeOPTIONS = require('./routes/routeOPTIONS');
const routesUtils = require('./routesUtils');
const routeWebsite = require('./routes/routeWebsite');
const { objectKeyByteLimit } = require('../constants');
const requestUtils = require('../../lib/policyEvaluator/requestUtils');
const routeMap = {
@ -42,7 +43,7 @@ function checkBucketAndKey(bucketName, objectKey, method, reqQuery,
log.debug('empty bucket name', { method: 'routes' });
return (method !== 'OPTIONS') ?
errors.MethodNotAllowed : errors.AccessForbidden
.customizeDescription('CORSResponse: Bucket not found');
.customizeDescription('CORSResponse: Bucket not found');
}
if (bucketName !== undefined && routesUtils.isValidBucketName(bucketName,
blacklistedPrefixes.bucket) === false) {
@ -57,8 +58,14 @@ function checkBucketAndKey(bucketName, objectKey, method, reqQuery,
blacklistedPrefixes.object);
if (!result.isValid) {
log.debug('invalid object key', { objectKey });
return errors.InvalidArgument.customizeDescription('Object key ' +
`must not start with "${result.invalidPrefix}".`);
if (result.invalidPrefix) {
return errors.InvalidArgument.customizeDescription('Invalid ' +
'prefix - object key cannot start with ' +
`"${result.invalidPrefix}".`);
}
return errors.KeyTooLong.customizeDescription('Object key is too ' +
'long. Maximum number of bytes allowed in keys is ' +
`${objectKeyByteLimit}.`);
}
}
if ((reqQuery.partNumber || reqQuery.uploadId)
@ -85,7 +92,7 @@ function checkTypes(req, res, params, logger, s3config) {
'bad routes param: internalHandlers must be an object');
if (params.statsClient) {
assert.strictEqual(typeof params.statsClient, 'object',
'bad routes param: statsClient must be an object');
'bad routes param: statsClient must be an object');
}
assert(Array.isArray(params.allEndpoints),
'bad routes param: allEndpoints must be an array');
@ -93,13 +100,13 @@ function checkTypes(req, res, params, logger, s3config) {
'bad routes param: allEndpoints must have at least one endpoint');
params.allEndpoints.forEach(endpoint => {
assert.strictEqual(typeof endpoint, 'string',
'bad routes param: each item in allEndpoints must be a string');
'bad routes param: each item in allEndpoints must be a string');
});
assert(Array.isArray(params.websiteEndpoints),
'bad routes param: allEndpoints must be an array');
params.websiteEndpoints.forEach(endpoint => {
assert.strictEqual(typeof endpoint, 'string',
'bad routes param: each item in websiteEndpoints must be a string');
'bad routes param: each item in websiteEndpoints must be a string');
});
assert.strictEqual(typeof params.blacklistedPrefixes, 'object',
'bad routes param: blacklistedPrefixes must be an object');
@ -107,16 +114,16 @@ function checkTypes(req, res, params, logger, s3config) {
'bad routes param: blacklistedPrefixes.bucket must be an array');
params.blacklistedPrefixes.bucket.forEach(pre => {
assert.strictEqual(typeof pre, 'string',
'bad routes param: each blacklisted bucket prefix must be a string');
'bad routes param: each blacklisted bucket prefix must be a string');
});
assert(Array.isArray(params.blacklistedPrefixes.object),
'bad routes param: blacklistedPrefixes.object must be an array');
params.blacklistedPrefixes.object.forEach(pre => {
assert.strictEqual(typeof pre, 'string',
'bad routes param: each blacklisted object prefix must be a string');
'bad routes param: each blacklisted object prefix must be a string');
});
assert.strictEqual(typeof params.dataRetrievalFn, 'function',
'bad routes param: dataRetrievalFn must be a defined function');
assert.strictEqual(typeof params.dataRetrievalParams, 'object',
'bad routes param: dataRetrievalParams must be a defined object');
if (s3config) {
assert.strictEqual(typeof s3config, 'object', 'bad routes param: s3config must be an object');
}
@ -138,7 +145,8 @@ function checkTypes(req, res, params, logger, s3config) {
* @param {string[]} params.blacklistedPrefixes.object - object prefixes
* @param {object} params.unsupportedQueries - object containing true/false
* values for whether queries are supported
* @param {function} params.dataRetrievalFn - function to retrieve data
* @param {function} params.dataRetrievalParams - params to create instance of
* data retrieval function
* @param {RequestLogger} logger - werelogs logger instance
* @param {String} [s3config] - s3 configuration
* @returns {undefined}
@ -153,7 +161,7 @@ function routes(req, res, params, logger, s3config) {
allEndpoints,
websiteEndpoints,
blacklistedPrefixes,
dataRetrievalFn,
dataRetrievalParams,
} = params;
const clientInfo = {
@ -171,10 +179,11 @@ function routes(req, res, params, logger, s3config) {
reqUids = undefined;
}
const log = (reqUids !== undefined ?
logger.newRequestLoggerFromSerializedUids(reqUids) :
logger.newRequestLogger());
logger.newRequestLoggerFromSerializedUids(reqUids) :
logger.newRequestLogger());
if (!req.url.startsWith('/_/healthcheck')) {
if (!req.url.startsWith('/_/healthcheck') &&
!req.url.startsWith('/_/report')) {
log.info('received request', clientInfo);
}
@ -207,7 +216,7 @@ function routes(req, res, params, logger, s3config) {
return routesUtils.responseXMLBody(
errors.InvalidURI.customizeDescription('Could not parse the ' +
'specified URI. Check your restEndpoints configuration.'),
undefined, res, log);
undefined, res, log);
}
log.addDefaultFields({
@ -229,16 +238,17 @@ function routes(req, res, params, logger, s3config) {
if (bucketOrKeyError) {
log.trace('error with bucket or key value',
{ error: bucketOrKeyError });
{ error: bucketOrKeyError });
return routesUtils.responseXMLBody(bucketOrKeyError, null, res, log);
}
// bucket website request
if (websiteEndpoints && websiteEndpoints.indexOf(req.parsedHost) > -1) {
return routeWebsite(req, res, api, log, statsClient, dataRetrievalFn);
return routeWebsite(req, res, api, log, statsClient,
dataRetrievalParams);
}
return method(req, res, api, log, statsClient, dataRetrievalFn);
return method(req, res, api, log, statsClient, dataRetrievalParams);
}
module.exports = routes;

View File

@ -7,7 +7,7 @@ function routeDELETE(request, response, api, log, statsClient) {
if (request.query.uploadId) {
if (request.objectKey === undefined) {
return routesUtils.responseNoBody(
errors.InvalidRequest.customizeDescription('A key must be ' +
errors.InvalidRequest.customizeDescription('A key must be ' +
'specified'), null, response, 200, log);
}
api.callApiMethod('multipartDelete', request, response, log,
@ -19,77 +19,77 @@ function routeDELETE(request, response, api, log, statsClient) {
} else if (request.objectKey === undefined) {
if (request.query.website !== undefined) {
return api.callApiMethod('bucketDeleteWebsite', request,
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
} else if (request.query.cors !== undefined) {
return api.callApiMethod('bucketDeleteCors', request, response,
log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
} else if (request.query.replication !== undefined) {
return api.callApiMethod('bucketDeleteReplication', request,
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
} else if (request.query.lifecycle !== undefined) {
return api.callApiMethod('bucketDeleteLifecycle', request,
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
} else if (request.query.policy !== undefined) {
return api.callApiMethod('bucketDeletePolicy', request,
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
} else if (request.query.encryption !== undefined) {
return api.callApiMethod('bucketDeleteEncryption', request,
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
response, log, (err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 204, log);
});
}
api.callApiMethod('bucketDelete', request, response, log,
(err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders, response,
204, log);
});
(err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders, response,
204, log);
});
} else {
if (request.query.tagging !== undefined) {
return api.callApiMethod('objectDeleteTagging', request,
response, log, (err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders,
response, 204, log);
});
response, log, (err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders,
response, 204, log);
});
}
api.callApiMethod('objectDelete', request, response, log,
(err, corsHeaders) => {
/*
(err, corsHeaders) => {
/*
* Since AWS expects a 204 regardless of the existence of
the object, the errors NoSuchKey and NoSuchVersion should not
* be sent back as a response.
*/
if (err && !err.NoSuchKey && !err.NoSuchVersion) {
return routesUtils.responseNoBody(err, corsHeaders,
response, null, log);
}
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(null, corsHeaders, response,
204, log);
});
if (err && !err.NoSuchKey && !err.NoSuchVersion) {
return routesUtils.responseNoBody(err, corsHeaders,
response, null, log);
}
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(null, corsHeaders, response,
204, log);
});
}
return undefined;
}

View File

@ -1,7 +1,8 @@
const errors = require('../../errors');
const routesUtils = require('../routesUtils');
function routerGET(request, response, api, log, statsClient, dataRetrievalFn) {
function routerGET(request, response, api, log, statsClient,
dataRetrievalParams) {
log.debug('routing request', { method: 'routerGET' });
if (request.bucketName === undefined && request.objectKey !== undefined) {
routesUtils.responseXMLBody(errors.NoSuchBucket, null, response, log);
@ -16,18 +17,18 @@ function routerGET(request, response, api, log, statsClient, dataRetrievalFn) {
// GET bucket ACL
if (request.query.acl !== undefined) {
api.callApiMethod('bucketGetACL', request, response, log,
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response, log,
corsHeaders);
});
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response, log,
corsHeaders);
});
} else if (request.query.replication !== undefined) {
api.callApiMethod('bucketGetReplication', request, response, log,
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response, log,
corsHeaders);
});
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response, log,
corsHeaders);
});
} else if (request.query.cors !== undefined) {
api.callApiMethod('bucketGetCors', request, response, log,
(err, xml, corsHeaders) => {
@ -69,7 +70,7 @@ function routerGET(request, response, api, log, statsClient, dataRetrievalFn) {
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response, log,
corsHeaders);
corsHeaders);
});
} else if (request.query.policy !== undefined) {
api.callApiMethod('bucketGetPolicy', request, response, log,
@ -94,11 +95,18 @@ function routerGET(request, response, api, log, statsClient, dataRetrievalFn) {
});
} else if (request.query.encryption !== undefined) {
api.callApiMethod('bucketGetEncryption', request, response, log,
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response,
log, corsHeaders);
});
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response,
log, corsHeaders);
});
} else if (request.query.search !== undefined) {
api.callApiMethod('metadataSearch', request, response, log,
(err, xml, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response,
log, corsHeaders);
});
} else {
// GET bucket
api.callApiMethod('bucketGet', request, response, log,
@ -157,8 +165,8 @@ function routerGET(request, response, api, log, statsClient, dataRetrievalFn) {
log.end().addDefaultFields({ contentLength });
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseStreamData(err, request.query,
resMetaHeaders, dataGetInfo, dataRetrievalFn, response,
range, log);
resMetaHeaders, dataGetInfo, dataRetrievalParams,
response, range, log);
});
}
}

View File

@ -21,11 +21,11 @@ function routeOPTIONS(request, response, api, log, statsClient) {
}
return api.callApiMethod('corsPreflight', request, response, log,
(err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders, response, 200,
log);
});
(err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders, response, 200,
log);
});
}
module.exports = routeOPTIONS;

View File

@ -27,28 +27,28 @@ function routePOST(request, response, api, log) {
if (request.query.uploads !== undefined) {
return api.callApiMethod('initiateMultipartUpload', request,
response, log, (err, result, corsHeaders) =>
routesUtils.responseXMLBody(err, result, response, log,
corsHeaders));
routesUtils.responseXMLBody(err, result, response, log,
corsHeaders));
}
// POST complete multipart upload
if (request.query.uploadId !== undefined) {
return api.callApiMethod('completeMultipartUpload', request,
response, log, (err, result, resHeaders) =>
routesUtils.responseXMLBody(err, result, response, log,
resHeaders));
routesUtils.responseXMLBody(err, result, response, log,
resHeaders));
}
// POST multiObjectDelete
if (request.query.delete !== undefined) {
return api.callApiMethod('multiObjectDelete', request, response,
log, (err, xml, corsHeaders) =>
routesUtils.responseXMLBody(err, xml, response, log,
corsHeaders));
routesUtils.responseXMLBody(err, xml, response, log,
corsHeaders));
}
return routesUtils.responseNoBody(errors.NotImplemented, null, response,
200, log);
200, log);
}
/* eslint-enable no-param-reassign */
module.exports = routePOST;

View File

@ -14,16 +14,16 @@ function routePUT(request, response, api, log, statsClient) {
|| contentLength < 0)) || contentLength === '') {
log.debug('invalid content-length header');
return routesUtils.responseNoBody(
errors.BadRequest, null, response, null, log);
errors.BadRequest, null, response, null, log);
}
// PUT bucket ACL
if (request.query.acl !== undefined) {
api.callApiMethod('bucketPutACL', request, response, log,
(err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 200, log);
});
(err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 200, log);
});
} else if (request.query.versioning !== undefined) {
api.callApiMethod('bucketPutVersioning', request, response, log,
(err, corsHeaders) => {
@ -82,11 +82,11 @@ function routePUT(request, response, api, log, statsClient) {
});
} else if (request.query.encryption !== undefined) {
api.callApiMethod('bucketPutEncryption', request, response, log,
(err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 200, log);
});
(err, corsHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, corsHeaders,
response, 200, log);
});
} else {
// PUT bucket
return api.callApiMethod('bucketPut', request, response, log,
@ -110,7 +110,7 @@ function routePUT(request, response, api, log, statsClient) {
method: 'routePUT',
});
return routesUtils
.responseNoBody(errors.InvalidDigest, null, response, 200, log);
.responseNoBody(errors.InvalidDigest, null, response, 200, log);
}
if (request.headers['content-md5']) {
request.contentMD5 = request.headers['content-md5'];
@ -126,17 +126,17 @@ function routePUT(request, response, api, log, statsClient) {
});
return routesUtils
.responseNoBody(errors.InvalidDigest, null, response, 200,
log);
log);
}
}
if (request.query.partNumber) {
if (request.headers['x-amz-copy-source']) {
api.callApiMethod('objectPutCopyPart', request, response, log,
(err, xml, additionalHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response, log,
(err, xml, additionalHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseXMLBody(err, xml, response, log,
additionalHeaders);
});
});
} else {
api.callApiMethod('objectPutPart', request, response, log,
(err, calculatedHash, corsHeaders) => {
@ -202,11 +202,11 @@ function routePUT(request, response, api, log, statsClient) {
contentLength: request.parsedContentLength,
});
api.callApiMethod('objectPut', request, response, log,
(err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders,
response, 200, log);
});
(err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders,
response, 200, log);
});
}
}
return undefined;

View File

@ -2,7 +2,7 @@ const errors = require('../../errors');
const routesUtils = require('../routesUtils');
function routerWebsite(request, response, api, log, statsClient,
dataRetrievalFn) {
dataRetrievalParams) {
log.debug('routing request', { method: 'routerWebsite' });
// website endpoint only supports GET and HEAD and must have a bucket
// http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html
@ -14,7 +14,7 @@ function routerWebsite(request, response, api, log, statsClient,
if (request.method === 'GET') {
return api.callApiMethod('websiteGet', request, response, log,
(err, userErrorPageFailure, dataGetInfo, resMetaHeaders,
redirectInfo, key) => {
redirectInfo, key) => {
routesUtils.statsReport500(err, statsClient);
// request being redirected
if (redirectInfo) {
@ -27,7 +27,7 @@ function routerWebsite(request, response, api, log, statsClient,
// user has their own error page
if (err && dataGetInfo) {
return routesUtils.streamUserErrorPage(err, dataGetInfo,
dataRetrievalFn, response, resMetaHeaders, log);
dataRetrievalParams, response, resMetaHeaders, log);
}
// send default error html response
if (err) {
@ -37,27 +37,27 @@ function routerWebsite(request, response, api, log, statsClient,
}
// no error, stream data
return routesUtils.responseStreamData(null, request.query,
resMetaHeaders, dataGetInfo, dataRetrievalFn, response,
resMetaHeaders, dataGetInfo, dataRetrievalParams, response,
null, log);
});
}
if (request.method === 'HEAD') {
return api.callApiMethod('websiteHead', request, response, log,
(err, resMetaHeaders, redirectInfo, key) => {
routesUtils.statsReport500(err, statsClient);
if (redirectInfo) {
return routesUtils.redirectRequest(redirectInfo,
key, request.connection.encrypted,
response, request.headers.host, resMetaHeaders, log);
}
// could redirect on err so check for redirectInfo first
if (err) {
return routesUtils.errorHeaderResponse(err, response,
resMetaHeaders, log);
}
return routesUtils.responseContentHeaders(err, {}, resMetaHeaders,
response, log);
});
(err, resMetaHeaders, redirectInfo, key) => {
routesUtils.statsReport500(err, statsClient);
if (redirectInfo) {
return routesUtils.redirectRequest(redirectInfo,
key, request.connection.encrypted,
response, request.headers.host, resMetaHeaders, log);
}
// could redirect on err so check for redirectInfo first
if (err) {
return routesUtils.errorHeaderResponse(err, response,
resMetaHeaders, log);
}
return routesUtils.responseContentHeaders(err, {}, resMetaHeaders,
response, log);
});
}
return undefined;
}

View File

@ -4,6 +4,9 @@ const errors = require('../errors');
const constants = require('../constants');
const { eachSeries } = require('async');
const DataWrapper = require('../storage/data/DataWrapper');
const { objectKeyByteLimit } = require('../constants');
const responseErr = new Error();
responseErr.code = 'ResponseError';
responseErr.message = 'response closed by client request before all data sent';
@ -25,7 +28,7 @@ function setCommonResponseHeaders(headers, response, log) {
} catch (e) {
log.debug('header can not be added ' +
'to the response', { header: headers[key],
error: e.stack, method: 'setCommonResponseHeaders' });
error: e.stack, method: 'setCommonResponseHeaders' });
}
}
});
@ -68,7 +71,7 @@ const XMLResponseBackend = {
* @return {object} response - response object with additional headers
*/
okResponse: function okXMLResponse(xml, response, log,
additionalHeaders) {
additionalHeaders) {
const bytesSent = Buffer.byteLength(xml);
log.trace('sending success xml response');
log.addDefaultFields({
@ -115,7 +118,7 @@ const XMLResponseBackend = {
`<Message>${errCode.description}</Message>`,
'<Resource></Resource>',
`<RequestId>${log.getSerializedUids()}</RequestId>`,
'</Error>'
'</Error>',
);
const xmlStr = xml.join('');
const bytesSent = Buffer.byteLength(xmlStr);
@ -145,7 +148,7 @@ const JSONResponseBackend = {
* @return {object} response - response object with additional headers
*/
okResponse: function okJSONResponse(json, response, log,
additionalHeaders) {
additionalHeaders) {
const bytesSent = Buffer.byteLength(json);
log.trace('sending success json response');
log.addDefaultFields({
@ -163,7 +166,7 @@ const JSONResponseBackend = {
},
errorResponse: function errorJSONResponse(errCode, response, log,
corsHeaders) {
corsHeaders) {
log.trace('sending error json response', { errCode });
/*
{
@ -257,7 +260,7 @@ function okContentHeadersResponse(overrideParams, resHeaders,
return response;
}
function retrieveDataAzure(locations, retrieveDataFn, response, logger) {
function retrieveDataAzure(locations, retrieveDataParams, response, logger) {
const errorHandlerFn = () => { response.connection.destroy(); };
const current = locations.shift();
@ -265,7 +268,18 @@ function retrieveDataAzure(locations, retrieveDataFn, response, logger) {
logger.error('error piping data from source');
errorHandlerFn(err);
});
return retrieveDataFn(current, response, logger, err => {
const {
client,
implName,
config,
kms,
metadata,
locStorageCheckFn,
vault,
} = retrieveDataParams;
const data = new DataWrapper(
client, implName, config, kms, metadata, locStorageCheckFn, vault);
return data.get(current, response, logger, err => {
if (err) {
logger.error('failed to get object from source', {
error: err,
@ -278,12 +292,12 @@ function retrieveDataAzure(locations, retrieveDataFn, response, logger) {
});
}
function retrieveData(locations, retrieveDataFn, response, log) {
function retrieveData(locations, retrieveDataParams, response, log) {
if (locations.length === 0) {
return response.end();
}
if (locations[0].azureStreamingOptions) {
return retrieveDataAzure(locations, retrieveDataFn, response, log);
return retrieveDataAzure(locations, retrieveDataParams, response, log);
}
// response is of type http.ServerResponse
let responseDestroyed = false;
@ -293,16 +307,33 @@ function retrieveData(locations, retrieveDataFn, response, log) {
response.destroy();
responseDestroyed = true;
};
const _destroyReadable = readable => {
// s3-data sends Readable stream only which does not implement destroy
if (readable && readable.destroy) {
readable.destroy();
}
};
// the S3-client might close the connection while we are processing it
response.once('close', () => {
responseDestroyed = true;
if (currentStream) {
currentStream.destroy();
}
_destroyReadable(currentStream);
});
const {
client,
implName,
config,
kms,
metadata,
locStorageCheckFn,
vault,
} = retrieveDataParams;
const data = new DataWrapper(
client, implName, config, kms, metadata, locStorageCheckFn, vault);
return eachSeries(locations,
(current, next) => retrieveDataFn(current, response, log,
(current, next) => data.get(current, response, log,
(err, readable) => {
// NB: readable is of IncomingMessage type
if (err) {
@ -319,7 +350,7 @@ function retrieveData(locations, retrieveDataFn, response, log) {
if (responseDestroyed || response.isclosed) {
log.debug(
'response destroyed before readable could stream');
readable.destroy();
_destroyReadable(readable);
return next(responseErr);
}
// readable stream successfully consumed
@ -337,27 +368,27 @@ function retrieveData(locations, retrieveDataFn, response, log) {
currentStream = readable;
return readable.pipe(response, { end: false });
}), err => {
currentStream = null;
if (err) {
log.debug('abort response due to error', {
error: err.code, errMsg: err.message });
}
// call end for all cases (error/success) per node.js docs
// recommendation
response.end();
currentStream = null;
if (err) {
log.debug('abort response due to error', {
error: err.code, errMsg: err.message });
}
// call end for all cases (error/success) per node.js docs
// recommendation
response.end();
},
);
}
function _responseBody(responseBackend, errCode, payload, response, log,
additionalHeaders) {
additionalHeaders) {
if (errCode && !response.headersSent) {
return responseBackend.errorResponse(errCode, response, log,
additionalHeaders);
additionalHeaders);
}
if (!response.headersSent) {
return responseBackend.okResponse(payload, response, log,
additionalHeaders);
additionalHeaders);
}
return undefined;
}
@ -366,8 +397,8 @@ function _computeContentLengthFromLocation(dataLocations) {
return dataLocations.reduce(
(sum, location) => (sum !== undefined &&
(typeof location.size === 'number' || typeof location.size === 'string') ?
sum + Number.parseInt(location.size, 10) :
undefined), 0);
sum + Number.parseInt(location.size, 10) :
undefined), 0);
}
function _contentLengthMatchesLocations(contentLength, dataLocations) {
@ -388,7 +419,7 @@ const routesUtils = {
*/
responseXMLBody(errCode, xml, response, log, additionalHeaders) {
return _responseBody(XMLResponseBackend, errCode, xml, response,
log, additionalHeaders);
log, additionalHeaders);
},
/**
@ -402,7 +433,7 @@ const routesUtils = {
*/
responseJSONBody(errCode, json, response, log, additionalHeaders) {
return _responseBody(JSONResponseBackend, errCode, json, response,
log, additionalHeaders);
log, additionalHeaders);
},
/**
@ -417,7 +448,7 @@ const routesUtils = {
responseNoBody(errCode, resHeaders, response, httpCode = 200, log) {
if (errCode && !response.headersSent) {
return XMLResponseBackend.errorResponse(errCode, response, log,
resHeaders);
resHeaders);
}
if (!response.headersSent) {
return okHeaderResponse(resHeaders, response, httpCode, log);
@ -435,10 +466,10 @@ const routesUtils = {
* @return {object} - router's response object
*/
responseContentHeaders(errCode, overrideParams, resHeaders, response,
log) {
log) {
if (errCode && !response.headersSent) {
return XMLResponseBackend.errorResponse(errCode, response, log,
resHeaders);
resHeaders);
}
if (!response.headersSent) {
// Undefined added as an argument since need to send range to
@ -461,7 +492,8 @@ const routesUtils = {
* @param {array | null} dataLocations --
* - array of locations to get streams from sproxyd
* - null if no data for object and only metadata
* @param {function} retrieveDataFn - function to handle streaming data
* @param {object} retrieveDataParams - params to create instance of data
* retrieval function
* @param {http.ServerResponse} response - response sent to the client
* @param {array | undefined} range - range in format of [start, end]
* if range header contained in request or undefined if not
@ -469,10 +501,10 @@ const routesUtils = {
* @return {undefined}
*/
responseStreamData(errCode, overrideParams, resHeaders, dataLocations,
retrieveDataFn, response, range, log) {
retrieveDataParams, response, range, log) {
if (errCode && !response.headersSent) {
return XMLResponseBackend.errorResponse(errCode, response, log,
resHeaders);
resHeaders);
}
if (dataLocations !== null && !response.headersSent) {
// sanity check of content length against individual data
@ -480,13 +512,13 @@ const routesUtils = {
const contentLength = resHeaders && resHeaders['Content-Length'];
if (contentLength !== undefined &&
!_contentLengthMatchesLocations(contentLength,
dataLocations)) {
dataLocations)) {
log.error('logic error: total length of fetched data ' +
'locations does not match returned content-length',
{ contentLength, dataLocations });
{ contentLength, dataLocations });
return XMLResponseBackend.errorResponse(errors.InternalError,
response, log,
resHeaders);
response, log,
resHeaders);
}
}
if (!response.headersSent) {
@ -505,20 +537,21 @@ const routesUtils = {
httpCode: response.statusCode,
});
});
return retrieveData(dataLocations, retrieveDataFn, response, log);
return retrieveData(dataLocations, retrieveDataParams, response, log);
},
/**
* @param {object} err -- arsenal error object
* @param {array} dataLocations --
* - array of locations to get streams from backend
* @param {function} retrieveDataFn - function to handle streaming data
* @param {object} retrieveDataParams - params to create instance of
* data retrieval function
* @param {http.ServerResponse} response - response sent to the client
* @param {object} corsHeaders - CORS-related response headers
* @param {object} log - Werelogs logger
* @return {undefined}
*/
streamUserErrorPage(err, dataLocations, retrieveDataFn, response,
streamUserErrorPage(err, dataLocations, retrieveDataParams, response,
corsHeaders, log) {
setCommonResponseHeaders(corsHeaders, response, log);
response.writeHead(err.code, { 'Content-type': 'text/html' });
@ -527,7 +560,7 @@ const routesUtils = {
httpCode: response.statusCode,
});
});
return retrieveData(dataLocations, retrieveDataFn, response, log);
return retrieveData(dataLocations, retrieveDataParams, response, log);
},
/**
@ -558,7 +591,7 @@ const routesUtils = {
`<h1>${err.code} ${response.statusMessage}</h1>`,
'<ul>',
`<li>Code: ${err.message}</li>`,
`<li>Message: ${err.description}</li>`
`<li>Message: ${err.description}</li>`,
);
if (!userErrorPageFailure && bucketName) {
@ -568,7 +601,7 @@ const routesUtils = {
`<li>RequestId: ${log.getSerializedUids()}</li>`,
// AWS response contains HostId here.
// TODO: consider adding
'</ul>'
'</ul>',
);
if (userErrorPageFailure) {
html.push(
@ -578,13 +611,13 @@ const routesUtils = {
'<ul>',
`<li>Code: ${err.message}</li>`,
`<li>Message: ${err.description}</li>`,
'</ul>'
'</ul>',
);
}
html.push(
'<hr/>',
'</body>',
'</html>'
'</html>',
);
return response.end(html.join(''), 'utf8', () => {
@ -806,7 +839,7 @@ const routesUtils = {
// most specific potential hostname
bucketName =
potentialBucketName.length < bucketName.length ?
potentialBucketName : bucketName;
potentialBucketName : bucketName;
}
}
}
@ -814,7 +847,7 @@ const routesUtils = {
return bucketName;
}
throw new Error(
`bad request: hostname ${host} is not in valid endpoints`
`bad request: hostname ${host} is not in valid endpoints`,
);
},
@ -881,6 +914,9 @@ const routesUtils = {
if (invalidPrefix) {
return { isValid: false, invalidPrefix };
}
if (Buffer.byteLength(objectKey, 'utf8') > objectKeyByteLimit) {
return { isValid: false };
}
return { isValid: true };
},

View File

@ -79,9 +79,9 @@ class ListRecordStream extends stream.Readable {
'did not encounter the last saved offset in oplog, ' +
'resuming processing right after the latest record ' +
'to date; some entries may have been skipped', {
lastSavedID: this._lastSavedID,
latestRecordID: this._latestOplogID,
});
lastSavedID: this._lastSavedID,
latestRecordID: this._latestOplogID,
});
this._unpublishedListing = true;
}
++this._skipCount;

View File

@ -552,11 +552,11 @@ class MongoClientInterface {
$exists: false,
},
},
{
'value.versionId': {
$gt: objVal.versionId,
},
{
'value.versionId': {
$gt: objVal.versionId,
},
},
],
};
// values to update master

View File

@ -29,5 +29,4 @@ server.start(() => {
logger.info('Metadata Proxy Server successfully started. ' +
`Using the ${metadataWrapper.implName} backend`);
});
```

View File

@ -60,8 +60,8 @@ class TestMatrix {
this.elementsToSpecialize = elementsToSpecialize;
this.callback = callback;
this.description = typeof description === 'undefined'
? ''
: description;
? ''
: description;
return this;
}
@ -158,15 +158,15 @@ class TestMatrix {
const callFunction = (matrixFather, matrixChild, callback,
description) => {
const result = Object.keys(matrixChild.params)
.every(currentKey =>
Object.prototype.toString.call(
matrixChild.params[currentKey]
.every(currentKey =>
Object.prototype.toString.call(
matrixChild.params[currentKey],
).indexOf('Array') === -1);
if (result === true) {
describe(matrixChild.serialize(), () => {
it(description,
done => callback(matrixChild, done));
done => callback(matrixChild, done));
});
} else {
describe(matrixChild.serialize(), () => {

View File

@ -247,7 +247,7 @@ function decode(str) {
}
module.exports = { generateVersionId, getInfVid,
hexEncode, hexDecode,
base62Encode, base62Decode,
encode, decode,
ENC_TYPE_HEX, ENC_TYPE_BASE62 };
hexEncode, hexDecode,
base62Encode, base62Decode,
encode, decode,
ENC_TYPE_HEX, ENC_TYPE_BASE62 };

View File

@ -84,7 +84,7 @@ class VersioningRequestProcessor {
return callback(null, data);
}
logger.debug('master version is a PHD, getting the latest version',
{ db, key });
{ db, key });
// otherwise, need to search for the latest version
return this.getByListing(request, logger, callback);
});
@ -187,7 +187,7 @@ class VersioningRequestProcessor {
return entry.callback(err, value);
}
return this.wgm.get(entry.request, entry.logger,
entry.callback);
entry.callback);
});
delete this.queue[cacheKey];
}
@ -267,19 +267,19 @@ class VersioningRequestProcessor {
return callback(err);
}
return this.writeCache.batch({ db, array, options },
logger, err => callback(err, `{"versionId":"${vid}"}`));
logger, err => callback(err, `{"versionId":"${vid}"}`));
};
if (versionId) {
return this.processVersionSpecificPut(request, logger,
versioningCb);
versioningCb);
}
if (versioning) {
return this.processNewVersionPut(request, logger, versioningCb);
}
// no versioning or versioning configuration off
return this.writeCache.batch({ db, array: [{ key, value }] },
logger, callback);
logger, callback);
}
/**
@ -353,7 +353,7 @@ class VersioningRequestProcessor {
if (!(options && options.versionId)) {
return this.writeCache.batch({ db,
array: [{ key, type: 'del' }] },
logger, callback);
logger, callback);
}
// version specific DELETE
return this.processVersionSpecificDelete(request, logger,
@ -399,7 +399,7 @@ class VersioningRequestProcessor {
const cacheKey = formatCacheKey(db, key);
clearTimeout(this.repairing[cacheKey]);
this.repairing[cacheKey] = setTimeout(() =>
this.getByListing(request, logger, () => {}), 15000);
this.getByListing(request, logger, () => {}), 15000);
}
return callback(null, ops, versionId);
});

View File

@ -3,14 +3,14 @@
"engines": {
"node": ">=16"
},
"version": "7.10.15",
"version": "8.1.39",
"description": "Common utilities for the S3 project components",
"main": "build/index.js",
"repository": {
"type": "git",
"url": "git+https://github.com/scality/Arsenal.git"
},
"author": "Giorgio Regni",
"author": "Scality Inc.",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/scality/Arsenal/issues"
@ -21,17 +21,18 @@
"JSONStream": "^1.0.0",
"agentkeepalive": "^4.1.3",
"ajv": "6.12.2",
"async": "~2.1.5",
"async": "~2.6.1",
"aws-sdk": "2.80.0",
"azure-storage": "2.10.3",
"backo": "^1.1.0",
"base-x": "3.0.8",
"base62": "2.0.1",
"bson": "4.0.0",
"debug": "~2.6.9",
"debug": "~4.1.0",
"diskusage": "^1.1.1",
"fcntl": "github:scality/node-fcntl#0.2.0",
"hdclient": "scality/hdclient#1.1.0",
"https-proxy-agent": "^2.2.0",
"ioredis": "^4.28.5",
"ipaddr.js": "1.9.1",
"level": "~5.0.1",
@ -39,11 +40,11 @@
"mongodb": "^3.0.1",
"node-forge": "^0.7.1",
"prom-client": "10.2.3",
"simple-glob": "^0.2",
"socket.io": "~2.3.0",
"socket.io-client": "~2.3.0",
"simple-glob": "^0.2.0",
"socket.io": "2.4.1",
"socket.io-client": "2.4.0",
"sproxydclient": "github:scality/sproxydclient#8.0.3",
"utf8": "2.1.2",
"utf8": "3.0.0",
"uuid": "^3.0.1",
"werelogs": "scality/werelogs#8.1.0",
"xml2js": "~0.4.23"
@ -57,13 +58,13 @@
"@sinonjs/fake-timers": "^6.0.1",
"@types/jest": "^27.4.1",
"@types/node": "^17.0.21",
"eslint": "2.13.1",
"eslint": "^8.9.0",
"eslint-config-airbnb": "6.2.0",
"eslint-config-scality": "scality/Guidelines#7.10.2",
"eslint-config-scality": "scality/Guidelines#ec33dfb",
"eslint-plugin-react": "^4.3.0",
"jest": "^27.5.1",
"mocha": "8.0.1",
"mongodb-memory-server": "^6.0.2",
"nyc": "^15.1.0",
"sinon": "^9.0.2",
"temp": "0.9.1",
"ts-jest": "^27.1.3",
@ -77,11 +78,15 @@
"test": "jest tests/unit",
"build": "tsc",
"prepare": "yarn build || true",
"ft_test": "jest tests/functional --testTimeout=120000 --forceExit"
"ft_test": "jest tests/functional --testTimeout=120000 --forceExit",
"coverage": "nyc --clean jest tests --coverage --testTimeout=120000 --forceExit"
},
"private": true,
"jest": {
"private": true,
"maxWorkers": 1,
"coverageReporters": [
"json"
],
"collectCoverageFrom": [
"lib/**/*.{js,ts}",
"index.js"
@ -94,5 +99,12 @@
}
}
}
},
"nyc": {
"tempDirectory": "coverage",
"reporter": [
"lcov",
"text"
]
}
}

View File

@ -43,28 +43,28 @@ describe('KMIP High Level Driver', () => {
it('should work with' +
` x-name attribute: ${!!bucketNameAttributeName},` +
` compound creation: ${compoundCreateActivate}`,
done => {
const kmipClient = new KMIPClient(options, TTLVCodec,
LoopbackServerTransport);
const plaintext = Buffer.from(crypto.randomBytes(32));
async.waterfall([
next => kmipClient.createBucketKey('plop', logger, next),
(id, next) =>
kmipClient.cipherDataKey(1, id, plaintext,
logger, (err, ciphered) => {
next(err, id, ciphered);
}),
(id, ciphered, next) =>
kmipClient.decipherDataKey(
1, id, ciphered, logger, (err, deciphered) => {
assert(plaintext
.compare(deciphered) === 0);
next(err, id);
}),
(id, next) =>
kmipClient.destroyBucketKey(id, logger, next),
], done);
});
done => {
const kmipClient = new KMIPClient(options, TTLVCodec,
LoopbackServerTransport);
const plaintext = Buffer.from(crypto.randomBytes(32));
async.waterfall([
next => kmipClient.createBucketKey('plop', logger, next),
(id, next) =>
kmipClient.cipherDataKey(1, id, plaintext,
logger, (err, ciphered) => {
next(err, id, ciphered);
}),
(id, ciphered, next) =>
kmipClient.decipherDataKey(
1, id, ciphered, logger, (err, deciphered) => {
assert(plaintext
.compare(deciphered) === 0);
next(err, id);
}),
(id, next) =>
kmipClient.destroyBucketKey(id, logger, next),
], done);
});
});
});
it('should succeed healthcheck with working KMIP client and server', done => {
@ -84,7 +84,7 @@ describe('KMIP High Level Driver', () => {
},
};
const kmipClient = new KMIPClient(options, TTLVCodec,
LoopbackServerTransport);
LoopbackServerTransport);
kmipClient.healthcheck(logger, err => {
assert.ifError(err);
done();

View File

@ -36,17 +36,17 @@ describe('KMIP Low Level Driver', () => {
const kmip = new KMIP(TTLVCodec, MirrorTransport, options);
const requestPayload = fixture.payload(kmip);
kmip.request(logger, fixture.operation,
requestPayload, (err, response) => {
if (err) {
return done(err);
}
const responsePayload = response.lookup(
'Response Message/Batch Item/Response Payload'
)[0];
assert.deepStrictEqual(responsePayload,
requestPayload);
return done();
});
requestPayload, (err, response) => {
if (err) {
return done(err);
}
const responsePayload = response.lookup(
'Response Message/Batch Item/Response Payload',
)[0];
assert.deepStrictEqual(responsePayload,
requestPayload);
return done();
});
});
});
});

View File

@ -14,68 +14,68 @@ describe('KMIP Transport Template Class', () => {
requestNumbers.forEach(iterations => {
it(`should survive ${iterations} iterations` +
` with ${pipelineDepth}way pipeline`,
done => {
const transport = new TransportTemplate(
new EchoChannel,
{
pipelineDepth,
tls: {
port: 5696,
},
});
const request = Buffer.alloc(10).fill(6);
async.times(iterations, (n, next) => {
transport.send(logger, request,
(err, conversation, response) => {
if (err) {
return next(err);
}
if (request.compare(response) !== 0) {
return next(Error('arg'));
}
return next();
});
}, err => {
transport.end();
done(err);
});
});
done => {
const transport = new TransportTemplate(
new EchoChannel,
{
pipelineDepth,
tls: {
port: 5696,
},
});
const request = Buffer.alloc(10).fill(6);
async.times(iterations, (n, next) => {
transport.send(logger, request,
(err, conversation, response) => {
if (err) {
return next(err);
}
if (request.compare(response) !== 0) {
return next(Error('arg'));
}
return next();
});
}, err => {
transport.end();
done(err);
});
});
[true, false].forEach(doEmit => {
it('should report errors to outstanding requests.' +
` w:${pipelineDepth}, i:${iterations}, e:${doEmit}`,
done => {
const echoChannel = new EchoChannel;
echoChannel.clog();
const transport = new TransportTemplate(
echoChannel,
{
pipelineDepth,
tls: {
port: 5696,
},
});
const request = Buffer.alloc(10).fill(6);
/* Using a for loop here instead of anything
done => {
const echoChannel = new EchoChannel;
echoChannel.clog();
const transport = new TransportTemplate(
echoChannel,
{
pipelineDepth,
tls: {
port: 5696,
},
});
const request = Buffer.alloc(10).fill(6);
/* Using a for loop here instead of anything
* asynchronous, the callbacks get stuck in
* the conversation queue and are unwind with
* an error. It is the purpose of this test */
for (let i = 0; i < iterations; ++i) {
transport.send(
logger, request,
(err, conversation, response) => {
assert(err);
assert(!response);
});
}
if (doEmit) {
echoChannel.emit('error', new Error('awesome'));
} else {
transport.abortPipeline(echoChannel);
}
transport.end();
done();
});
for (let i = 0; i < iterations; ++i) {
transport.send(
logger, request,
(err, conversation, response) => {
assert(err);
assert(!response);
});
}
if (doEmit) {
echoChannel.emit('error', new Error('awesome'));
} else {
transport.abortPipeline(echoChannel);
}
transport.end();
done();
});
});
});
});

View File

@ -0,0 +1,463 @@
const async = require('async');
const assert = require('assert');
const sinon = require('sinon');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo');
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = require('../../../../lib/versioning/VersionID').generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const replicationGroupId = 'RG001';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27018 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
let uidCounter = 0;
function generateVersionId() {
return genVID(`${process.pid}.${uidCounter++}`,
replicationGroupId);
}
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface::metadata.deleteObjectMD', () => {
let metadata;
let collection;
function getObjectCount(cb) {
collection.countDocuments((err, count) => {
if (err) {
cb(err);
}
cb(null, count);
});
}
function getObject(key, cb) {
collection.findOne({
_id: key,
}, {}, (err, doc) => {
if (err) {
return cb(err);
}
if (!doc) {
return cb(errors.NoSuchKey);
}
return cb(null, doc.value);
});
}
beforeAll(done => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27018',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should delete non versioned object ${variation.vFormat}`, done => {
const params = {
objName: 'non-deleted-object',
objVal: {
key: 'non-deleted-object',
versionId: 'null',
},
};
const versionParams = {
versioning: false,
versionId: null,
repairMaster: null,
};
return async.series([
next => {
// we put the master version of object
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we put the master version of a second object
params.objName = 'object-to-deleted';
params.objVal.key = 'object-to-deleted';
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// We delete the first object
metadata.deleteObjectMD(BUCKET_NAME, params.objName, null, logger, next);
},
next => {
// Object must be removed
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
// only 1 object remaining in db
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 1);
return next();
});
},
], done);
});
it(`Should not throw error when object non existent ${variation.vFormat}`, done => {
const objName = 'non-existent-object';
metadata.deleteObjectMD(BUCKET_NAME, objName, null, logger, err => {
assert.deepStrictEqual(err, null);
return done();
});
});
it(`Should not throw error when bucket non existent ${variation.vFormat}`, done => {
const objName = 'non-existent-object';
metadata.deleteObjectMD(BUCKET_NAME, objName, null, logger, err => {
assert.deepStrictEqual(err, null);
return done();
});
});
it(`Master should not be updated when non lastest version is deleted ${variation.vFormat}`, done => {
let versionId1 = null;
const params = {
objName: 'test-object',
objVal: {
key: 'test-object',
versionId: 'null',
},
vFormat: 'v0',
};
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
return async.series([
next => {
// we start by creating a new version and master
versionId1 = generateVersionId(this.replicationGroupId);
params.versionId = versionId1;
params.objVal.versionId = versionId1;
versionParams.versionId = versionId1;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we create a second version of the same object (master is updated)
params.objVal.versionId = 'version2';
versionParams.versionId = null;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we delete the first version
metadata.deleteObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 },
logger, next);
},
next => {
// the first version should no longer be available
metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 }, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
// master must be containing second version metadata
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.notStrictEqual(data.versionId, versionId1);
return next();
});
},
next => {
// master and one version remaining in db
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
});
},
], done);
});
it(`Master should be updated when last version is deleted ${variation.vFormat}`, done => {
let versionId1;
let versionId2;
const params = {
objName: 'test-object',
objVal: {
key: 'test-object',
versionId: 'null',
isLast: false,
},
};
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
return async.series([
next => {
// we start by creating a new version and master
versionId1 = generateVersionId(this.replicationGroupId);
params.versionId = versionId1;
params.objVal.versionId = versionId1;
versionParams.versionId = versionId1;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we create a second version of the same object (master is updated)
// params.objVal.versionId = 'version2';
// versionParams.versionId = null;
versionId2 = generateVersionId(this.replicationGroupId);
params.versionId = versionId2;
params.objVal.versionId = versionId2;
versionParams.versionId = versionId2;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// deleting latest version
metadata.deleteObjectMD(BUCKET_NAME, params.objName, { versionId: versionId2 },
logger, next);
},
next => {
// latest version must be removed
metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId2 }, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
// master must be updated to contain first version data
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.versionId, versionId1);
return next();
});
},
next => {
// one master and version in the db
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
});
},
], done);
});
it(`Should fail when version id non existent ${variation.vFormat}`, done => {
const versionId = generateVersionId(this.replicationGroupId);
const objName = 'test-object';
metadata.deleteObjectMD(BUCKET_NAME, objName, { versionId }, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
itOnlyInV1(`Should create master when delete marker removed ${variation.vFormat}`, done => {
const objVal = {
key: 'test-object',
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let firstVersionVersionId;
let deleteMarkerVersionId;
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
if (err) {
return next(err);
}
firstVersionVersionId = JSON.parse(res).versionId;
return next();
}),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
if (err) {
return next(err);
}
deleteMarkerVersionId = JSON.parse(res).versionId;
return next();
});
},
next => {
// using fake clock to override the setTimeout used by the repair
const clock = sinon.useFakeTimers();
return metadata.deleteObjectMD(BUCKET_NAME, 'test-object', { versionId: deleteMarkerVersionId },
logger, () => {
// running the repair callback
clock.runAll();
clock.restore();
return next();
});
},
// waiting for the repair callback to finish
next => setTimeout(next, 100),
// master should be created
next => {
getObject('\x7fMtest-object', (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, 'test-object');
assert.strictEqual(object.versionId, firstVersionVersionId);
assert.strictEqual(object.isDeleteMarker, false);
return next();
});
},
], done);
});
itOnlyInV1(`Should delete master when delete marker becomes last version ${variation.vFormat}`, done => {
const objVal = {
key: 'test-object',
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let versionId;
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, next),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, next);
},
// putting new version on top of delete marker
next => {
objVal.isDeleteMarker = false;
return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
if (err) {
return next(err);
}
versionId = JSON.parse(res).versionId;
return next();
});
},
next => {
// using fake clock to override the setTimeout used by the repair
const clock = sinon.useFakeTimers();
return metadata.deleteObjectMD(BUCKET_NAME, 'test-object', { versionId },
logger, () => {
// running the repair callback
clock.runAll();
clock.restore();
return next();
});
},
// waiting for the repair callback to finish
next => setTimeout(next, 100),
// master must be deleted
next => {
getObject('\x7fMtest-object', err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
], done);
});
});
});
});

View File

@ -0,0 +1,283 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo');
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = versioning.VersionID.generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { formatMasterKey } = require('../../../../lib/storage/metadata/mongoclient/utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const replicationGroupId = 'RG001';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27019 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
let uidCounter = 0;
// Build a unique version id from a per-process monotonically increasing
// counter namespaced by the pid, tagged with the replication group.
function generateVersionId() {
    const uid = `${process.pid}.${uidCounter}`;
    uidCounter += 1;
    return genVID(uid, replicationGroupId);
}
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface::metadata.getObjectMD', () => {
let metadata;
let collection;
let versionId1;
let versionId2;
let params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
};
function updateMasterObject(objName, versionId, objVal, vFormat, cb) {
const mKey = formatMasterKey(objName, vFormat);
collection.updateOne(
{
_id: mKey,
$or: [{
'value.versionId': {
$exists: false,
},
},
{
'value.versionId': {
$gt: versionId,
},
},
],
},
{
$set: { _id: mKey, value: objVal },
},
{ upsert: true },
err => {
if (err) {
return cb(err);
}
return cb(null);
});
}
beforeAll(done => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27019',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, (err, res) => {
if (err) {
return next(err);
}
versionId1 = JSON.parse(res).versionId;
return next(null);
});
},
next => {
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, (err, res) => {
if (err) {
return next(err);
}
versionId2 = JSON.parse(res).versionId;
return next(null);
});
},
], done);
});
afterEach(done => {
// reset params
params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
};
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should return latest version of object ${variation.it}`, done =>
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.versionId, versionId2);
return done();
}));
it(`Should return the specified version of object ${variation.it}`, done =>
metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 }, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.versionId, versionId1);
return done();
}));
it(`Should throw error when version non existent ${variation.it}`, done => {
const versionId = '1234567890';
return metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId }, logger, (err, object) => {
assert.deepStrictEqual(object, undefined);
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
it(`Should throw error when object non existent ${variation.it}`, done => {
const objName = 'non-existent-object';
return metadata.getObjectMD(BUCKET_NAME, objName, null, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
it(`Should throw error when object non existent ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
return metadata.getObjectMD(bucketName, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(object, undefined);
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
it(`Should return latest version when master is PHD ${variation.it}`, done => {
async.series([
next => {
const objectName = variation.vFormat === 'v0' ? 'pfx1-test-object' : '\x7fMpfx1-test-object';
// adding isPHD flag to master
const phdVersionId = generateVersionId();
params.objVal.versionId = phdVersionId;
params.objVal.isPHD = true;
updateMasterObject(objectName, phdVersionId, params.objVal,
variation.vFormat, next);
},
// Should return latest object version
next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.versionId, versionId2);
delete params.isPHD;
return next();
}),
], done);
});
itOnlyInV1(`Should return last version when master deleted ${variation.vFormat}`, done => {
const versioningParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
// putting a delete marker as last version
next => {
params.versionId = null;
params.objVal.isDeleteMarker = true;
return metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal, versioningParams,
logger, next);
},
next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.isDeleteMarker, true);
params.objVal.isDeleteMarker = null;
return next();
}),
], done);
});
});
});
});

View File

@ -0,0 +1,412 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo');
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface::metadata.listObject', () => {
let metadata;
function putBulkObjectVersions(bucketName, objName, objVal, params, versionNb, cb) {
let count = 0;
async.whilst(
() => count < versionNb,
cbIterator => {
count++;
// eslint-disable-next-line
return metadata.putObjectMD(bucketName, objName, objVal, params,
logger, cbIterator);
},
err => {
if (err) {
return cb(err);
}
return cb(null);
},
);
}
beforeAll(done => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
return next();
}),
next => {
const params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx2-test-object',
objVal: {
key: 'pfx2-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx3-test-object',
objVal: {
key: 'pfx3-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should list master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 3);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx3-test-object');
return done();
});
});
it(`Should truncate list of master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 2,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
return done();
});
});
it(`Should list master versions of objects that start with prefix ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
prefix: 'pfx2',
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx2-test-object');
return done();
});
});
it(`Should return empty results when bucket non existent (master) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it(`Should list all versions of objects ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterVersions',
maxKeys: 1000,
};
const versionsPerKey = {};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 15);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
return done();
});
});
it(`Should truncate list of master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 5,
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
return done();
});
});
it(`Should list versions of objects that start with prefix ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
prefix: 'pfx2',
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
return done();
});
});
it(`Should return empty results when bucket not existing (version) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Versions.length, 0);
return done();
});
});
it(`should check entire list with pagination (version) ${variation.it}`, done => {
const versionsPerKey = {};
const bucketName = BUCKET_NAME;
const get = (maxKeys, keyMarker, versionIdMarker, cb) => metadata.listObject(bucketName, {
listingType: 'DelimiterVersions',
maxKeys,
keyMarker,
versionIdMarker,
}, logger, (err, res) => {
if (err) {
return cb(err);
}
res.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
if (res.IsTruncated) {
return get(maxKeys, res.NextKeyMarker, res.NextVersionIdMarker, cb);
}
return cb(null);
});
return get(3, null, null, err => {
assert.deepStrictEqual(err, null);
assert.strictEqual(Object.keys(versionsPerKey).length, 3);
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
done();
});
});
it(`should not list phd master key when listing masters ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterMaster',
prefix: 'pfx1',
};
let versionId;
let lastVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
versionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents[0].value.VersionId, versionId);
return next();
}),
], done);
});
it(`should not list phd master key when listing versions ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterVersions',
prefix: 'pfx1',
};
let lastVersionId;
let versionIds;
async.series([
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Versions.length, 5);
versionIds = data.Versions.map(version => version.VersionId);
return next();
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
const newVersionIds = data.Versions.map(version => version.VersionId);
assert.strictEqual(data.Versions.length, 5);
assert(versionIds.every(version => newVersionIds.includes(version)));
return next();
}),
], done);
});
});
});

View File

@ -0,0 +1,429 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo');
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const OBJECT_NAME = 'test-object';
const VERSION_ID = '98451712418844999999RG001 22019.0';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27021 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface:metadata.putObjectMD', () => {
let metadata;
let collection;
function getObject(key, cb) {
collection.findOne({
_id: key,
}, {}, (err, doc) => {
if (err) {
return cb(err);
}
if (!doc) {
return cb(errors.NoSuchKey);
}
return cb(null, doc.value);
});
}
function getObjectCount(cb) {
collection.countDocuments((err, count) => {
if (err) {
cb(err);
}
cb(null, count);
});
}
beforeAll(done => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27021',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should put a new non versionned object ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: 'null',
updated: false,
};
const params = {
versioning: null,
versionId: null,
repairMaster: null,
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
return next();
});
},
// When versionning not active only one document is created (master)
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 1);
return next();
}),
], done);
});
it(`Should update the metadata ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: 'null',
updated: false,
};
const params = {
versioning: null,
versionId: null,
repairMaster: null,
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
next => {
objVal.updated = true;
metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// object metadata must be updated
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.updated, true);
return next();
});
},
// Only a master version should be created
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 1);
return next();
}),
], done);
});
// Putting with an explicit versionId must store both a version key and
// a master key carrying that same versionId.
it(`Should put versionned object with the specific versionId ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// checking if metadata corresponds to what was given to the function
next => {
// v1 buckets prefix master keys with '\x7fM'
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.versionId, VERSION_ID);
return next();
});
},
// We'll have one master and one version
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
}),
], done);
});
// Putting without a versionId while versioning is enabled must mint a
// fresh version and repoint the master at it.
it(`Should put new version and update master ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let versionId = null;
async.series([
// We first create a master and a version
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
// putObjectMD returns a JSON string carrying the new versionId
versionId = JSON.parse(data).versionId;
return next();
}),
// We put another version of the object
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// Master must be updated
next => {
// v1 buckets prefix master keys with '\x7fM'
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.notStrictEqual(object.versionId, versionId);
return next();
});
},
// we'll have two versions and one master
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 3);
return next();
}),
], done);
});
// After versioning is disabled (versionId set to ''), a put must
// overwrite the master in place without adding a version document.
it(`Should update master when versionning is disabled ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let versionId = null;
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
// putObjectMD returns a JSON string carrying the new versionId
versionId = JSON.parse(data).versionId;
return next();
}),
next => {
// Disabling versionning and putting new version
params.versioning = false;
params.versionId = '';
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// Master must be updated
next => {
// v1 buckets prefix master keys with '\x7fM'
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.notStrictEqual(object.versionId, versionId);
return next();
});
},
// The second put shouldn't create a new version
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
}),
], done);
});
// Re-putting an existing versionId with repairMaster set must update
// that version and rebuild the master from it.
it(`Should update latest version and repair master ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
next => {
// Updating the version and repairing master
params.repairMaster = true;
objVal.updated = true;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// Master must be updated
next => {
// v1 buckets prefix master keys with '\x7fM'
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.versionId, VERSION_ID);
assert.strictEqual(object.updated, true);
return next();
});
},
// The second put shouldn't create a new version
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
}),
], done);
});
// v1-only: when the latest version becomes a delete marker, the master
// key must be removed entirely (hence the NoSuchKey expectation).
itOnlyInV1(`Should delete master when last version is delete marker ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// master must be deleted
// key is hard-coded to the v1 master prefix ('\x7fM') since this
// test only runs against v1 buckets
next => getObject('\x7fMtest-object', err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
}),
], done);
});
// v1-only: putting a regular version on top of a delete marker must
// re-create the master key with the new version's metadata.
itOnlyInV1(`Should create master when new version is put on top of delete marker ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// We put a new version on top of delete marker
next => {
objVal.isDeleteMarker = false;
objVal.updated = true;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// master must be created
// key is hard-coded to the v1 master prefix ('\x7fM') since this
// test only runs against v1 buckets
next => getObject('\x7fMtest-object', (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.updated, true);
assert.strictEqual(object.isDeleteMarker, false);
return next();
}),
], done);
});
});
});
});

View File

@ -0,0 +1,330 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo');
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const IMP_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'testbucket';
// In-memory single-node MongoDB replica set backing these tests
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27022 },
],
replSet: {
name: 'rs0',
count: 1,
// NOTE(review): shorthand adds a `DB_NAME` key here, which does not
// look like a replSet option -- was `dbName: DB_NAME` intended?
// TODO confirm against mongodb-memory-server documentation.
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface:withCond', () => {
let metadata;
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
beforeAll(done => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27022',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMP_NAME, opts, null, logger);
metadata.setup(done);
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
describe('::putObjectWithCond', () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
const tests = [
[
`should upsert object if an existing object does not exist ${variation.it}`,
{
initVal: null,
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { number: 24 } },
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
[
`should not update an existing object if the conditions fails ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { number: 24 } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.InternalError,
},
],
[
`should not update an existing object if the conditions fails ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { string: { $eq: 'twenty-four' } } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.InternalError,
},
],
[
`should not update an existing object if the conditions fails ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: {
value: {
string: { $eq: 'twenty-four' },
number: { $eq: 0 },
},
},
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.InternalError,
},
],
[
`should update an existing object if the conditions passes ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { number: 24 } },
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
[
`should update an existing object if the conditions passes ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { string: { $eq: 'twenty-four' } } },
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
[
`should update an existing object if the conditions passes ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: {
value: {
string: { $eq: 'twenty-four' },
number: { $eq: 24 },
},
},
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
];
tests.forEach(([msg, testCase]) => it(msg, done => {
const objectKey = 'testkey';
const {
initVal, upsertVal, conditions, expectedVal, error,
} = testCase;
const params = { conditions };
async.series([
next => {
if (!initVal) {
return next();
}
return metadata.putObjectMD(BUCKET_NAME, objectKey, initVal,
{}, logger, next);
},
next => metadata.putObjectWithCond(BUCKET_NAME, objectKey,
upsertVal, params, logger, err => {
if (error) {
assert.deepStrictEqual(err, error);
return next();
}
assert(!err);
return next();
}),
next => metadata.getObjectMD(BUCKET_NAME, objectKey, {}, logger,
(err, res) => {
assert(!err);
assert.deepStrictEqual(res, expectedVal);
next();
}),
], done);
}));
});
describe('::deleteObjectWithCond', () => {
const tests = [
[
`should return no such key if the object does not exist ${variation.it}`,
{
initVal: null,
conditions: { value: { number: 24 } },
expectedVal: null,
error: errors.NoSuchKey,
},
],
[
`should return no such key if the conditions fails ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
conditions: { value: { number: { $eq: 24 } } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.NoSuchKey,
},
],
[
`should return no such key if the conditions fails ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
conditions: { value: { string: 'twenty-four' } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.NoSuchKey,
},
],
[
`should return no such key if the conditions fails ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
conditions: {
value: {
string: 'twenty-four',
number: { $eq: 0 },
},
},
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.NoSuchKey,
},
],
[
`should successfully delete matched object ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
conditions: { value: { number: 24 } },
expectedVal: null,
error: null,
},
],
[
`should successfully delete matched object ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
conditions: { value: { string: { $eq: 'twenty-four' } } },
expectedVal: null,
error: null,
},
],
[
`should successfully delete matched object ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
conditions: {
value: {
string: { $eq: 'twenty-four' },
number: { $eq: 24 },
},
},
expectedVal: null,
error: null,
},
],
];
tests.forEach(([msg, testCase]) => it(msg, done => {
const objectKey = 'testkey';
const { initVal, conditions, expectedVal, error } = testCase;
const params = { conditions };
async.series([
next => {
if (!initVal) {
return next();
}
return metadata.putObjectMD(BUCKET_NAME, objectKey, initVal,
{}, logger, next);
},
next => metadata.deleteObjectWithCond(BUCKET_NAME, objectKey,
params, logger, err => {
if (error) {
assert.deepStrictEqual(err, error);
return next();
}
assert(!err);
return next();
}),
next => metadata.getObjectMD(BUCKET_NAME, objectKey, {}, logger,
(err, res) => {
if (expectedVal) {
assert.deepStrictEqual(res, expectedVal);
} else {
assert.deepStrictEqual(err, errors.NoSuchKey);
}
return next();
}),
], done);
}));
});
});
});

View File

@ -0,0 +1,319 @@
'use strict'; // eslint-disable-line strict
const werelogs = require('werelogs');
const assert = require('assert');
const async = require('async');
const logger = new werelogs.Logger('MetadataProxyServer', 'debug', 'debug');
const MetadataWrapper =
require('../../../lib/storage/metadata/MetadataWrapper');
const BucketRoutes =
require('../../../lib/storage/metadata/proxy/BucketdRoutes');
const metadataWrapper = new MetadataWrapper('mem', {}, null, logger);
const { RequestDispatcher } = require('../../utils/mdProxyUtils');
const routes = new BucketRoutes(metadataWrapper, logger);
const dispatcher = new RequestDispatcher(routes);
const Bucket = 'test';
const bucketInfo = {
acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
name: Bucket,
owner: '9d8fe19a78974c56dceb2ea4a8f01ed0f5fecb9d29f80e9e3b84104e4a3ea520',
ownerDisplayName: 'anonymousCoward',
creationDate: '2018-06-04T17:45:42.592Z',
mdBucketModelVersion: 8,
transient: false,
deleted: false,
serverSideEncryption: null,
versioningConfiguration: null,
locationConstraint: 'us-east-1',
readLocationConstraint: 'us-east-1',
cors: null,
replicationConfiguration: null,
lifecycleConfiguration: null,
uid: 'fea97818-6a9a-11e8-9777-e311618cc5d4',
isNFS: null,
};
const objects = [
'aaa',
'bbb/xaa',
'bbb/xbb',
'bbb/xcc',
'ccc',
'ddd',
];
// Compute the listing expected from a delimiter ('/') listing of
// `objects` under `prefix`: keep only keys under the prefix, strip it,
// collapse anything past the first '/' into a single common-prefix
// entry, and de-duplicate while preserving order.
//
// Fix: the original mapped every key BEFORE filtering, so keys that do
// not start with `prefix` could leak into the result whenever
// `key.slice(prefix.length)` happened to be non-empty; keys are now
// filtered on `startsWith(prefix)` first.
function _getExpectedListing(prefix, objects) {
const listed = objects
// only keys under the listed prefix can appear in the result
.filter(key => key.startsWith(prefix))
// strip the prefix and fold sub-paths into 'dir/' common prefixes
.map(key => key.slice(prefix.length).replace(/[/].*/, '/'));
const keySet = {};
return listed.filter(key => {
if (keySet[key]) {
return false;
}
if (key === '') {
return false;
}
keySet[key] = true;
return true;
});
}
// Build the bucketd listing route for `prefix`, resuming after
// `marker`, with '/' characters percent-encoded by hand to match the
// proxy's expectations.
function _listingURL(prefix, marker) {
const escapeSlashes = s => s.replace(/[/]/g, '%2F');
return `/default/bucket/${Bucket}?delimiter=%2F&prefix=` +
`${escapeSlashes(prefix)}&maxKeys=1&marker=${escapeSlashes(marker)}`;
}
// Walk the listing one entry at a time (maxKeys=1), asserting each page
// returns the expected Contents/CommonPrefixes entry, truncation flag
// and NextMarker.
function _listObjects(prefix, objects, cb) {
const keys = _getExpectedListing(prefix, objects);
// markers[i] is the marker used to fetch keys[i]; the first page is
// fetched with no marker at all
const markers = keys.slice(0);
markers.unshift(undefined);
const lastKey = keys[keys.length - 1];
const listing = keys.map((key, index) => ({
key,
marker: markers[index],
NextMarker: markers[index + 1],
// every page but the last one must report truncation
IsTruncated: key !== lastKey,
isPrefix: key.endsWith('/'),
}));
async.mapLimit(listing, 5, (obj, next) => {
const currentMarker = obj.marker === undefined ? '' : obj.marker;
dispatcher.get(_listingURL(prefix, prefix + currentMarker),
(err, response, body) => {
if (err) {
return next(err);
}
if (obj.isPrefix) {
// a 'dir/' entry must come back as a single common prefix
assert.strictEqual(body.Contents.length, 0);
assert.strictEqual(body.CommonPrefixes.length,
1);
assert.strictEqual(body.CommonPrefixes[0],
prefix + obj.key);
} else {
// a plain key must come back as a single Contents entry
assert.strictEqual(body.Contents.length, 1);
assert.strictEqual(body.CommonPrefixes.length,
0);
assert.strictEqual(body.Contents[0].key,
prefix + obj.key);
}
assert.strictEqual(body.IsTruncated,
obj.IsTruncated);
if (body.IsTruncated) {
assert.strictEqual(body.NextMarker,
prefix + obj.NextMarker);
}
return next();
});
}, err => cb(err));
}
// POST each key to the bucket route (at most 5 in flight); the final
// callback receives the first error encountered, if any.
function _createObjects(objects, cb) {
async.mapLimit(objects, 5,
(key, next) => dispatcher.post(
`/default/bucket/${Bucket}/${key}`, { key }, next),
err => cb(err));
}
// GET each key back (at most 5 in flight) and check the stored
// document round-trips; the final callback receives the first error.
//
// Fix: the original asserted on `body.key` BEFORE checking `err`, so a
// transport error surfaced as a TypeError on an undefined `body`
// instead of being propagated.
function _readObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.get(`/default/bucket/${Bucket}/${key}`,
(err, response, body) => {
if (err) {
return next(err);
}
assert.deepStrictEqual(body.key, key);
return next();
});
}, err => {
cb(err);
});
}
// DELETE each key (at most 5 in flight); errors are funneled to `cb`.
function _deleteObjects(objects, cb) {
async.mapLimit(objects, 5,
(key, next) => dispatcher.delete(
`/default/bucket/${Bucket}/${key}`, err => next(err)),
err => cb(err));
}
describe('Basic Metadata Proxy Server test',
() => {
jest.setTimeout(10000);
it('Shoud get the metadataInformation', done => {
dispatcher.get('/default/metadataInformation',
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(
body, { metadataVersion: 2 });
return done();
});
});
});
describe('Basic Metadata Proxy Server CRUD test', () => {
jest.setTimeout(10000);
beforeEach(done => {
dispatcher.post(`/default/bucket/${Bucket}`, bucketInfo,
done);
});
afterEach(done => {
dispatcher.delete(`/default/bucket/${Bucket}`, done);
});
it('Should get the bucket attributes', done => {
dispatcher.get(`/default/attributes/${Bucket}`,
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(body.name,
bucketInfo.name);
return done();
});
});
// Create, read, overwrite, re-read and delete a single object through
// the proxy routes.
//
// Fix: both GET steps previously invoked `next` only inside
// `if (!err)`, so any GET error left the waterfall hanging (the test
// timed out instead of failing with the error).
it('Should crud an object', done => {
async.waterfall([
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'gabu' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo,
'gabu');
}
// always continue, propagating any error
next(err);
}),
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'zome' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo,
'zome');
}
next(err);
}),
next => dispatcher.delete(`/default/bucket/${Bucket}/test1`,
err => next(err)),
], err => done(err));
});
it('Should list objects', done => {
async.waterfall([
next => _createObjects(objects, next),
next => _readObjects(objects, next),
next => _listObjects('', objects, next),
next => _listObjects('bbb/', objects, next),
next => _deleteObjects(objects, next),
], err => {
done(err);
});
});
it('Should update bucket properties', done => {
dispatcher.get(
`/default/attributes/${Bucket}`, (err, response, body) => {
assert.strictEqual(err, null);
const bucketInfo = body;
const newOwnerDisplayName = 'divertedfrom';
bucketInfo.ownerDisplayName = newOwnerDisplayName;
dispatcher.post(
`/default/attributes/${Bucket}`, bucketInfo, err => {
assert.strictEqual(err, null);
dispatcher.get(
`/default/attributes/${Bucket}`,
(err, response, body) => {
assert.strictEqual(err, null);
const newBucketInfo = body;
assert.strictEqual(
newBucketInfo.ownerDisplayName,
newOwnerDisplayName);
done(null);
});
});
});
});
it('Should fail to list a non existing bucket', done => {
dispatcher.get('/default/bucket/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('Should fail to get attributes from a non existing bucket', done => {
dispatcher.get('/default/attributes/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('should succeed a health check', done => {
dispatcher.get('/_/healthcheck', (err, response, body) => {
if (err) {
return done(err);
}
const expectedResponse = {
memorybucket: {
code: 200,
message: 'OK',
},
};
assert.strictEqual(response.responseHead.statusCode, 200);
assert.deepStrictEqual(body, expectedResponse);
return done(err);
});
});
it('should work with parallel route', done => {
const objectName = 'theObj';
async.waterfall([
next => _createObjects([objectName], next),
next => {
dispatcher.get(
`/default/parallel/${Bucket}/${objectName}`,
(err, response, body) => {
if (err) {
return next(err);
}
assert.strictEqual(response.responseHead.statusCode,
200);
const bucketMD = JSON.parse(body.bucket);
const objectMD = JSON.parse(body.obj);
const expectedObjectMD = { key: objectName };
assert.deepStrictEqual(bucketMD.name,
bucketInfo.name);
assert.deepStrictEqual(objectMD, expectedObjectMD);
return next(err);
});
},
next => _deleteObjects([objectName], next),
], done);
});
});

View File

@ -31,28 +31,28 @@ describe('StatsClient class', () => {
afterAll(() => redisClient.disconnect());
it('should correctly record a new request by default one increment',
done => {
async.series([
next => {
statsClient.reportNewRequest(id, (err, res) => {
assert.ifError(err);
done => {
async.series([
next => {
statsClient.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepEqual(res, expected);
next();
});
},
next => {
statsClient.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepEqual(res, expected);
next();
});
},
next => {
statsClient.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 2], [null, 1]];
assert.deepEqual(res, expected);
next();
});
},
], done);
});
const expected = [[null, 2], [null, 1]];
assert.deepEqual(res, expected);
next();
});
},
], done);
});
it('should record new requests by defined amount increments', done => {
function noop() {}

View File

@ -0,0 +1,318 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const async = require('async');
const RedisClient = require('../../../lib/metrics/RedisClient');
const StatsModel = require('../../../lib/metrics/StatsModel');
// setup redis client
const config = {
host: '127.0.0.1',
port: 6379,
enableOfflineQueue: true,
};
const fakeLogger = {
trace: () => {},
error: () => {},
};
const redisClient = new RedisClient(config, fakeLogger);
// setup stats model
const STATS_INTERVAL = 300; // 5 minutes
const STATS_EXPIRY = 86400; // 24 hours
const statsModel = new StatsModel(redisClient, STATS_INTERVAL, STATS_EXPIRY);
// Pad `expected` with zeroes up to the number of intervals covered by
// the stats expiry window (STATS_EXPIRY / STATS_INTERVAL entries).
function setExpectedStats(expected) {
const totalIntervals = STATS_EXPIRY / STATS_INTERVAL;
const padding = Array(totalIntervals - expected.length).fill(0);
return [...expected, ...padding];
}
// Since many methods were overwritten, these tests should validate the changes
// made to the original methods
describe('StatsModel class', () => {
const id = 'arsenal-test';
const id2 = 'test-2';
const id3 = 'test-3';
afterEach(() => redisClient.clear(() => {}));
it('should convert a 2d array columns into rows and vice versa using _zip',
() => {
const arrays = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
];
const res = statsModel._zip(arrays);
const expected = [
[1, 4, 7],
[2, 5, 8],
[3, 6, 9],
];
assert.deepStrictEqual(res, expected);
});
it('_zip should return an empty array if given an invalid array', () => {
const arrays = [];
const res = statsModel._zip(arrays);
assert.deepStrictEqual(res, []);
});
it('_getCount should return a an array of all valid integer values',
() => {
const res = statsModel._getCount([
[null, '1'],
[null, '2'],
[null, null],
]);
assert.deepStrictEqual(res, setExpectedStats([1, 2, 0]));
});
it('should correctly record a new request by default one increment',
done => {
async.series([
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 2], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should record new requests by defined amount increments', done => {
function noop() {}
async.series([
next => {
statsModel.reportNewRequest(id, 9);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([9]));
next();
});
},
next => {
statsModel.reportNewRequest(id);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([10]));
next();
});
},
next => {
statsModel.reportNewRequest(id, noop);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([11]));
next();
});
},
], done);
});
it('should correctly record a 500 on the server', done => {
statsModel.report500(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
done();
});
});
it('should respond back with total requests as an array', done => {
async.series([
next => {
statsModel.reportNewRequest(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.report500(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([1]),
'500s': setExpectedStats([1]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should not crash on empty results', done => {
async.series([
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should return a zero-filled array if no ids are passed to getAllStats',
done => {
statsModel.getAllStats(fakeLogger, [], (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([]));
assert.deepStrictEqual(res['500s'], setExpectedStats([]));
done();
});
});
it('should get accurately reported data for given id from getAllStats',
done => {
statsModel.reportNewRequest(id, 9);
statsModel.reportNewRequest(id2, 2);
statsModel.reportNewRequest(id3, 3);
statsModel.report500(id);
async.series([
next => {
statsModel.getAllStats(fakeLogger, [id], (err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 9);
assert.equal(res['500s'][0], 1);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, [id, id2, id3],
(err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 14);
assert.deepStrictEqual(res.requests,
setExpectedStats([14]));
next();
});
},
], done);
});
it('should normalize to the nearest hour using normalizeTimestampByHour',
() => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = new Date(statsModel.normalizeTimestampByHour(date));
assert.strictEqual(date.getHours(), newDate.getHours());
assert.strictEqual(newDate.getMinutes(), 0);
assert.strictEqual(newDate.getSeconds(), 0);
assert.strictEqual(newDate.getMilliseconds(), 0);
});
it('should get previous hour using _getDatePreviousHour', () => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = statsModel._getDatePreviousHour(new Date(date));
const millisecondsInOneHour = 3600000;
assert.strictEqual(date - newDate, millisecondsInOneHour);
});
it('should get an array of hourly timestamps using getSortedSetHours',
() => {
const epoch = 1536882476501;
const millisecondsInOneHour = 3600000;
const expected = [];
let dateInMilliseconds = statsModel.normalizeTimestampByHour(
new Date(epoch));
for (let i = 0; i < 24; i++) {
expected.push(dateInMilliseconds);
dateInMilliseconds -= millisecondsInOneHour;
}
const res = statsModel.getSortedSetHours(epoch);
assert.deepStrictEqual(res, expected);
});
it('should apply TTL on a new sorted set using addToSortedSet', done => {
const key = 'a-test-key';
const score = 100;
const value = 'a-value';
const now = Date.now();
const nearestHour = statsModel.normalizeTimestampByHour(new Date(now));
statsModel.addToSortedSet(key, score, value, (err, res) => {
assert.ifError(err);
// check both a "zadd" and "expire" occurred
assert.equal(res, 1);
redisClient.ttl(key, (err, res) => {
assert.ifError(err);
// assert this new set has a ttl applied
assert(res > 0);
const adjustmentSecs = now - nearestHour;
const msInADay = 24 * 60 * 60 * 1000;
const msInAnHour = 60 * 60 * 1000;
const upperLimitSecs =
Math.ceil((msInADay - adjustmentSecs) / 1000);
const lowerLimitSecs =
Math.floor((msInADay - adjustmentSecs - msInAnHour) / 1000);
// assert new ttl is between 23 and 24 hours adjusted by time
// elapsed since normalized hourly time
assert(res >= lowerLimitSecs);
assert(res <= upperLimitSecs);
done();
});
});
});
});

View File

@ -0,0 +1,326 @@
const assert = require('assert');
const ChainBackend = require('../../../lib/auth/auth').backends.chainBackend;
const BaseBackend = require('../../../lib/auth/auth').backends.baseBackend;
const errors = require('../../../lib/errors');
const testError = new Error('backend error');
const backendWithAllMethods = {
verifySignatureV2: () => {},
verifySignatureV4: () => {},
getCanonicalIds: () => {},
getEmailAddresses: () => {},
checkPolicies: () => {},
healthcheck: () => {},
};
// Shallow-clone the fully-featured backend stub minus one named
// method, to exercise ChainBackend's interface validation.
function getBackendWithMissingMethod(methodName) {
const { [methodName]: _removed, ...backend } = backendWithAllMethods;
return backend;
}
/**
 * Minimal scriptable auth backend: every interface method immediately
 * invokes its callback with the (error, result) pair supplied to the
 * constructor, letting tests control each backend's outcome.
 */
class TestBackend extends BaseBackend {
constructor(service, error, result) {
super(service);
this._error = error; // error passed to every method's callback
this._result = result; // result passed to every method's callback
}
verifySignatureV2(stringToSign, signatureFromRequest, accessKey, options, callback) {
return callback(this._error, this._result);
}
verifySignatureV4(stringToSign, signatureFromRequest, accessKey, region, scopeDate, options, callback) {
return callback(this._error, this._result);
}
getCanonicalIds(emailAddresses, options, callback) {
return callback(this._error, this._result);
}
getEmailAddresses(canonicalIDs, options, callback) {
return callback(this._error, this._result);
}
checkPolicies(requestContextParams, userArn, options, callback) {
return callback(this._error, this._result);
}
healthcheck(reqUid, callback) {
return callback(this._error, this._result);
}
}
describe('Auth Backend: Chain Backend', () => {
    // Constructor validation: each entry is [test title, invalid client list].
    // ChainBackend must reject anything that is not a non-empty array of
    // clients implementing the full backend interface.
    [
        ['should throw an error if client list is not an array', null],
        ['should throw an error if client list empty', []],
        ['should throw an error if a client is missing the verifySignatureV2 method', [
            new TestBackend(),
            getBackendWithMissingMethod('verifySignatureV2'),
        ]],
        ['should throw an error if a client is missing the verifySignatureV4 auth method', [
            new TestBackend(),
            getBackendWithMissingMethod('verifySignatureV4'),
        ]],
        ['should throw an error if a client is missing the getCanonicalId method', [
            new TestBackend(),
            getBackendWithMissingMethod('getCanonicalIds'),
        ]],
        ['should throw an error if a client is missing the getEmailAddresses method', [
            new TestBackend(),
            getBackendWithMissingMethod('getEmailAddresses'),
        ]],
        ['should throw an error if a client is missing the checkPolicies method', [
            new TestBackend(),
            getBackendWithMissingMethod('checkPolicies'),
        ]],
        ['should throw an error if a client is missing the healthcheck method', [
            new TestBackend(),
            getBackendWithMissingMethod('healthcheck'),
        ]],
    ].forEach(([msg, input]) => it(msg, () => {
        assert.throws(() => {
            new ChainBackend('chain', input); // eslint-disable-line no-new
        });
    }));

    // Signature verification: the chain tries clients in order and returns
    // the first successful result; it only errors if every client errors.
    [
        // function name, function args
        ['verifySignatureV2', [null, null, null, null]],
        ['verifySignatureV4', [null, null, null, null, null, null]],
    ].forEach(([fn, fnArgs]) =>
        describe(`::${fn}`, () => {
            it('should return an error if none of the clients returns a result', done => {
                const backend = new ChainBackend('chain', [
                    new TestBackend('test1', testError, null),
                    new TestBackend('test2', testError, null),
                    new TestBackend('test3', testError, null),
                ]);
                backend[fn](...fnArgs, err => {
                    assert.deepStrictEqual(err, testError);
                    done();
                });
            });

            // Each entry: [test title, expected result, per-client
            // TestBackend constructor args].
            [
                [
                    'should return result of the first successful client (multiple successful client)',
                    'expectedResult',
                    // backend constructor args
                    [
                        ['test1', null, 'expectedResult'],
                        ['test2', null, 'test2'],
                        ['test3', testError, null],
                    ],
                ],
                [
                    'should return result of successful client',
                    'expectedResult',
                    // backend constructor args
                    [
                        ['test1', testError, null],
                        ['test2', null, 'expectedResult'],
                        ['test3', testError, null],
                    ],
                ],
                [
                    'should return result of successful client',
                    'expectedResult',
                    // backend constructor args
                    [
                        ['test1', testError, null],
                        // fixed copy-paste duplicate: second client was
                        // labeled 'test1' twice
                        ['test2', testError, null],
                        ['test3', null, 'expectedResult'],
                    ],
                ],
            ].forEach(([msg, expected, backendArgs]) => {
                it(msg, done => {
                    const backend = new ChainBackend('chain',
                        backendArgs.map((args) => new TestBackend(...args)));
                    backend[fn](...fnArgs, (err, res) => {
                        assert.ifError(err);
                        assert.strictEqual(res, expected);
                        done();
                    });
                });
            });
        }));

    // Lookup methods: the chain fans out to all clients and merges their
    // response bodies; any single client error fails the whole call.
    [
        // function name, function args
        ['getCanonicalIds', [null, null]],
        ['getEmailAddresses', [null, null]],
    ].forEach(([fn, fnArgs]) =>
        describe(`::${fn}`, () => {
            it('should return an error if any of the clients fails', done => {
                const backend = new ChainBackend('chain', [
                    new TestBackend('test1', null, { message: { body: { test1: 'aaa' } } }),
                    new TestBackend('test2', testError, null),
                    new TestBackend('test3', null, { message: { body: { test2: 'bbb' } } }),
                ]);
                backend[fn](...fnArgs, err => {
                    assert.deepStrictEqual(err, testError);
                    done();
                });
            });

            it('should merge results from clients into a single response object', done => {
                const backend = new ChainBackend('chain', [
                    new TestBackend('test1', null, { message: { body: { test1: 'aaa' } } }),
                    new TestBackend('test2', null, { message: { body: { test2: 'bbb' } } }),
                ]);
                backend[fn](...fnArgs, (err, res) => {
                    assert.ifError(err);
                    assert.deepStrictEqual(res, {
                        message: { body: {
                            test1: 'aaa',
                            test2: 'bbb',
                        } },
                    });
                    done();
                });
            });
        }));

    describe('::checkPolicies', () => {
        it('should return an error if any of the clients fails', done => {
            const backend = new ChainBackend('chain', [
                new TestBackend('test1', null, {
                    message: { body: [{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/obj1' }] },
                }),
                new TestBackend('test2', testError, null),
                new TestBackend('test3', null, {
                    message: { body: [{ isAllowed: true, arn: 'arn:aws:s3:::policybucket/obj1' }] },
                }),
            ]);
            backend.checkPolicies(null, null, null, err => {
                assert.deepStrictEqual(err, testError);
                done();
            });
        });

        it('should merge results from clients into a single response object', done => {
            const backend = new ChainBackend('chain', [
                new TestBackend('test1', null, {
                    message: { body: [{ isAllowed: false, arn: 'arn:aws:s3:::policybucket/obj1' }] },
                }),
                new TestBackend('test2', null, {
                    message: { body: [{ isAllowed: true, arn: 'arn:aws:s3:::policybucket/obj2' }] },
                }),
                new TestBackend('test3', null, {
                    message: { body: [{ isAllowed: true, arn: 'arn:aws:s3:::policybucket/obj1' }] },
                }),
            ]);
            backend.checkPolicies(null, null, null, (err, res) => {
                assert.ifError(err);
                // a per-arn "allow wins" merge: obj1 is denied by test1 but
                // allowed by test3, so the merged result allows it
                assert.deepStrictEqual(res, {
                    message: { body: [
                        { isAllowed: true, arn: 'arn:aws:s3:::policybucket/obj1' },
                        { isAllowed: true, arn: 'arn:aws:s3:::policybucket/obj2' },
                    ] },
                });
                done();
            });
        });
    });

    describe('::_mergeObject', () => {
        it('should correctly merge responses', () => {
            const objectResps = [
                { message: { body: {
                    id1: 'email1@test.com',
                    wrongformatcanid: 'WrongFormat',
                    id4: 'email4@test.com',
                } } },
                { message: { body: {
                    id2: 'NotFound',
                    id3: 'email3@test.com',
                    id4: 'email5@test.com',
                } } },
            ];
            assert.deepStrictEqual(
                ChainBackend._mergeObjects(objectResps),
                {
                    id1: 'email1@test.com',
                    wrongformatcanid: 'WrongFormat',
                    id2: 'NotFound',
                    id3: 'email3@test.com',
                    // id4 should be overwritten
                    id4: 'email5@test.com',
                },
            );
        });
    });

    describe('::_mergePolicies', () => {
        it('should correctly merge policies', () => {
            const policyResps = [
                { message: { body: [
                    { isAllowed: false, arn: 'arn:aws:s3:::policybucket/true1' },
                    { isAllowed: true, arn: 'arn:aws:s3:::policybucket/true2' },
                    { isAllowed: false, arn: 'arn:aws:s3:::policybucket/false1' },
                ] } },
                { message: { body: [
                    { isAllowed: true, arn: 'arn:aws:s3:::policybucket/true1' },
                    { isAllowed: false, arn: 'arn:aws:s3:::policybucket/true2' },
                    { isAllowed: false, arn: 'arn:aws:s3:::policybucket/false2' },
                ] } },
            ];
            assert.deepStrictEqual(
                ChainBackend._mergePolicies(policyResps),
                [
                    { isAllowed: true, arn: 'arn:aws:s3:::policybucket/true1' },
                    { isAllowed: true, arn: 'arn:aws:s3:::policybucket/true2' },
                    { isAllowed: false, arn: 'arn:aws:s3:::policybucket/false1' },
                    { isAllowed: false, arn: 'arn:aws:s3:::policybucket/false2' },
                ],
            );
        });
    });

    // fixed describe title: the method under test is `healthcheck`
    describe('::healthcheck', () => {
        it('should return error if a single client is unhealthy', done => {
            const backend = new ChainBackend('chain', [
                new TestBackend('test1', null, { code: 200 }),
                new TestBackend('test2', testError, { code: 503 }),
                new TestBackend('test3', null, { code: 200 }),
            ]);
            backend.healthcheck(null, (err, res) => {
                assert.deepStrictEqual(err, errors.InternalError);
                assert.deepStrictEqual(res, [
                    { error: null, status: { code: 200 } },
                    { error: testError, status: { code: 503 } },
                    { error: null, status: { code: 200 } },
                ]);
                done();
            });
        });

        it('should return result if all clients are healthy', done => {
            const backend = new ChainBackend('chain', [
                new TestBackend('test1', null, { msg: 'test1', code: 200 }),
                new TestBackend('test2', null, { msg: 'test2', code: 200 }),
                new TestBackend('test3', null, { msg: 'test3', code: 200 }),
            ]);
            backend.healthcheck(null, (err, res) => {
                assert.ifError(err);
                assert.deepStrictEqual(res, [
                    { error: null, status: { msg: 'test1', code: 200 } },
                    { error: null, status: { msg: 'test2', code: 200 } },
                    { error: null, status: { msg: 'test3', code: 200 } },
                ]);
                done();
            });
        });
    });
});

View File

@ -89,10 +89,10 @@ describe('AuthLoader class', () => {
// Check a failure when the type of field is different than
// expected
it(`should fail when modified field ${test[0]} ${test[1]}`,
done => {
should._exec = shouldFail;
should.modifiedField(obj, test[0], test[1], done);
});
done => {
should._exec = shouldFail;
should.modifiedField(obj, test[0], test[1], done);
});
}
});

Some files were not shown because too many files have changed in this diff Show More