Compare commits

...

1200 Commits

Author SHA1 Message Date
Vitaliy Filippov 19855115ae Use TS? 2024-08-06 19:56:20 +03:00
Vitaliy Filippov 329d8ef32c Add Vitastor support 2024-08-05 02:23:54 +03:00
Vitaliy Filippov f0ded4ea4f Use swc to transpile during installation 2024-08-04 00:00:10 +03:00
Vitaliy Filippov 3eea263384 Use ^ dependencies, suppress aws-sdk maintenance mode message 2024-08-04 00:00:01 +03:00
Vitaliy Filippov c26d4f7d70 Fix readUInt with length 8 2024-08-04 00:00:01 +03:00
Vitaliy Filippov 63137e7a7b Change git dependency URLs 2024-08-04 00:00:01 +03:00
Vitaliy Filippov fdb23b1cd2 Remove yarn lock 2024-08-04 00:00:01 +03:00
Vitaliy Filippov 4120eac127 Make sproxydclient and hdclient dependencies optional 2024-08-04 00:00:01 +03:00
Maha Benzekri d9bbd6cf3e
bump project version
Issue: https://scality.atlassian.net/browse/ARSN-426
2024-07-31 11:22:01 +02:00
Maha Benzekri 65e89d286d
ensure callback is only called once on AwsClient
Issue: https://scality.atlassian.net/browse/ARSN-426
2024-07-31 11:21:56 +02:00
Maha Benzekri dcbc5ca98f
ensure callback is only called once on MultipleBackendGateway
Issue: https://scality.atlassian.net/browse/ARSN-426
2024-07-31 11:21:44 +02:00
Maha Benzekri 817bb836ec
ARSN-420: bump arsenal version 2024-07-15 15:20:08 +02:00
Maha Benzekri e3e4b2aea7
ARSN-420: putObjectNoVar function update with hack
We agreed on introducing the same “hack” as in the internalDelete function,
so the MD is written twice to the oplog: one "deleted: true" copy of the previous MD,
followed by the expected update with the new metadata.
2024-07-15 15:19:06 +02:00
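
A minimal TypeScript sketch of the double write described above; the function name and collection shape are illustrative, not Arsenal's actual putObjectNoVar internals:

```typescript
interface ObjectMD { [field: string]: unknown }

// Structural stand-in for a MongoDB collection, to keep the sketch self-contained.
interface MDCollection {
    replaceOne(filter: object, doc: object, opts?: object): Promise<unknown>;
}

// Write the metadata twice so the oplog records both events.
async function putObjectMDWithDeleteTrace(
    c: MDCollection,
    key: string,
    previousMD: ObjectMD,
    newMD: ObjectMD,
): Promise<void> {
    // First write: a "deleted: true" copy of the previous MD,
    // so oplog consumers see the old metadata being retired.
    await c.replaceOne({ _id: key }, { ...previousMD, deleted: true }, { upsert: true });
    // Second write: the expected update with the new metadata.
    await c.replaceOne({ _id: key }, newMD, { upsert: true });
}
```
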
Francois Ferrand 9cd72221e8
Bump arsenal 8.1.132
Issue: ARSN-421
2024-07-10 18:45:22 +02:00
Francois Ferrand bdcd4685ad
gha: bump codecov v4
and use codecov token.

Issue: ARSN-421
2024-07-10 18:45:22 +02:00
Francois Ferrand b2b6c47ba7
Introduce objectGetArchiveInfo verb
This may be used to allow access to more details about archived objects.

Issue: ARSN-421
2024-07-10 18:29:53 +02:00
Jonathan Gramain da173d53b4 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-425-listingLatestCrashWithUndefined' into w/8.1/bugfix/ARSN-425-listingLatestCrashWithUndefined 2024-07-08 11:28:59 -07:00
Jonathan Gramain 7eb2701f21 Merge remote-tracking branch 'origin/bugfix/ARSN-425-listingLatestCrashWithUndefined' into w/7.70/bugfix/ARSN-425-listingLatestCrashWithUndefined 2024-07-08 11:03:50 -07:00
Jonathan Gramain 6ec3c8e10d ARSN-425 bump arsenal version 2024-07-08 10:59:25 -07:00
Jonathan Gramain 7aaf277db2 bf: ARSN-425 listing crash if key contains "undefined"
Fix a crash in DelimiterMaster listing without a delimiter, when a key
contains the string "undefined".

Note: a similar fix was done in ARSN-330 for DelimiterVersions. I
ported the existing unit test there to the development/7.10 branch to
enhance regression testing, even though this bug on DelimiterVersions
only existed on 7.70.
2024-07-08 10:56:48 -07:00
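
An illustration of the hazard behind such a bug (not Arsenal's actual code): coercing an undefined value to a string yields "undefined", which collides with keys containing that substring literally:

```typescript
// Buggy shape: template coercion turns undefined into the string "undefined".
function shouldSkip(prefix: string | undefined, key: string): boolean {
    return key.startsWith(`${prefix}`);
}

// Fixed shape: check for undefined explicitly before comparing.
function shouldSkipFixed(prefix: string | undefined, key: string): boolean {
    return prefix !== undefined && key.startsWith(prefix);
}

// shouldSkip(undefined, 'undefined-reports/x')      -> true (wrong)
// shouldSkipFixed(undefined, 'undefined-reports/x') -> false
```
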
Francois Ferrand 67421f8c76
Merge branch 'w/7.70/improvement/ARSN-415' into w/8.1/improvement/ARSN-415 2024-05-10 14:28:11 +02:00
Anurag Mittal f13ec2cf4c
Merge remote-tracking branch 'origin/bugfix/ARSN-412-add-support-for-exists-condition' into w/8.1/bugfix/ARSN-412-add-support-for-exists-condition 2024-05-03 13:37:07 +02:00
williamlardier 30eaaf15eb ARSN-406: bump project version 2024-05-02 09:01:13 +02:00
williamlardier 9d16fb0a34 ARSN-406: create the QuotaExceeded error 2024-05-02 09:01:06 +02:00
williamlardier cdc612f379 ARSN-406: add quota numbers in report 2024-05-02 09:00:51 +02:00
williamlardier 61dd65b2c4 ARSN-406: add request context options for quota evaluation 2024-05-02 09:00:00 +02:00
bert-e 2c0696322e Merge branch 'improvement/ARSN-410-quotas-for-bucket-apis' into q/8.1 2024-04-30 16:08:07 +00:00
Maha Benzekri c464a70b90
ARSN-410: bump project version 2024-04-30 17:19:42 +02:00
Maha Benzekri af07bb3df4
ARSN-410: adding api methods in actionMonitoringMapS3 2024-04-30 17:19:20 +02:00
Maha Benzekri 1858654f34
ARSN-410: new no such quota error 2024-04-30 17:18:54 +02:00
Maha Benzekri 0475c8520a
ARSN-410: update routes for bucket get/put/delete quota 2024-04-30 17:18:12 +02:00
Maha Benzekri 31a4de5372
ARSN-410: add getbucketQuota in metaDataWrapper 2024-04-30 17:17:46 +02:00
Maha Benzekri 0c53d13439
ARSN-410: update bucketInfo test 2024-04-30 17:17:18 +02:00
Maha Benzekri cad8b14df1
ARSN-410: update bucketInfo and md 2024-04-30 17:16:50 +02:00
Nicolas Humbert fe29bacc79 Merge remote-tracking branch 'origin/bugfix/ARSN-413/null' into w/8.1/bugfix/ARSN-413/null 2024-04-30 10:26:58 +02:00
Maha Benzekri ca8f570f15
ARSN-404: project bump 2024-04-05 11:35:52 +02:00
Maha Benzekri a4bca10faf
ARSN-404: adding permission in BP and IAM action Map 2024-04-05 11:35:52 +02:00
Jonathan Gramain c2ab4a2052 ARSN-402 [8.1] typescript fixes 2024-03-13 09:10:25 -07:00
Jonathan Gramain fd0aa314eb Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-402-batchDeleteRequestLogger' into w/8.1/bugfix/ARSN-402-batchDeleteRequestLogger 2024-03-13 09:10:21 -07:00
Mickael Bourgois 5b8fcf0313
ARSN-401: Bump version 2024-03-08 14:11:30 +01:00
Mickael Bourgois bdfde26fe4
Merge remote-tracking branch 'origin/improvement/ARSN-401-cluster-rpc-primary' into w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-03-08 14:11:06 +01:00
Mickael Bourgois e53613783a
Merge remote-tracking branch 'origin/development/8.1' into w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-03-08 14:10:12 +01:00
Nicolas Humbert a1dc2bd84d Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-403/bump' into w/8.1/bugfix/ARSN-403/bump 2024-03-06 16:40:02 +01:00
bert-e 77ed018b4f Merge branch 'w/7.70/bugfix/ARSN-403/fix-put-metadata-2' into tmp/octopus/w/8.1/bugfix/ARSN-403/fix-put-metadata-2 2024-03-05 12:41:44 +00:00
bert-e 05c628728d Merge branch 'w/7.70/bugfix/ARSN-403/fix-put-metadata-2' into tmp/octopus/w/8.1/bugfix/ARSN-403/fix-put-metadata-2 2024-03-04 13:23:08 +00:00
bert-e 0dd7fe9875 Merge branch 'improvement/ARSN-401-cluster-rpc-primary' into tmp/octopus/w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-02-29 08:58:13 +00:00
Mickael Bourgois e6d0eff1a8
Merge remote-tracking branch 'origin/improvement/ARSN-401-cluster-rpc-primary' into w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-02-28 01:52:02 +01:00
bert-e 7e372b7bd5 Merge branches 'w/8.1/improvement/ARSN-400-scuba-admin' and 'q/2224/7.70/improvement/ARSN-400-scuba-admin' into tmp/octopus/q/8.1 2024-02-26 13:59:56 +00:00
Nicolas Humbert 06402c6c94 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-392/bump' into w/8.1/bugfix/ARSN-392/bump 2024-02-21 10:11:29 +01:00
bert-e a1eed4fefb Merge branch 'bugfix/ARSN-392/null7.70' into tmp/octopus/w/8.1/bugfix/ARSN-392/null7.70 2024-02-20 14:22:16 +00:00
bert-e d8f7f18f5a Merge branches 'w/8.1/bugfix/ARSN-392/null' and 'q/2215/7.70/bugfix/ARSN-392/null' into tmp/octopus/q/8.1 2024-02-20 14:02:12 +00:00
bert-e e151b3fff1 Merge branch 'w/7.70/bugfix/ARSN-392/null' into tmp/octopus/w/8.1/bugfix/ARSN-392/null 2024-02-20 13:54:33 +00:00
williamlardier b6bc11881a Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend' into w/8.1/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-19 09:26:47 +01:00
williamlardier 648257612b Merge remote-tracking branch 'origin/development/8.1' into w/8.1/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-19 09:26:06 +01:00
Jonathan Gramain 1a0a981271 Merge remote-tracking branch 'origin/bugfix/ARSN-398-doNotRefreshGapBuildingIfDisabled' into w/8.1/bugfix/ARSN-398-doNotRefreshGapBuildingIfDisabled 2024-02-16 10:04:07 -08:00
bert-e a45b2eb6a4 Merge branch 'w/7.70/improvement/ARSN-400-scuba-admin' into tmp/octopus/w/8.1/improvement/ARSN-400-scuba-admin 2024-02-16 10:29:54 +00:00
bert-e 15fd621c5c Merge branches 'w/8.1/feature/ARSN-397-gapCacheClear' and 'q/2222/7.70/feature/ARSN-397-gapCacheClear' into tmp/octopus/q/8.1 2024-02-15 19:07:32 +00:00
bert-e 285fe2f63b Merge branches 'w/8.1/bugfix/ARSN-394-GapCacheInvalidateStagingGaps' and 'q/2218/7.70/bugfix/ARSN-394-GapCacheInvalidateStagingGaps' into tmp/octopus/q/8.1 2024-02-15 19:07:20 +00:00
bert-e 00555597e0 Merge branch 'feature/ARSN-397-gapCacheClear' into tmp/octopus/w/8.1/feature/ARSN-397-gapCacheClear 2024-02-15 18:59:42 +00:00
bert-e bddc2ccd01 Merge branch 'bugfix/ARSN-394-GapCacheInvalidateStagingGaps' into tmp/octopus/w/8.1/bugfix/ARSN-394-GapCacheInvalidateStagingGaps 2024-02-15 18:59:33 +00:00
Jonathan Gramain 0d7cf8d40a Merge remote-tracking branch 'origin/feature/ARSN-389-optimizeListingWithGapCache' into w/8.1/feature/ARSN-389-optimizeListingWithGapCache 2024-02-14 10:24:17 -08:00
williamlardier 851c72bd0f ARSN-396: consider action and isImplicit flags in multipleBackend
The new flags are set when IAM returns detailed information about
the actions, whether they are allowed or denied, via the
isImplicit flag. The mergePolicy must be updated to support the
new fields, and must not merge policies that are for different
actions.

Note that this function will consider that any Allow takes
precedence, so this behavior is not changed.
2024-02-14 12:35:22 +01:00
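
A hedged sketch of the merge rule the message describes; the types are assumptions, not Arsenal's actual mergePolicy signature:

```typescript
interface PolicyResult {
    action: string;               // the API action the result applies to
    effect: 'Allow' | 'Deny';
    isImplicit: boolean;          // set when IAM returns detailed results
}

function mergePolicy(a: PolicyResult, b: PolicyResult): PolicyResult | null {
    if (a.action !== b.action) {
        return null; // policies for different actions must not be merged
    }
    // Unchanged behavior: any Allow takes precedence.
    if (a.effect === 'Allow') return a;
    if (b.effect === 'Allow') return b;
    return a; // both Deny
}
```
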
bert-e 722b6ae699 Merge branch 'w/7.70/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend' into tmp/octopus/w/8.1/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-14 11:13:29 +00:00
bert-e 3c2283b062 Merge branch 'bugfix/ARSN-393-infiniteLoopInCoalesceGapChain' into tmp/octopus/w/8.1/bugfix/ARSN-393-infiniteLoopInCoalesceGapChain 2024-02-13 18:15:57 +00:00
Jonathan Gramain 6a116734a9 ARSN-388 [fixup 8.1] merge fix: add missing files 2024-02-09 10:10:43 -08:00
Jonathan Gramain 9325ea4996 Merge remote-tracking branch 'origin/feature/ARSN-391-gapCache' into w/8.1/feature/ARSN-391-gapCache 2024-02-09 10:00:08 -08:00
Jonathan Gramain 33ba89f0cf Merge remote-tracking branch 'origin/feature/ARSN-388-gapSet' into w/8.1/feature/ARSN-388-gapSet 2024-02-09 09:45:36 -08:00
Mickael Bourgois be1557d972
ARSN-390: Bump version 2024-02-05 20:03:24 +01:00
Mickael Bourgois a03463061c
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-390-scuba-arn' into w/8.1/improvement/ARSN-390-scuba-arn 2024-02-05 20:03:10 +01:00
Frédéric Meinnel 5a5ef7c572 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-386/fix-generate-v4-headers-for-put-with-body-requests' into w/8.1/bugfix/ARSN-386/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 13:15:43 +01:00
Frédéric Meinnel f8ce90f9c3 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-385/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.1/bugfix/ARSN-385/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-16 17:58:09 +01:00
Jonathan Gramain 6f58f9dd68 Merge remote-tracking branch 'origin/improvement/ARSN-381-cluster-rpc-helpers' into w/8.1/improvement/ARSN-381-cluster-rpc-helpers 2024-01-11 16:34:37 -08:00
bert-e 042f541a45 Merge branches 'w/8.1/bugfix/ARSN-384-redirect-error-body' and 'q/2207/7.70/bugfix/ARSN-384-redirect-error-body' into tmp/octopus/q/8.1 2024-01-10 10:23:22 +00:00
Mickael Bourgois 02f126f040
ARSN-384: fix after merge 8.1 param name 2024-01-10 11:15:38 +01:00
bert-e 1477a70e47 Merge branch 'w/7.70/bugfix/ARSN-384-redirect-error-body' into tmp/octopus/w/8.1/bugfix/ARSN-384-redirect-error-body 2024-01-10 09:51:16 +00:00
Frédéric Meinnel 59d47a3e21 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight' into w/8.1/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight 2024-01-09 10:35:12 +01:00
Frédéric Meinnel 6b61347c29 Merge remote-tracking branch 'origin/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight' into w/8.1/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight 2024-01-08 18:22:57 +01:00
bert-e c2f6b45116 Merge branch 'w/7.70/bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/8.1/bugfix/ARSN-382-redirect-root-empty 2024-01-03 08:52:09 +00:00
bert-e 993b9e6093 Merge branch 'w/7.70/bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/8.1/bugfix/ARSN-382-redirect-root-empty 2024-01-02 18:09:07 +00:00
bert-e 7440794d93 Merge branch 'w/7.70/bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/8.1/bugfix/ARSN-382-redirect-root-empty 2024-01-02 10:53:55 +00:00
bert-e 087369b37d Merge branches 'w/8.1/improvement/ARSN-363-retention-day-condition' and 'q/2191/7.70/improvement/ARSN-363-retention-day-condition' into tmp/octopus/q/8.1 2023-12-26 10:55:59 +00:00
Will Toozs da80e12dab
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-363-retention-day-condition' into w/8.1/improvement/ARSN-363-retention-day-condition 2023-12-26 11:49:28 +01:00
Jonathan Gramain 2a82095d03 ARSN-379 [8.1] bump arsenal version 2023-12-22 12:41:17 -08:00
Jonathan Gramain 44b3d25459 ARSN-379 [8.1] adapt skipping delete markers in DelimiterMaster
With the MongoDB implementation there may be delete markers in the
masters prefix to go through.

Replace the original implementation for this by a new implementation
compatible with the latest DelimiterMaster changes.

Note: changed the returned value from FILTER_SKIP to FILTER_ACCEPT:
this is the correct logic, as there is no range to skip; the key
simply shouldn't be added to the results.
2023-12-22 12:41:01 -08:00
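
A sketch of that decision; the constant values are illustrative (Arsenal defines its own FILTER_* constants):

```typescript
const FILTER_ACCEPT = 1; // keep scanning; the key itself may still be dropped
const FILTER_SKIP = 0;   // tells the caller it may skip a whole key range

function filterMaster(key: string, isDeleteMarker: boolean,
                      results: string[]): number {
    if (isDeleteMarker) {
        // No range to skip: simply do not add the key to the results.
        return FILTER_ACCEPT;
    }
    results.push(key);
    return FILTER_ACCEPT;
}
```
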
Jonathan Gramain f1d6e30fb6 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-379-cherry-pick-ARSN-284-and-ARSN-293' into w/8.1/bugfix/ARSN-379-cherry-pick-ARSN-284-and-ARSN-293 2023-12-22 12:40:18 -08:00
bert-e 37234efd14 Merge branch 'improvement/ARSN-380-delimiterVersionsInheritFromExtension' into tmp/octopus/w/8.1/improvement/ARSN-380-delimiterVersionsInheritFromExtension 2023-12-20 20:01:59 +00:00
Jonathan Gramain f4e83086d6 Merge remote-tracking branch 'origin/bugfix/ARSN-377-v1NullKeyDeleteMarkerNotInCommonPrefixes' into w/8.1/bugfix/ARSN-377-v1NullKeyDeleteMarkerNotInCommonPrefixes 2023-12-14 14:54:24 -08:00
Maha Benzekri 74ff1691a0
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-378-BP-authorization' into w/8.1/improvement/ARSN-378-BP-authorization 2023-12-14 11:58:47 +01:00
bert-e 2a4ea38301 Merge branch 'w/7.70/improvement/ARSN-378-BP-authorization' into tmp/octopus/w/8.1/improvement/ARSN-378-BP-authorization 2023-12-14 10:55:37 +00:00
Francois Ferrand d800179f86
Release arsenal 8.1.115
Issue: ARSN-374
2023-12-01 17:28:59 +01:00
Francois Ferrand c1c45a4af9
gha: upgrade actions
Issue: ARSN-374
2023-12-01 17:27:41 +01:00
Francois Ferrand da536ed037
ObjectMD: Add transition time
Store transition time when marking the object as ‘transition in
progress’. This is used to compute metrics on the duration of transition.

Issue: ARSN-374
2023-12-01 17:27:41 +01:00
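
A sketch in the ObjectMD accessor style (method names are assumptions): record when the object was marked 'transition in progress' so the transition's duration can be measured later:

```typescript
class TransitionMD {
    private transitionInProgress = false;
    private transitionTime?: string;

    setTransitionInProgress(inProgress: boolean, when = new Date()): this {
        this.transitionInProgress = inProgress;
        // Store the timestamp only while a transition is in progress.
        this.transitionTime = inProgress ? when.toISOString() : undefined;
        return this;
    }

    getTransitionDurationMs(now = new Date()): number | null {
        return this.transitionTime
            ? now.getTime() - new Date(this.transitionTime).getTime()
            : null;
    }
}
```
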
Nicolas Humbert 06901104e8 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-376/probe' into w/8.1/bugfix/ARSN-376/probe 2023-12-01 13:38:36 +01:00
Benoit A. 863f45d256
ARSN-373 bump hdclient to 1.1.7 2023-11-20 16:52:41 +01:00
KillianG 4b642cf8b4
Add custom listing parser to MongoDB listObject
Add test to check that the location param is absent

Issue: ARSN-372
2023-11-17 17:45:10 +01:00
KillianG 2537f8aa9a
Exclude location field from search query in MongoReadStream.
Issue: ARSN-372
2023-11-13 11:07:43 +01:00
Maha Benzekri 7866a1d06f
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-362-implicitDeny' into w/8.1/improvement/ARSN-362-implicitDeny 2023-10-30 16:55:21 +01:00
Maha Benzekri 1509f1bdfe
fix 2023-10-30 16:47:32 +01:00
Maha Benzekri 13d349d211
fix 2023-10-30 16:40:00 +01:00
Maha Benzekri 34a32c967d
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-362-implicitDeny' into w/8.1/improvement/ARSN-362-implicitDeny 2023-10-30 16:38:08 +01:00
bert-e d79ed1b9c8 Merge branch 'w/7.70/improvement/ARSN-362-implicitDeny' into tmp/octopus/w/8.1/improvement/ARSN-362-implicitDeny 2023-10-30 15:01:06 +00:00
williamlardier 17b5bbc233 ARSN-370: bump project version 2023-10-06 09:14:13 +02:00
williamlardier 4aa8b5cc6e ARSN-370: handle error cases 2023-10-06 09:13:46 +02:00
williamlardier 5deed6c2e1 ARSN-370: fix memory leak
The MongoDBReadStreams are not properly destroyed in both the
Bucket V1 and V0 cases. In the V1 case, only the pipe-ed stream,
the Transform one, is cleaned. In the V0 case, we directly call
the callback without properly cleaning the stream. This leaves,
in both cases, the mongodb cursors open, which in turn affects
the mongos memory consumption.
2023-10-06 09:13:46 +02:00
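
A sketch of the fix's idea with plain Node streams (names are illustrative): destroy the source stream itself, not only the pipe-ed Transform, so the underlying cursor is released in both code paths:

```typescript
import { Readable, Transform } from 'stream';

function closeListingStreams(source: Readable, piped?: Transform): void {
    piped?.destroy();  // the V1 path previously cleaned only this stream
    source.destroy();  // destroying the source closes the MongoDB cursor
}
```
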
Nicolas Humbert af34571771 Merge remote-tracking branch 'origin/bugfix/ARSN-369/skip' into w/8.1/bugfix/ARSN-369/skip 2023-10-05 11:49:01 +02:00
Nicolas Humbert 5fd675a316 Merge remote-tracking branch 'origin/improvement/ARSN-366/listing-scanned-limit' into w/8.1/improvement/ARSN-366/listing-scanned-limit 2023-09-27 17:22:45 +02:00
Maha Benzekri dcf0f902ff
Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-367-principal-user-arn-on-policy' into w/8.1/bugfix/ARSN-367-principal-user-arn-on-policy 2023-09-25 12:20:06 +02:00
bert-e 5b66f8d089 Merge branch 'w/7.70/bugfix/ARSN-365-id-on-resource-policy' into tmp/octopus/w/8.1/bugfix/ARSN-365-id-on-resource-policy 2023-09-13 06:27:36 +00:00
Florent Monjalet e51b06cfea ARSN-364: bump arsenal to 8.1.109 2023-08-31 18:46:36 +02:00
Florent Monjalet f2bc701f8c ARSN-364: bump sproxydclient to 8.0.10 (for SPRXCLT-12) 2023-08-31 18:46:06 +02:00
Nicolas Humbert 4d6b03ba47 ARSN-360 bump package version 2023-08-11 13:31:22 -04:00
Nicolas Humbert f03f049683 ARSN-360 Test enable V0 bucket format for Artesca lifecycle listing 2023-08-11 12:37:25 -04:00
Nicolas Humbert d7b51de024 ARSN-360 Enable V0 bucket format for Artesca lifecycle listing 2023-08-11 08:30:55 -04:00
Nicolas Humbert cf51adf1c7 Merge remote-tracking branch 'origin/bugfix/ARSN-359/max-keys' into w/8.1/bugfix/ARSN-359/max-keys 2023-08-08 19:59:22 -04:00
Nicolas Humbert 2b2667e29a Merge remote-tracking branch 'origin/improvement/ARSN-358/bump' into w/8.1/improvement/ARSN-358/bump 2023-08-08 13:16:11 -04:00
bert-e 862317703e Merge branch 'improvement/ARSN-356/list-orphan-delete-marker-v0' into tmp/octopus/w/8.1/improvement/ARSN-356/list-orphan-delete-marker-v0 2023-08-04 21:25:02 +00:00
bert-e 547ce816e0 Merge branch 'improvement/ARSN-355/list-non-current-v0' into tmp/octopus/w/8.1/improvement/ARSN-355/list-non-current-v0 2023-08-04 17:03:23 +00:00
bert-e 15d5e93a2d Merge branch 'improvement/ARSN-354/list-current-v0' into tmp/octopus/w/8.1/improvement/ARSN-354/list-current-v0 2023-08-01 15:56:22 +00:00
bert-e d11bcb56e9 Merge branch 'improvement/ARSN-352/list-current' into tmp/octopus/w/8.1/improvement/ARSN-352/list-current 2023-08-01 14:16:19 +00:00
bert-e 0ed35c3d86 Merge branch 'q/2151/7.70/improvement/ARSN-351/backport' into tmp/normal/q/8.1 2023-07-21 16:40:33 +00:00
Nicolas Humbert c0218821ff Merge remote-tracking branch 'origin/improvement/ARSN-351/backport' into w/8.1/improvement/ARSN-351/backport 2023-07-21 12:30:02 -04:00
Nicolas Humbert 7c4f461196 bump version 2023-07-14 09:20:58 -04:00
Nicolas Humbert 0a4d6f862f ARSN-350 Missing Null Version in Lifecycle List of Non-Current Versions
Note: We only support the v1 bucket format for "list lifecycle" in Artesca.

We made the assumption that the first version key stored the current/latest version, which is true in most cases except for "null" versions. In the case of a "null" version, the current version is stored in the master key alone, rather than being stored in both the master key and a new version key. Here's an example of the key structure:

Mkey0: Represents the null version ID.
VKey0<versionID>: Represents a non-current version.

Additionally, we assumed that the versions for a given key were ordered by creation date, from newest to oldest. However, in Ring S3C, for non-current null versions, the metadata version ID is not part of the metadata key id. Therefore, the non-current null version is listed before the current version that has a version ID. Here's an example of the key ordering:

Mkey0: Master version
Vkey0: "null" non-current version
VKey0<versionID>: Current version

The listing was using only versions, but because those assumptions are incorrect, we now use both the master and the versions for each given key to ensure that we return the correct non-current versions.
2023-07-14 09:20:36 -04:00
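
A hedged sketch of the resulting rule (field names assumed): an entry is current exactly when it carries the same version ID the master key holds:

```typescript
function isNonCurrent(masterVersionId: string | undefined,
                      entryVersionId: string | undefined): boolean {
    // Works for "null" versions too: their entry carries no version ID,
    // so it only matches a master that also has none.
    return entryVersionId !== masterVersionId;
}
```
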
bert-e 8716fee67d Merge branch 'q/2134/7.70/improvement/ARSN-345-optimize-multiobjectdelete-api-and-batching' into tmp/normal/q/8.1 2023-07-12 11:36:29 +00:00
williamlardier 05c93446ab
Merge remote-tracking branch 'origin/improvement/ARSN-345-optimize-multiobjectdelete-api-and-batching' into w/8.1/improvement/ARSN-345-optimize-multiobjectdelete-api-and-batching 2023-07-12 13:26:01 +02:00
Rahul Padigela bdb59a0e63 Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-349-update-node-fcntl' into w/8.1/improvement/ARSN-349-update-node-fcntl 2023-06-20 16:34:38 -07:00
Nicolas Humbert 96cbaeb821 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-347/socketio' into w/8.1/bugfix/ARSN-347/socketio 2023-06-08 11:46:23 -04:00
bert-e 15b68fa9fa Merge branch 'improvement/ARSN-344/bump' into q/8.1 2023-06-07 14:06:37 +00:00
Nicolas Humbert 51703a65f5 ARSN-344 bump version 2023-06-07 08:58:42 -04:00
bert-e 09aaa2d5ee Merge branch 'improvement/ARSN-339/time-progression-factor' into q/8.1 2023-06-07 12:10:45 +00:00
Nicolas Humbert ad39d90b6f ARSN-339 Introduce the time-progression-factor flag
The "time-progression-factor" variable serves as a testing-specific feature that accelerates the progression of time within a system.
By reducing the significance of each day, it enables the swift execution of specific actions, such as expiration, transition, and object locking, which are typically associated with longer timeframes.

This capability allows for efficient testing and evaluation of outcomes, optimizing the observation of processes that would normally take days or even years.
It's important to note that this variable is intended exclusively for testing purposes and is not employed in live production environments, where real-time progression is crucial for accurate results.
2023-06-05 17:17:45 -04:00
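
A sketch of how such a factor can accelerate day-based lifecycle logic (variable names are assumptions):

```typescript
const timeProgressionFactor = Number(process.env.TIME_PROGRESSION_FACTOR || 1);

// Each "day" shrinks by the factor, so rules expressed in days
// (expiration, transition, object locking) elapse faster under test.
const oneDayMs = (24 * 60 * 60 * 1000) / timeProgressionFactor;

function daysElapsed(since: Date, now = new Date()): number {
    return Math.floor((now.getTime() - since.getTime()) / oneDayMs);
}
```
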
Jonathan Gramain 20e9fe4adb Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-340-bump-socket-io' into w/8.1/bugfix/ARSN-340-bump-socket-io 2023-05-30 16:06:31 -07:00
bert-e 536d474f57 Merge branches 'development/8.1' and 'w/7.70/improvement/ARSN-335-implement-ghas' into tmp/octopus/w/8.1/improvement/ARSN-335-implement-ghas 2023-05-25 17:52:46 +00:00
KillianG 25be9014c9
Bump version 8.1.101 2023-05-25 10:00:14 +00:00
KillianG ed42f24580
Add comment to explain
Issue: ARSN-337
2023-05-25 09:00:14 +00:00
KillianG ce076cb3df
Add test to check master version are skipped in v1 as well
Issue: ARSN-337
2023-05-23 13:30:57 +00:00
KillianG 4bc3de52ff
Filter delete marker from version suspended buckets
Issue: ARSN-337
2023-05-22 16:40:15 +00:00
bert-e beb5f69be3 Merge branch 'w/7.70/improvement/ARSN-335-implement-ghas' into tmp/octopus/w/8.1/improvement/ARSN-335-implement-ghas 2023-05-19 15:59:38 +00:00
Alexander Chan 4093bf2b04 bump version 2023-04-16 19:12:15 -07:00
Alexander Chan d0bb6d5b0c ARSN-334: add mongodb list in progress indexing jobs 2023-04-16 19:12:15 -07:00
bert-e 3f7229eebe Merge branch 'improvement/ARSN-309/addMongoIndexObjectTransforms' into q/8.1 2023-04-15 00:33:49 +00:00
bert-e 7eb9d52da5 Merge branch 'improvement/ARSN-328/excludedDataStoreName' into q/8.1 2023-04-14 21:50:25 +00:00
Nicolas Humbert e216c9dd20 ARSN-328 Exclude keys based on their dataStoreName 2023-04-14 14:42:13 -07:00
williamlardier 0c1afe535b
ARSN-333: bump to 8.1.97
williamlardier 73335ae6ec
ARSN-333: fix callback for adminDb command in sharded mode with mongo driver
Alexander Chan 99c514e8f2 bump version 2023-04-13 15:14:47 -07:00
Alexander Chan cfd9fdcfc4 bump eslint dependency 2023-04-13 15:14:30 -07:00
Alexander Chan d809dac5e3 ARSN-309: add mongodb index object helper methods 2023-04-13 15:14:01 -07:00
williamlardier 53dac8d233
ARSN-329: bump arsenal to 8.1.96 2023-04-13 16:29:37 +02:00
williamlardier 6d5ef07eee
ARSN-329: update latest changes 2023-04-13 16:29:36 +02:00
williamlardier 272166e406
ARSN-329: update tests 2023-04-13 15:44:43 +02:00
williamlardier 3af05e672b
ARSN-329: switch to promises as callbacks are deprecated 2023-04-13 15:44:42 +02:00
williamlardier 8b0c90cb2f
ARSN-329: bump mongodb driver 2023-04-13 15:44:39 +02:00
Alexander Chan dfc9b761e2 bump version 2023-04-12 14:00:31 -07:00
Alexander Chan 04f1eb7f04 ARSN-332: bump sproxydclient dependency 2023-04-12 14:00:31 -07:00
bert-e c204b90847 Merge branch 'feature/ARSN-324-add-s3-lifecycle-expiration-to-existing-object-delete-function' into q/8.1 2023-04-11 13:48:33 +00:00
bert-e 78d6e7fd72 Merge branch 'feature/ARSN-309/supportMongoIndexing' into q/8.1 2023-04-10 17:57:52 +00:00
Alexander Chan 7768fa8d35 ARSN-309: support indexing for mongo 2023-04-07 09:51:49 -07:00
KillianG 4d9a9adc48
Bump arsenal 8.1.94
Issue: ARSN-324
2023-04-07 12:35:50 +00:00
KillianG c4804e52ee
Add unit test for internal delete object function with custom origin OP
Issue: ARSN-324
2023-04-07 12:34:10 +00:00
KillianG 671cf3a679
Add argument to internal delete object in case the call is made from lifecycle expiration, to avoid raising an objectremoved:delete event
Issue: ARSN-324
2023-04-07 12:34:10 +00:00
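
A sketch of the dispatch this enables (the parameter is an assumption; s3:LifecycleExpiration appears in the supported events list in the ARSN-322 entry below):

```typescript
function deletionEventFor(isLifecycleExpiration: boolean): string {
    return isLifecycleExpiration
        ? 's3:LifecycleExpiration'       // no ObjectRemoved:Delete raised
        : 's3:ObjectRemoved:Delete';     // regular delete notification
}
```
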
Jonathan Gramain 9a5e27f97b Merge remote-tracking branch 'origin/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined' into w/8.1/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined 2023-04-05 15:41:59 -07:00
Jonathan Gramain a9d003c6f8 Merge remote-tracking branch 'origin/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined' into w/8.1/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined 2023-04-05 15:37:11 -07:00
Jonathan Gramain d3bdddeba3 Merge remote-tracking branch 'origin/improvement/ARSN-320-newObjectMDIsNull2' into w/8.1/improvement/ARSN-320-newObjectMDIsNull2 2023-04-04 09:31:14 -07:00
bert-e 3252f7de03 Merge branch 'feature/ARSN-317-bucketFileNullKeySupport' into tmp/octopus/w/8.1/feature/ARSN-317-bucketFileNullKeySupport 2023-04-04 16:11:03 +00:00
Nicolas Humbert 7994bf7b96 ARSN-327 Bump Arsenal 8.1.92 2023-04-03 14:38:45 -04:00
Nicolas Humbert 4be0a06c4a ARSN-326 Lifecycle listings should handle null version 2023-04-03 08:39:08 -04:00
bert-e da7dbdc51f Merge branch 'improvement/ARSN-325-bump-sproxydclient' into q/8.1 2023-03-29 11:56:40 +00:00
Will Toozs 2103ef1237
ARSN-325: bump project version 2023-03-29 13:39:55 +02:00
Will Toozs dbc1c54246
ARSN-325: bump sproxydclient 2023-03-29 13:17:19 +02:00
bert-e 6c22f8404d Merge branch 'feature/ARSN-318-bucketFileListVersionKeys' into tmp/octopus/w/8.1/feature/ARSN-318-bucketFileListVersionKeys 2023-03-28 22:58:53 +00:00
KillianG 00e03f0592
bump 8.1.90
Issue: ARSN-323
2023-03-24 16:05:23 +00:00
KillianG d453758b7d
add s3:lifecycleexpiration to the list of supported notifications events
Issue: ARSN-322
2023-03-24 15:50:42 +00:00
KillianG a964dc99c3
Add s3:LifecycleTransition event to the list of supportedNotificationEvents
Issue: ARSN-321
2023-03-24 10:09:02 +00:00
williamlardier 5074e6c0a4
ARSN-316: bump to 8.1.89 2023-03-21 13:33:32 +01:00
williamlardier bd05dd6918
ARSN-316: add tests for new mongodb routes 2023-03-21 13:32:18 +01:00
williamlardier fbda12ce3c
ARSN-316: individually update bucket capabilities 2023-03-21 13:32:15 +01:00
Nicolas Humbert b02934bb39 ARSN-319 bump arsenal 2023-03-16 13:03:27 -04:00
Nicolas Humbert c9a444969b ARSN-312 Add logic to list orphan delete markers for Lifecycle
DelimiterOrphan is used for listing orphan delete markers. The Metadata call returns the versions (V prefix). The MD response is then processed to only return the delete markers with zero noncurrent versions before a defined date: beforeDate.
2023-03-16 12:06:27 -04:00
Nicolas Humbert 5d018860ec ARSN-311 Add logic to list non-current versions for Lifecycle
DelimiterNonCurrent is used for listing non-current versions. The Metadata call returns the versions (V prefix). The MD response is then processed to only return the non-current versions that became non-current before a defined date: beforeDate.
2023-03-16 10:03:04 -04:00
bert-e 5838e02096 Merge branch 'feature/ARSN-310/listLifecycleCurrent' into q/8.1 2023-03-16 13:15:55 +00:00
Nicolas Humbert ecd600ac4b ARSN-310 Add logic to list current/master versions for Lifecycle
DelimiterCurrent is used for listing current versions. The Metadata call returns the masters (M prefix) younger than a defined date: beforeDate. No extra filtering action is needed on the Metadata call response.
2023-03-16 08:40:14 -04:00
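
The three lifecycle listings above (current, non-current, orphan delete markers) share one post-processing shape: keep only entries older than beforeDate. A minimal sketch with an assumed entry type:

```typescript
interface ListingEntry { key: string; lastModified: string }

function filterBeforeDate(entries: ListingEntry[], beforeDate: string): ListingEntry[] {
    const cutoff = new Date(beforeDate).getTime();
    return entries.filter(e => new Date(e.lastModified).getTime() < cutoff);
}
```
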
Naren ab0324da05 impr: ARSN-315 bump version to 8.1.87 2023-03-14 17:05:46 -07:00
Naren 2b353b33af Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-315-bump-version-7-10-46' into w/8.1/improvement/ARSN-315-bump-version-7-10-46 2023-03-14 17:02:30 -07:00
bert-e fd57f47be1 Merge branch 'w/7.70/improvement/ARSN-315-disable-default-metrics-collection' into tmp/octopus/w/8.1/improvement/ARSN-315-disable-default-metrics-collection 2023-03-14 23:13:08 +00:00
Jonathan Gramain 58e47e5015 ARSN-306 [8.1 only] skip PHDs in DelimiterVersions V1
Since Artesca uses PHD keys in V1 format, skip them during listing of
versions.
2023-03-09 10:03:02 -08:00
Jonathan Gramain 4d782ecec6 Merge remote-tracking branch 'origin/improvement/ARSN-306-delimiterVersionsNullKeySupport' into w/8.1/improvement/ARSN-306-delimiterVersionsNullKeySupport 2023-03-09 10:02:54 -08:00
bert-e e0df67a115 Merge branch 'bugfix/ARSN-314-missingDescribeInListObjectsTest' into q/8.1 2023-03-09 17:51:50 +00:00
Naren 7e18ae77e0 impr: ARSN-313 update healthprobe server tests 2023-03-08 19:30:41 -08:00
Naren 4750118f85 impr: ARSN-313 upgrade prom-client 2023-03-08 19:10:34 -08:00
Naren c273c8b823 Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-313-upgrade-prom-client' into w/8.1/improvement/ARSN-313-upgrade-prom-client 2023-03-08 19:01:16 -08:00
Jonathan Gramain d3b50fafa8 ARSN-314 [test fix] add missing describe() in listObject
Add a missing describe() block to avoid tests running in parallel for
v0 and v1. This usually led to v1 being used for all tests.
2023-03-08 18:38:49 -08:00
Alexander Chan bf4072151f Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-308/addLifecycleUtilsNoncurrentVersionSupport' into w/8.1/bugfix/ARSN-308/addLifecycleUtilsNoncurrentVersionSupport 2023-03-01 05:26:10 -08:00
Jonathan Gramain 22fa04b7e7 Merge remote-tracking branch 'origin/feature/ARSN-307-bumpVersion' into w/8.1/feature/ARSN-307-bumpVersion 2023-02-23 23:02:17 -08:00
bert-e 4d71a834d5 Merge branch 'w/7.70/feature/ARSN-298/addHeapDataStructure' into tmp/octopus/w/8.1/feature/ARSN-298/addHeapDataStructure 2023-02-24 02:19:16 +00:00
Alexander Chan fa26a487f5 Merge remote-tracking branch 'origin/w/7.70/feature/ARSN-298/supportNewerNoncurrentVersions' into w/8.1/feature/ARSN-298/supportNewerNoncurrentVersions 2023-02-23 16:04:47 -08:00
Jonathan Gramain 66740f5aba Merge remote-tracking branch 'origin/bugfix/ARSN-284-revert' into w/8.1/bugfix/ARSN-284-revert 2023-01-30 16:16:05 +01:00
williamlardier 9c46703b89
ARSN-297: bump to 8.1.82 2023-01-23 16:49:28 +01:00
williamlardier 47672d60ce
ARSN-297: remove Version from request context 2023-01-23 16:46:32 +01:00
Jonathan Gramain 6d41d103e8 Merge remote-tracking branch 'origin/bugfix/ARSN-294-setNextMarkerToCommonPrefix' into w/8.1/bugfix/ARSN-294-setNextMarkerToCommonPrefix 2023-01-12 15:46:32 -08:00
Jonathan Gramain 890ac08dcd Merge remote-tracking branch 'origin/bugfix/ARSN-293-delimiterMasterDefaultsToV0' into w/8.1/bugfix/ARSN-293-delimiterMasterDefaultsToV0 2023-01-08 19:20:46 -08:00
Jonathan Gramain 4949b7cc35 ARSN-284 [8.1] adjust routesToMem listing test 2023-01-06 16:37:36 -08:00
Jonathan Gramain 2b6fee4e84 Merge remote-tracking branch 'origin/bugfix/ARSN-284-refactorDelimiter' into w/8.1/bugfix/ARSN-284-refactorDelimiter 2023-01-06 16:02:30 -08:00
williamlardier c460338163
ARSN-291: bump arsenal to 8.1.78 2023-01-04 14:02:35 +01:00
williamlardier f17d52b602
ARSN-291: use separate function to get specific capability 2023-01-04 14:02:35 +01:00
williamlardier a6b234b7a8
ARSN-291: new bucket field for capabilities 2023-01-04 12:19:59 +01:00
williamlardier ff353bb4d6
ARSN-291: document new field for capabilities 2022-12-26 09:26:34 +01:00
williamlardier 0f9c9c2f18
ARSN-289: bump Arsenal to 8.1.77 2022-12-20 17:20:00 +01:00
williamlardier f6b2cf2c1a
ARSN-289: bump projects for better sockets handling 2022-12-20 17:19:41 +01:00
Kerkesni ecafbae36a
bugfix: ARSN-278 bump version 2022-12-19 15:52:11 +01:00
Kerkesni d1cd7e8dba
bugfix: ARSN-278 handle getting versionId when object is versioning suspended
When replicating a versioning-suspended object, we need to specify 'null'
as the encoded versionId, as the versionId contained within the object's
metadata is strictly internal.

In the replication processor we use getVersionId() when putting/deleting a tag.
It's used by the mongoClient to fetch the object from MongoDB; here again we
need to specify 'null' to get the versioning-suspended object (cloudserver already
knows how to handle a 'null' versionId and transforms it to undefined before giving
it to the mongoClient)
2022-12-19 15:51:56 +01:00
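
A sketch of the rule described above (field names like isNull are assumptions about the metadata model):

```typescript
function getEncodedVersionId(md: { versionId?: string; isNull?: boolean }): string | undefined {
    if (md.isNull) {
        // Never leak the strictly internal versionId: cloudserver and the
        // mongoClient understand 'null' and translate it internally.
        return 'null';
    }
    return md.versionId;
}
```
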
Francois Ferrand 3da6719200
Release 8.1.75
Issue: ARSN-273
2022-12-16 15:51:07 +01:00
Francois Ferrand c0dd54ef51
Support alternate azure auth method
Issue: ARSN-273
2022-12-16 15:48:17 +01:00
Francois Ferrand 7910792390
Fix commit blocks list 2022-12-16 15:46:07 +01:00
Francois Ferrand a4f4c51290
Fix mpu block id
It must be base64-encoded in the new Azure API.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
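
The block-id fix above boils down to base64-encoding the id before handing it to the new Azure API; a minimal sketch:

```typescript
function toAzureBlockId(rawId: string): string {
    // The new Azure API requires base64-encoded block ids.
    return Buffer.from(rawId, 'utf8').toString('base64');
}
```
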
Francois Ferrand 66c4bc52b5
AzureClient: Cleanup _errorWrapper
Make better use of async and simplify error handling.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
Francois Ferrand 81cd6652d6
Use new url parser in mongoclient
This fixes a warning in the logs. The old parser is deprecated and will
be removed at some point.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
Francois Ferrand 2a07f67244
Fix yarn warning
Issue: ARSN-281
2022-12-16 15:46:07 +01:00
Francois Ferrand 1a634015ee
Upgrade azure sdk
There are a few caveats:
* The `proxy.certs` param is not used anymore (though looking at the old
SDK code it may not have been supported before either)
* `azureStreamingOptions/options` parameters have not been updated. The
old options (`range` and `DateUnModifiedSince`) are still used and
supported, to avoid compatibility issues.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
williamlardier 7a88a54918
ARSN-277: bump project version 2022-12-14 17:18:19 +01:00
williamlardier b25e620750
ARSN-277: use JS version of httpagent 2022-12-14 17:18:19 +01:00
williamlardier 38ef89cc83
ARSN-277: standard private repos import 2022-12-14 10:03:32 +01:00
williamlardier 1a6c828bfc
ARSN-277: update jest configuration for typescript subdeps 2022-12-13 20:07:48 +01:00
williamlardier 3d769c6960
ARSN-277: ensure install dependencies step is stable 2022-12-13 20:07:48 +01:00
williamlardier 8a27920a85
ARSN-277: update logic according to changes 2022-12-13 20:07:47 +01:00
williamlardier 7642a22176
ARSN-277: bump projects and add httpagent 2022-12-13 20:07:43 +01:00
bert-e 8f63687ef3 Merge branch 'feature/ARSN-280-abstract-update' into q/8.1 2022-11-18 15:11:34 +00:00
Kerkesni 26f45fa81a
feature: ARSN-280 bump version to 8.1.73 2022-11-18 16:00:59 +01:00
Kerkesni 76b59057f7
feature: ARSN-280 Set update event's type to delete
The update operation we do just before deleting an object, where
we set the deletion flag, will be used as the deletion event since,
contrary to the actual deletion event, it contains object metadata.
2022-11-18 16:00:59 +01:00
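
A sketch of that classification (shapes assumed): the pre-delete update carrying the deletion flag is surfaced as the delete event, since it still holds the object's metadata:

```typescript
type OplogEventType = 'put' | 'delete';

function classifyUpdate(md: { deleted?: boolean }): OplogEventType {
    return md.deleted ? 'delete' : 'put';
}
```
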
Kerkesni ae0da3d605
feature: ARSN-279 support S3:ObjectRestore event notifications 2022-11-15 17:21:19 +01:00
bert-e 162d9ec46b Merge branch 'q/1944/7.10/feature/ARSN-235-update-object-before-deleting-it' into tmp/normal/q/8.1 2022-11-14 09:20:17 +00:00
Kerkesni ccd6462015
feature: ARSN-235 bump version to 8.1.72 2022-11-14 10:10:49 +01:00
Kerkesni 665c77570c
feature: ARSN-235 fix ObjectMD unit tests 2022-11-13 22:16:59 +01:00
Kerkesni 27307b397c
feature: ARSN-235 unskip unit tests in 8.x 2022-11-13 22:16:59 +01:00
Kerkesni 414eada32b
feature: ARSN-235 add functional tests 2022-11-13 22:16:59 +01:00
Kerkesni fdf0c6fe99
feature: ARSN-235 add isPHD flag to ObjectMD model
The "isPHD" flag serves showing that a master object is in a temporary
invalid state that gets repaired asynchronously after a certain period
of time. The repair either updates the metadata or deletes the master
object.

This invalid state happens when deleting the last version of an object.
Previously the "isPHD" flag was set directly inside the object metadata
without going through the ObjectMD model, which is not ideal.
2022-11-13 22:16:58 +01:00
Kerkesni 8cc0be7da2
feature: ARSN-235 add deletion flag to ObjectMD model
The deletion flag indicates that an object is in the process of
being deleted. The object's metadata is updated with the deletion flag
set to true before deleting it, to keep a trace of the latest metadata
inside the oplog, as normal mongo delete events don't contain any metadata.
2022-11-13 22:16:58 +01:00
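
A sketch of the two flags in the ObjectMD accessor style (method names modeled on, but not guaranteed to match, the actual model):

```typescript
class ObjectMDFlags {
    private isPHD = false;    // master key in a temporary, repairable state
    private deleted = false;  // object is in the process of being deleted

    setIsPHD(value: boolean): this { this.isPHD = value; return this; }
    getIsPHD(): boolean { return this.isPHD; }
    setDeleted(value: boolean): this { this.deleted = value; return this; }
    getDeleted(): boolean { return this.deleted; }
}
```
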
bert-e 65231633a7 Merge branch 'feature/ARSN-235-update-object-before-deleting-it' into tmp/octopus/w/8.1/feature/ARSN-235-update-object-before-deleting-it 2022-11-13 21:16:18 +00:00
Alexander Chan 92c567414a bump version to 8.1.71 2022-11-07 16:26:38 -08:00
Alexander Chan ec55e39175 ARSN-276: putObjectVerCase3 - add check for v1 format and versioned updates
An erroneous master entry is created when performing a previous-version
update in a v1 format bucket.

Added fix:
* check whether the update targets a previous version
* check if the master entry exists
* if the master entry doesn't exist and the operation is an update to a
  previous version, skip the upsert
2022-11-07 16:22:50 -08:00
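
A sketch of the guard the fix describes (function and parameter names are illustrative):

```typescript
async function maybeUpsertMaster(
    isPreviousVersionUpdate: boolean,
    masterExists: boolean,
    upsertMaster: () => Promise<void>,
): Promise<void> {
    if (isPreviousVersionUpdate && !masterExists) {
        return; // skip the upsert: it would create an erroneous master entry
    }
    await upsertMaster();
}
```
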
Jonathan Gramain c343820cae Merge remote-tracking branch 'origin/bugfix/ARSN-274-fixBucketPolicyActionMap' into w/8.1/bugfix/ARSN-274-fixBucketPolicyActionMap 2022-11-01 18:34:44 -07:00
williamlardier 8307a1513e
ARSN-272: bump version 2022-10-03 09:34:51 +02:00
williamlardier 706c2425fe
ARSN-272: support array of arrays for req context 2022-10-03 09:34:47 +02:00
williamlardier 8618d77de9
Merge remote-tracking branch 'origin/improvement/ARSN-270-use-standard-permission-names' into w/8.1/improvement/ARSN-270-use-standard-permission-names 2022-09-27 09:18:08 +02:00
Artem Bakalov 8abe746222 Merge remote-tracking branch 'origin/improvement/ARSN-271-bump-version' into w/8.1/improvement/ARSN-271-bump-version 2022-09-26 20:04:36 -07:00
bert-e e74cca6795 Merge branch 'bugfix/ARSN-269-listing-bug-versioned-bucket-edge-case' into tmp/octopus/w/8.1/bugfix/ARSN-269-listing-bug-versioned-bucket-edge-case 2022-09-23 23:58:08 +00:00
bert-e 1427abecb7 Merge branches 'q/1982/7.10/bugfix/ARSN-252-listing-bug-versioned-bucket' and 'w/8.1/bugfix/ARSN-252-listing-bug-versioned-bucket' into tmp/octopus/q/8.1 2022-09-16 10:30:20 +00:00
bert-e 4771ce3067 Merge branch 'bugfix/ARSN-252-listing-bug-versioned-bucket' into tmp/octopus/w/8.1/bugfix/ARSN-252-listing-bug-versioned-bucket 2022-09-16 02:26:12 +00:00
williamlardier 4e8a907d99
Merge remote-tracking branch 'origin/improvement/ARSN-267-support-updaterole-action' into w/8.1/improvement/ARSN-267-support-updaterole-action 2022-09-07 13:30:51 +02:00
Killian Gardahaut 6f42b3e64c Merge remote-tracking branch 'origin/improvement/ARSN-266-change-bucketownedbyyou-error-message' into w/8.1/improvement/ARSN-266-change-bucketownedbyyou-error-message 2022-08-24 13:27:00 +00:00
Jonathan Gramain 237872a5a3 Merge remote-tracking branch 'origin/feature/ARSN-265-release-7.10.33' into w/8.1/feature/ARSN-265-release-7.10.33 2022-08-17 16:29:30 -07:00
bert-e 390fd97edf Merge branch 'bugfix/ARSN-263/cb' into q/8.1 2022-08-17 22:50:41 +00:00
Nicolas Humbert 1c9e4eb93d bump version 2022-08-17 18:43:20 -04:00
bert-e a4f163f466 Merge branches 'w/8.1/bugfix/ARSN-255-revampEvaluatePolicyForTagConditions' and 'q/1989/7.10/bugfix/ARSN-255-revampEvaluatePolicyForTagConditions' into tmp/octopus/q/8.1 2022-08-17 22:01:22 +00:00
Nicolas Humbert 4d0cc9bc12 ARSN-263 retrieveData callback should only be called once 2022-08-17 12:41:33 -04:00
bert-e 657f969d05 Merge branch 'bugfix/ARSN-262-fixRequestContextConstructor' into tmp/octopus/w/8.1/bugfix/ARSN-262-fixRequestContextConstructor 2022-08-12 01:24:07 +00:00
bert-e b43cf22b2c Merge branch 'bugfix/ARSN-255-revampEvaluatePolicyForTagConditions' into tmp/octopus/w/8.1/bugfix/ARSN-255-revampEvaluatePolicyForTagConditions 2022-08-10 22:04:06 +00:00
Killian Gardahaut 46c44ccaa6 Merge remote-tracking branch 'origin/improvement/ARSN-261-bump-7-10-32' into w/8.1/improvement/ARSN-261-bump-7-10-32 2022-08-10 08:38:02 +00:00
bert-e 90c63168c1 Merge branches 'w/8.1/improvement/ARSN-257-bump-7-10-31' and 'q/1980/7.10/improvement/ARSN-257-bump-7-10-31' into tmp/octopus/q/8.1 2022-08-10 08:17:10 +00:00
Jonathan Gramain fe5f868f43 Merge remote-tracking branch 'origin/improvement/ARSN-260-findConditionKeyInefficiency' into w/8.1/improvement/ARSN-260-findConditionKeyInefficiency 2022-08-09 18:00:46 -07:00
Killian Gardahaut c0ee81eb7a Merge remote-tracking branch 'origin/improvement/ARSN-257-bump-7-10-31' into w/8.1/improvement/ARSN-257-bump-7-10-31 2022-08-09 15:35:13 +00:00
bert-e 604a0170f1 Merge branches 'w/8.1/feature/ARSN-256-supportTaggingAndAclEvents' and 'q/1978/7.10/feature/ARSN-256-supportTaggingAndAclEvents' into tmp/octopus/q/8.1 2022-08-08 19:41:51 +00:00
Killian Gardahaut 9d8f4793c9 Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-08 13:53:39 +00:00
Killian Gardahaut 69d33a3341 ARSN-253: Speed up AWS URI encode function 2022-08-08 13:49:18 +00:00
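
A sketch of byte-wise AWS-style URI encoding, the approach that handles special unicode characters correctly (this mirrors the SigV4 convention, not necessarily Arsenal's exact code):

```typescript
function awsUriEncode(input: string, encodeSlash = true): string {
    let out = '';
    for (const byte of Buffer.from(input, 'utf8')) {
        const ch = String.fromCharCode(byte);
        // RFC 3986 unreserved characters pass through unencoded.
        if (/[A-Za-z0-9\-._~]/.test(ch) || (ch === '/' && !encodeSlash)) {
            out += ch;
        } else {
            out += `%${byte.toString(16).toUpperCase().padStart(2, '0')}`;
        }
    }
    return out;
}
```
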
Jonathan Gramain 981c9c1a23 Merge remote-tracking branch 'origin/feature/ARSN-256-supportTaggingAndAclEvents' into w/8.1/feature/ARSN-256-supportTaggingAndAclEvents 2022-08-04 17:00:45 -07:00
KillianG 806f988334
Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-03 10:13:53 +02:00
KillianG 976a05c3e5
Merge branch 'w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars' of github.com:scality/arsenal into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-03 10:03:35 +02:00
Killian Gardahaut c5004cb521 Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-02 12:42:30 +00:00
KillianG bc9cfb0b6d
ARSN-254: Fix constness problem 2022-08-02 13:14:56 +02:00
KillianG 4b6e342ff8
Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-02 13:09:01 +02:00
Kerkesni 480f5a4427
bugfix: ARSN-251 bump arsenal to 8.1.64 2022-07-22 15:15:22 +02:00
bert-e 852ae9bd0f Merge branch 'bugfix/ARSN-251-fix-azure-mpuUtils-import' into tmp/octopus/w/8.1/bugfix/ARSN-251-fix-azure-mpuUtils-import 2022-07-22 13:13:02 +00:00
Taylor McKinnon 3d77540c47 Merge remote-tracking branch 'origin/bugfix/ARSN-250/fix_getByteRangeFromSpec_edgecase' into w/8.1/bugfix/ARSN-250/fix_getByteRangeFromSpec_edgecase 2022-07-21 11:45:24 -07:00
Taylor McKinnon 4f0506cf31 Merge remote-tracking branch 'origin/improvement/ARSN-248/release_7_10_28' into w/8.1/improvement/ARSN-248/release_7_10_28 2022-07-20 14:18:01 -07:00
Nicolas Humbert d92a91f076 bump package version 2022-07-19 08:52:56 +02:00
Nicolas Humbert 28779db602 bugfix/ARSN-247 data.delete 404 errors not handled properly 2022-07-19 08:40:02 +02:00
Alexander Chan 8db16c5532 ARSN-246: fix non-current transition rule comparison
Fix an issue in which a non-current transition rule is compared to a
transition object
2022-07-12 16:55:26 -07:00
Jordi Bertran de Balanda 33439ec215 Merge remote-tracking branch 'origin/improvement/ARSN-245-release-7.10.27' into w/8.1/improvement/ARSN-245-release-7.10.27 2022-07-12 16:12:19 +02:00
bert-e 9873c0f112 Merge branch 'bugfix/ARSN-244-missing-ismasterkey-export' into tmp/octopus/w/8.1/bugfix/ARSN-244-missing-ismasterkey-export 2022-07-11 16:05:28 +00:00
Nicolas Humbert 725a492c2c ARSN-243 bump 8.1.60 2022-07-11 11:51:26 +02:00
Nicolas Humbert e446e3e132 ARSN-242 Fix non-current version transition 2022-07-09 11:46:19 +02:00
bert-e 25c6b34a1e Merge branch 'improvement/ARSN-240/transition' into q/8.1 2022-07-08 17:54:09 +00:00
Jordi Bertran de Balanda 721d7ede93 Merge remote-tracking branch 'origin/improvement/ARSN-241-release-arsenal-7.10.26' into w/8.1/improvement/ARSN-241-release-arsenal-7.10.26 2022-07-08 15:13:10 +02:00
Nicolas Humbert fbbba32d69 Introduce x-amz-scal-transition-in-progress object md 2022-07-08 12:47:30 +02:00
Jordi Bertran de Balanda 56c1ba5c21 ARSN-239 - release arsenal 8.1.59 2022-07-08 11:02:52 +02:00
Will Toozs 73431094a3
Merge remote-tracking branch 'origin/bugfix/ARSN-238' into w/8.1/bugfix/ARSN-238 2022-07-08 09:58:02 +02:00
bert-e 5919d20fa4 Merge branch 'w/8.1/improvement/ARSN-234' into tmp/octopus/q/8.1 2022-07-06 17:18:25 +00:00
Nicolas Humbert 56665069c1 ARSN-237 bump to 8.1.58 2022-07-05 20:14:07 +02:00
Nicolas Humbert 61fe54bd73 ARSN-236 Put bucket replication to dmf is not supported 2022-07-05 15:42:52 +02:00
Francois Ferrand e227d9d5ca
Merge remote-tracking branch 'origin/improvement/ARSN-234' into w/8.1/improvement/ARSN-234 2022-07-01 18:24:06 +02:00
Francois Ferrand cdcc44d272
Merge remote-tracking branch 'origin/improvement/ARSN-233' into w/8.1/improvement/ARSN-233 2022-06-29 12:02:25 +02:00
Xin LI 5acef6895f Merge remote-tracking branch 'origin/improvement/ARSN-225-add-User-Tag-actions' into w/8.1/improvement/ARSN-225-add-User-Tag-actions 2022-06-20 18:22:20 +02:00
williamlardier f7d360fe0b
ARSN-227: bump package version and improve tags validation 2022-06-16 19:18:53 +02:00
williamlardier 0a61b43252
ARSN-227: refining type and validation 2022-06-16 19:18:52 +02:00
williamlardier c014e630be
ARSN-227: introduce BucketTag type and improve tag checking 2022-06-16 19:18:52 +02:00
williamlardier a747d5feda
ARSN-227: add unit tests for bucket tags 2022-06-16 19:18:51 +02:00
KillianG 765857071a
ARSN-227: update bucket info model 2022-06-16 19:18:51 +02:00
KillianG 91b39da7e5
ARSN-227: support bucket tags in Bucket Info 2022-06-16 19:18:50 +02:00
williamlardier 2cc6ebe9b4
ARSN-227: Add NoSuchTag error 2022-06-16 19:18:50 +02:00
williamlardier 7887d22d0d
ARSN-232: bump arsenal 2022-06-15 17:25:11 +02:00
williamlardier 2f142aea7f
ARSN-232: add missing permissions for Version 2022-06-15 17:24:51 +02:00
williamlardier 26a046c9b2
ARSN-224: bump package.json to 8.1.54 2022-06-10 14:15:02 +02:00
bert-e ab23d59daf Merge branch 'bugfix/ARSN-224-fix-models-imports' into tmp/octopus/w/8.1/bugfix/ARSN-224-fix-models-imports 2022-06-10 12:00:50 +00:00
bert-e 6950df200a Merge branch 'bugfix/ARSN-224-fix-models-imports' into tmp/octopus/w/8.1/bugfix/ARSN-224-fix-models-imports 2022-06-10 10:20:14 +00:00
williamlardier 3265d162a7
ARSN-223: bump package.json version 2022-06-10 11:21:31 +02:00
bert-e 67200d80ad Merge branch 'bugfix/ARSN-223-fix-wgm-default-import' into tmp/octopus/w/8.1/bugfix/ARSN-223-fix-wgm-default-import 2022-06-10 09:20:40 +00:00
bert-e aa2992cd9f Merge branches 'w/8.1/feature/ARSN-209-type-check-models' and 'q/1920/7.10/feature/ARSN-209-type-check-models' into tmp/octopus/q/8.1 2022-06-10 08:09:10 +00:00
williamlardier 0e2071ed3b
ARSN-221: bump package.json version to 8.1.52 2022-06-09 11:51:24 +02:00
williamlardier ad579b2bd2
Bump SproxydClient version in package.json
Integrates the Node16 bugfix of SproxydClient
in Artesca.
2022-06-09 11:49:16 +02:00
Guillaume Hivert 139da904a7 Merge remote-tracking branch 'origin/feature/ARSN-209-type-check-models' into w/8.1/feature/ARSN-209-type-check-models 2022-06-09 10:15:31 +02:00
Guillaume Hivert e8851b40c0 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-209-type-check-models 2022-06-09 10:15:21 +02:00
Naren cd9456b510 bf: ARSN-220 export isMasterKey in versioning module 2022-06-08 17:13:17 -07:00
Alexander Chan 15f07538d8 ARSN-218: enable lifecycle noncurrent version transition 2022-05-28 01:26:49 -07:00
Guillaume Hivert e95d07af12 Merge remote-tracking branch 'origin/feature/ARSN-184-type-check-s3routes' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-25 11:58:41 +02:00
Guillaume Hivert b21f7f3440 Fix TODOs 2022-05-25 11:55:09 +02:00
Guillaume Hivert ca2d23710f Merge remote-tracking branch 'origin/feature/ARSN-184-type-check-s3routes' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-25 11:28:53 +02:00
Guillaume Hivert 310fd30266 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-25 11:28:44 +02:00
Guillaume Hivert 8743e9c3ac ARSN-209 Fix imports/exports in tests 2022-05-20 18:08:57 +02:00
bert-e b2af7c0aea Merge branch 'feature/ARSN-209-type-check-models' into tmp/octopus/w/8.1/feature/ARSN-209-type-check-models 2022-05-20 16:05:39 +00:00
bert-e 58c24376aa Merge branch 'feature/ARSN-209-type-check-models' into tmp/octopus/w/8.1/feature/ARSN-209-type-check-models 2022-05-20 16:02:41 +00:00
Guillaume Hivert 62c13c1eed ARSN-209 Fix everything in 8.1 2022-05-20 18:00:57 +02:00
Guillaume Hivert ee81fa5829 Merge remote-tracking branch 'origin/feature/ARSN-209-type-check-models' into w/8.1/feature/ARSN-209-type-check-models 2022-05-20 16:57:12 +02:00
bert-e d7df1df2b6 Merge branch 'bugfix/ARSN-212-remove-assert-in-decoder' into tmp/octopus/w/8.1/bugfix/ARSN-212-remove-assert-in-decoder 2022-05-20 00:56:02 +00:00
bert-e f59b1b5e07 Merge branches 'w/8.1/feature/ARSN-201-type-check-versioning' and 'q/1894/7.10/feature/ARSN-201-type-check-versioning' into tmp/octopus/q/8.1 2022-05-19 08:51:50 +00:00
Guillaume Hivert a3418603d0 Merge remote-tracking branch 'origin/feature/ARSN-206-type-check-jsutil' into w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-18 11:35:20 +02:00
Guillaume Hivert 947ccd90d9 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-18 11:35:11 +02:00
Guillaume Hivert f460ffdb21 Merge remote-tracking branch 'origin/feature/ARSN-207-type-check-string-hash' into w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-18 11:24:56 +02:00
Guillaume Hivert dfa49c79c5 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-18 11:24:41 +02:00
Guillaume Hivert e582882883 Merge remote-tracking branch 'origin/feature/ARSN-208-type-check-db' into w/8.1/feature/ARSN-208-type-check-db 2022-05-18 11:11:30 +02:00
Guillaume Hivert dd61c1abbe Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-208-type-check-db 2022-05-18 11:10:56 +02:00
Guillaume Hivert a15f8a56e3 Merge remote-tracking branch 'origin/feature/ARSN-201-type-check-versioning' into w/8.1/feature/ARSN-201-type-check-versioning 2022-05-18 11:00:22 +02:00
Guillaume Hivert 43e82f7f33 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-201-type-check-versioning 2022-05-18 11:00:10 +02:00
bert-e d7625ced17 Merge branches 'w/8.1/feature/ARSN-205-type-check-error-utils' and 'q/1901/7.10/feature/ARSN-205-type-check-error-utils' into tmp/octopus/q/8.1 2022-05-17 15:05:31 +00:00
Guillaume Hivert a2c1989a5d Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-205-type-check-error-utils 2022-05-17 16:58:11 +02:00
bert-e 24755c8472 Merge branches 'w/8.1/feature/ARSN-204-type-check-shuffle' and 'q/1899/7.10/feature/ARSN-204-type-check-shuffle' into tmp/octopus/q/8.1 2022-05-17 08:19:19 +00:00
bert-e fb39a4095e Merge branches 'w/8.1/bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted' and 'q/1866/7.10/bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted' into tmp/octopus/q/8.1 2022-05-13 14:29:55 +00:00
bert-e 32dfba2f89 Merge branch 'bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted' into tmp/octopus/w/8.1/bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted 2022-05-13 14:06:42 +00:00
Guillaume Hivert a2ca197bd8 Merge remote-tracking branch 'origin/feature/ARSN-208-type-check-db' into w/8.1/feature/ARSN-208-type-check-db 2022-05-13 15:18:10 +02:00
Xin LI 3ed46f2d16 improvement: ARSN-180 bump arsenal to 8.1.48 2022-05-13 14:48:51 +02:00
williamlardier 5c936c94ee
ARSN-177: better date check 2022-05-13 14:33:13 +02:00
Xin LI f87101eef6
improvement: ARSN-197 improve code structure 2022-05-13 14:00:38 +02:00
Xin LI 14f86282b6
improvement: ARSN-197 update jsdoc 2022-05-13 13:59:37 +02:00
Xin LI f9dba52d38
improvement: ARSN-197 add index 2022-05-13 13:59:36 +02:00
Yutaka Oishi 6714aed351
improvement: ARSN-197 implement object restore request xml parser 2022-05-13 13:59:36 +02:00
williamlardier 99f96dd377
ARSN-177: accept date as valid date string after stored in the db 2022-05-13 13:59:36 +02:00
williamlardier ae08d89d7d
ARSN-177: set to undefined to clear MD 2022-05-13 13:59:35 +02:00
williamlardier c48e2948f0
ARSN-177: expose new model 2022-05-13 13:59:35 +02:00
williamlardier fc942febca
ARSN-177: better use of undefined and remove unused md field 2022-05-13 13:59:35 +02:00
williamlardier a4fe998c34
ARSN-177: complete unit tests 2022-05-13 13:59:34 +02:00
williamlardier 1460e94488
ARSN-177: return true in validator 2022-05-13 13:59:34 +02:00
williamlardier dcc7117d88
ARSN-177: add tests for new restore field 2022-05-13 13:59:33 +02:00
williamlardier 99cee367aa
ARSN-177: better isValid for class 2022-05-13 13:59:33 +02:00
williamlardier ad5a4c152d
ARSN-177: Introduce archive field in object metadata 2022-05-13 13:59:30 +02:00
bert-e b608c043f5 Merge branch 'feature/ARSN-207-type-check-string-hash' into tmp/octopus/w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-13 11:57:31 +00:00
bert-e 079c09e1ec Merge branch 'feature/ARSN-207-type-check-string-hash' into tmp/octopus/w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-13 11:55:55 +00:00
bert-e 75f07440ef Merge branch 'feature/ARSN-178-introduce-x-amz-restore-header' into q/8.1 2022-05-13 11:50:07 +00:00
bert-e 3a6bac1158 Merge branch 'feature/ARSN-206-type-check-jsutil' into tmp/octopus/w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-12 15:45:01 +00:00
bert-e f2d119326a Merge branch 'feature/ARSN-206-type-check-jsutil' into tmp/octopus/w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-12 15:44:27 +00:00
Guillaume Hivert 2a019f3788 ARSN-204 Export errorUtils 2022-05-12 17:26:38 +02:00
bert-e 5e22900c0f Merge branch 'feature/ARSN-205-type-check-error-utils' into tmp/octopus/w/8.1/feature/ARSN-205-type-check-error-utils 2022-05-12 15:25:33 +00:00
Guillaume Hivert e62ed598e8 Merge remote-tracking branch 'origin/feature/ARSN-204-type-check-shuffle' into w/8.1/feature/ARSN-204-type-check-shuffle 2022-05-12 17:20:51 +02:00
bert-e a217ad58e8 Merge branches 'w/8.1/feature/ARSN-186-type-check-clustering' and 'q/1860/7.10/feature/ARSN-186-type-check-clustering' into tmp/octopus/q/8.1 2022-05-12 14:05:31 +00:00
bert-e 10cf10daa4 Merge branch 'feature/ARSN-185-type-check-patches' into q/8.1 2022-05-12 14:01:57 +00:00
Guillaume Hivert 6ec2f99a91 Merge remote-tracking branch 'origin/development/8.1' into HEAD 2022-05-12 15:53:39 +02:00
bert-e dfd8f20bf2 Merge branch 'q/1858/7.10/feature/ARSN-183-type-check-stream' into tmp/normal/q/8.1 2022-05-12 13:52:32 +00:00
Guillaume Hivert fc17ab4299 ARSN-185 Add literal union 2022-05-12 15:51:42 +02:00
Guillaume Hivert 44f398b01f Merge remote-tracking branch 'origin/feature/ARSN-183-type-check-stream' into w/8.1/feature/ARSN-183-type-check-stream 2022-05-12 15:45:01 +02:00
Guillaume Hivert dc32d78b0f Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-183-type-check-stream 2022-05-12 15:43:56 +02:00
Guillaume Hivert 073d752ad8 Merge remote-tracking branch 'origin/bugfix/ARSN-97-stop-ignoring-ts-errors-in-yarn-install' into w/8.1/bugfix/ARSN-97-stop-ignoring-ts-errors-in-yarn-install 2022-05-12 15:25:26 +02:00
bert-e 3454e934f5 Merge branch 'feature/ARSN-201-type-check-versioning' into tmp/octopus/w/8.1/feature/ARSN-201-type-check-versioning 2022-05-12 13:18:29 +00:00
Jordi Bertran de Balanda 399fdaaed0 Merge remote-tracking branch 'origin/improvement/ARSN-203-release-7.10.24' into w/8.1/improvement/ARSN-203-release-7.10.24 2022-05-12 15:11:07 +02:00
Jordi Bertran de Balanda 5084c8f971 Merge remote-tracking branch 'origin/bugfix/ARSN-199-bugfix-https-proxy-agent' into w/8.1/bugfix/ARSN-199-bugfix-https-proxy-agent 2022-05-12 11:50:33 +02:00
williamlardier 3388de6fb6
ARSN-178: set to undefined to clear MD 2022-05-12 09:39:28 +02:00
Alexander Chan 86e9d4a356 ARSN-200: fix probe server readiness path 2022-05-10 14:26:05 -07:00
williamlardier a0010efbdd
ARSN-178: expose new model 2022-05-10 11:09:30 +02:00
Nicolas Humbert 8eb7efd58a ARSN-187 Introduce s3:PutObjectVersion action 2022-05-09 10:47:29 -07:00
williamlardier 25ae7e443b
ARSN-178: remove unused field in test 2022-05-09 16:45:14 +02:00
williamlardier 4afa1ed78d
ARSN-178: better use of undefined and remove unused md field 2022-05-09 16:45:14 +02:00
williamlardier 706dfddf5f
ARSN-178: complete unit tests 2022-05-09 16:45:13 +02:00
williamlardier 4cce306a12
ARSN-178: return true in validator 2022-05-09 16:45:13 +02:00
williamlardier f3bf6f2615
ARSN-178: better isValid for AmzRestore class 2022-05-09 16:45:13 +02:00
williamlardier bbe51b2e5e
ARSN-178: add tests for AmzRestore header 2022-05-09 16:45:12 +02:00
williamlardier 3cd06256d6
ARSN-178: add model in ObjectMD 2022-05-09 16:45:12 +02:00
Yutaka Oishi 6e42216549
ARSN-178: Add AmzRestore header and model 2022-05-09 16:45:11 +02:00
williamlardier e37712e94f
ARSN-195: bump arsenal 2022-05-09 16:28:22 +02:00
williamlardier ac30d29509
ARSN-195: add missing exports for 8.x 2022-05-09 16:25:52 +02:00
Xin LI 1f235d569d improvement: release 8.1.46 2022-05-09 15:32:39 +02:00
williamlardier 320713a764
Merge remote-tracking branch 'origin/bugfix/ARSN-195-fix-ts-migration-bugs' into w/8.1/bugfix/ARSN-195-fix-ts-migration-bugs 2022-05-09 14:59:31 +02:00
Artem Bakalov fbf686feab ARSN-194 disable short version id by default 2022-05-06 20:44:23 +00:00
Guillaume Hivert 4b795a245c ARSN-184 Fix tests 2022-05-06 16:03:36 +02:00
Guillaume Hivert 983d59d565 ARSN-184 Fix responseBody test 2022-05-06 15:58:04 +02:00
Guillaume Hivert fd7f0a1a91 ARSN-184 Fix merge 2022-05-06 15:41:42 +02:00
bert-e 459fd99316 Merge branches 'development/8.1' and 'feature/ARSN-184-type-check-s3routes' into tmp/octopus/w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-06 13:21:27 +00:00
Guillaume Hivert 235b2ac6d4 Merge remote-tracking branch 'origin/feature/ARSN-184-type-check-s3routes' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-06 15:19:05 +02:00
bert-e 8025ce08fe Merge branches 'w/8.1/feature/ARSN-171-type-s3-middlewares' and 'q/1844/7.10/feature/ARSN-171-type-s3-middlewares' into tmp/octopus/q/8.1 2022-05-06 12:50:22 +00:00
bert-e bffb00266f Merge branch 'dependabot/npm_and_yarn/ajv-6.12.3' into q/8.1 2022-05-05 17:00:41 +00:00
bert-e a6cd3a67e0 Merge branch 'dependabot/npm_and_yarn/node-forge-1.3.0' into q/8.1 2022-05-05 17:00:37 +00:00
dependabot[bot] 18605a9546
Bump ajv from 6.12.2 to 6.12.3
Bumps [ajv](https://github.com/ajv-validator/ajv) from 6.12.2 to 6.12.3.
- [Release notes](https://github.com/ajv-validator/ajv/releases)
- [Commits](https://github.com/ajv-validator/ajv/compare/v6.12.2...v6.12.3)

---
updated-dependencies:
- dependency-name: ajv
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-05 15:44:27 +00:00
dependabot[bot] 74d7fe5e68
Bump node-forge from 0.7.6 to 1.3.0
Bumps [node-forge](https://github.com/digitalbazaar/forge) from 0.7.6 to 1.3.0.
- [Release notes](https://github.com/digitalbazaar/forge/releases)
- [Changelog](https://github.com/digitalbazaar/forge/blob/main/CHANGELOG.md)
- [Commits](https://github.com/digitalbazaar/forge/compare/0.7.6...v1.3.0)

---
updated-dependencies:
- dependency-name: node-forge
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-05 15:42:41 +00:00
dependabot[bot] e707cf4398
Bump async from 2.6.3 to 2.6.4
Bumps [async](https://github.com/caolan/async) from 2.6.3 to 2.6.4.
- [Release notes](https://github.com/caolan/async/releases)
- [Changelog](https://github.com/caolan/async/blob/v2.6.4/CHANGELOG.md)
- [Commits](https://github.com/caolan/async/compare/v2.6.3...v2.6.4)

---
updated-dependencies:
- dependency-name: async
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-05 14:56:17 +00:00
bert-e 47c34a4f5c Merge branch 'dependabot/npm_and_yarn/minimist-1.2.6' into q/8.1 2022-05-05 14:33:37 +00:00
bert-e 59f7e32037 Merge branch 'feature/ARSN-179-support-restore-object' into q/8.1 2022-05-05 10:29:37 +00:00
Jordi Bertran de Balanda fb286c6403 Merge remote-tracking branch 'origin/improvement/ARSN-190-release-error-fixes' into w/8.1/improvement/ARSN-190-release-error-fixes
Release 8.1.45
2022-05-05 12:09:39 +02:00
williamlardier 7f93695300
ARSN-179: add s3 action map for RestoreObject 2022-05-05 10:10:52 +02:00
bert-e cecb5fc1b1 Merge branch 'w/8.1/bugfix/ARSN-188-fix-mongoclient-errors' into tmp/octopus/q/8.1 2022-05-05 08:07:35 +00:00
bert-e 75ba3733aa Merge branch 'bugfix/ARSN-182-error-while-listing-objects' into q/8.1 2022-05-05 06:20:16 +00:00
dependabot[bot] 7c6f5d34b8
Bump minimist from 1.2.5 to 1.2.6
Bumps [minimist](https://github.com/substack/minimist) from 1.2.5 to 1.2.6.
- [Release notes](https://github.com/substack/minimist/releases)
- [Commits](https://github.com/substack/minimist/compare/1.2.5...1.2.6)

---
updated-dependencies:
- dependency-name: minimist
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-04 18:04:18 +00:00
bert-e 7e3190a600 Merge branch 'bugfix/ARSN-188-fix-mongoclient-errors' into tmp/octopus/w/8.1/bugfix/ARSN-188-fix-mongoclient-errors 2022-05-04 12:41:33 +00:00
Jordi Bertran de Balanda e9c4a5ce99 ARSN-189 - fix probe exports 2022-05-04 11:54:13 +02:00
Guillaume Hivert f378a85799 ARSN-185 Type Check patches/locationConstraints 2022-05-03 17:54:00 +02:00
bert-e 23ea19bcb3 Merge branch 'feature/ARSN-186-type-check-clustering' into tmp/octopus/w/8.1/feature/ARSN-186-type-check-clustering 2022-05-03 15:36:22 +00:00
KillianG d2c1400cb6
ARSN-182: Return error from callback instead of internal error 2022-05-03 17:25:35 +02:00
bert-e 6da31dfd18 Merge branch 'feature/ARSN-183-type-check-stream' into tmp/octopus/w/8.1/feature/ARSN-183-type-check-stream 2022-05-03 15:14:50 +00:00
Yutaka Oishi ee1e65d778
ARSN-179: add route for RestoreObject API 2022-05-03 15:14:12 +02:00
williamlardier 3534927ccf
ARSN-179: add action map for RestoreObject API 2022-05-03 15:11:29 +02:00
Jordi Bertran de Balanda 0e3edb847e Merge remote-tracking branch 'origin/improvement/ARSN-181-release-after-error-backward-compat' into w/8.1/improvement/ARSN-181-release-after-error-backward-compat 2022-05-03 11:04:32 +02:00
bert-e a9f9fe99a5 Merge branches 'w/8.1/feature/ARSN-175-fix-errors-backwards' and 'q/1846/7.10/feature/ARSN-175-fix-errors-backwards' into tmp/octopus/q/8.1 2022-05-02 17:33:37 +00:00
Jordi Bertran de Balanda a587f78242 Merge remote-tracking branch 'origin/feature/ARSN-175-fix-errors-backwards' into w/8.1/feature/ARSN-175-fix-errors-backwards 2022-05-02 19:26:36 +02:00
Guillaume Hivert 40e5100cd8 ARSN-173 Fix BackendInfo 2022-04-29 18:08:19 +02:00
Guillaume Hivert 0851aa1406 Merge remote-tracking branch 'origin/feature/ARSN-171-type-s3-middlewares' into w/8.1/feature/ARSN-171-type-s3-middlewares 2022-04-29 17:47:22 +02:00
bert-e 3ce4effafb Merge branch 'bugfix/ARSN-172-fix-invalid-timestamp' into tmp/octopus/w/8.1/bugfix/ARSN-172-fix-invalid-timestamp 2022-04-29 12:15:13 +00:00
bert-e b1897708e5 Merge branches 'w/8.1/feature/ARSN-161-type-network' and 'q/1839/7.10/feature/ARSN-161-type-network' into tmp/octopus/q/8.1 2022-04-29 11:59:29 +00:00
bert-e 019907e2ab Merge branch 'feature/ARSN-160-support-invalid-arguments-in-errors' into q/8.1 2022-04-29 07:59:59 +00:00
bert-e 73729c7bdb Merge branches 'development/8.1' and 'feature/ARSN-161-type-network' into tmp/octopus/w/8.1/feature/ARSN-161-type-network 2022-04-28 14:42:16 +00:00
Kerkesni 3f5e553d8a
feature: ARSN-160 add support for invalid arguments in errors 2022-04-28 15:44:04 +02:00
bert-e efea69ff70 Merge branches 'w/8.1/feature/ARSN-159-type-policy-evaluator' and 'q/1832/7.10/feature/ARSN-159-type-policy-evaluator' into tmp/octopus/q/8.1 2022-04-28 08:34:32 +00:00
Guillaume Hivert 8a2b62815b ARSN-159 Add RequesterInfo unknown fields 2022-04-28 10:26:12 +02:00
bert-e 0dbbb80bea Merge branches 'w/8.1/feature/ARSN-156/release-7.10.21' and 'q/1836/7.10/feature/ARSN-156/release-7.10.21' into tmp/octopus/q/8.1 2022-04-27 16:53:55 +00:00
Guillaume Hivert 2eecda3079 ARSN-161 Fix rest/utils.ts 2022-04-27 18:08:33 +02:00
bert-e 011606e146 Merge branch 'feature/ARSN-161-type-network' into tmp/octopus/w/8.1/feature/ARSN-161-type-network 2022-04-27 16:01:22 +00:00
Guillaume Hivert 8271b3ba21 ARSN-161 Type HealthProbeServer and Utils.ts 2022-04-27 17:59:45 +02:00
Guillaume Hivert a1b980b95b Merge remote-tracking branch 'origin/feature/ARSN-161-type-network' into w/8.1/feature/ARSN-161-type-network 2022-04-27 17:47:03 +02:00
bert-e 4c47264a78 Merge branches 'w/8.1/bugfix/ARSN-168-fix-flatten-errors' and 'q/1830/7.10/bugfix/ARSN-168-fix-flatten-errors' into tmp/octopus/q/8.1 2022-04-27 09:21:16 +00:00
bert-e f69087814e Merge branch 'bugfix/ARSN-168-fix-flatten-errors' into tmp/octopus/w/8.1/bugfix/ARSN-168-fix-flatten-errors 2022-04-27 08:19:38 +00:00
Ronnie Smith cd432fa920
Merge remote-tracking branch 'origin/feature/ARSN-156/release-7.10.21' into w/8.1/feature/ARSN-156/release-7.10.21 2022-04-26 20:15:09 -07:00
Ronnie Smith af0ab673d7
Merge remote-tracking branch 'origin/feature/ARSN-156/backport-data-retrieval-style' into w/8.1/feature/ARSN-156/backport-data-retrieval-style 2022-04-26 16:40:39 -07:00
Ronnie Smith 334edbc17b
Merge remote-tracking branch 'origin/feature/ARSN-156/backport-data-retrieval-style' into w/8.1/feature/ARSN-156/backport-data-retrieval-style 2022-04-26 15:47:14 -07:00
bert-e 271b28e59b Merge branch 'feature/ARSN-159-type-policy-evaluator' into tmp/octopus/w/8.1/feature/ARSN-159-type-policy-evaluator 2022-04-26 15:16:42 +00:00
bert-e 7f641d2755 Merge branches 'q/1828/7.10/bugfix/ARSN-167/backbeat' and 'w/8.1/bugfix/ARSN-167/backbeat' into tmp/octopus/q/8.1 2022-04-26 13:13:02 +00:00
bert-e df91750c5a Merge branch 'bugfix/ARSN-167/backbeat' into tmp/octopus/w/8.1/bugfix/ARSN-167/backbeat 2022-04-26 12:55:23 +00:00
bert-e 1f2caf6a01 Merge branches 'w/8.1/feature/ARSN-169/release-7.10.19' and 'q/1826/7.10/feature/ARSN-169/release-7.10.19' into tmp/octopus/q/8.1 2022-04-26 01:45:40 +00:00
Ronnie Smith 1333195dcd
Merge remote-tracking branch 'origin/feature/ARSN-169/release-7.10.19' into w/8.1/feature/ARSN-169/release-7.10.19 2022-04-25 18:38:40 -07:00
bert-e f822c7bad9 Merge branch 'q/1812/7.10/improvement/ARSN-157-short-IDs' into tmp/normal/q/8.1 2022-04-26 01:03:38 +00:00
bert-e b3ce76d7d8 Merge branch 'w/8.1/improvement/ARSN-157-short-IDs' into tmp/normal/q/8.1 2022-04-26 01:03:38 +00:00
Artem Bakalov 18887d10b3 Merge remote-tracking branch 'origin/improvement/ARSN-157-short-IDs' into w/8.1/improvement/ARSN-157-short-IDs 2022-04-26 00:31:05 +00:00
Ronnie Smith 223897bbff
Merge remote-tracking branch 'origin/feature/ARSN-164/rpc-error-and-other-updates' into w/8.1/feature/ARSN-164/rpc-error-and-other-updates 2022-04-25 16:47:46 -07:00
bert-e e4d888c07b Merge branch 'improvement/ARSN-165/improveMongoDBClientInterfaceLogging' into q/8.1 2022-04-25 22:50:16 +00:00
bert-e dece118ba9 Merge branch 'q/1814/7.10/improvement/ARSN-162-add-getBucketTagging-error' into tmp/normal/q/8.1 2022-04-25 16:44:52 +00:00
Will Toozs a077cc199f
ARSN-162: revert NoSuchTagSet error addition 2022-04-25 12:45:33 +02:00
bert-e b0cb6d9c0f Merge branch 'improvement/ARSN-162-add-getBucketTagging-error' into tmp/octopus/w/8.1/improvement/ARSN-162-add-getBucketTagging-error 2022-04-25 08:50:17 +00:00
Alexander Chan e0da963226 ARSN-165: getLatestVersion - skip error logs for NoSuchKey errors from getLatestVersion 2022-04-22 11:31:46 -07:00
bert-e 209f3bae44 Merge branches 'w/8.1/feature/ARSN-158-type-policy' and 'q/1810/7.10/feature/ARSN-158-type-policy' into tmp/octopus/q/8.1 2022-04-22 15:55:26 +00:00
Guillaume Hivert e311f0d83d Fix StatsModel 2022-04-22 17:47:21 +02:00
Guillaume Hivert dab763884a Merge remote-tracking branch 'origin/feature/ARSN-147-type-metrics' into w/8.1/feature/ARSN-147-type-metrics 2022-04-22 17:42:21 +02:00
Guillaume Hivert 4f22e526ee Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-147-type-metrics 2022-04-22 17:41:44 +02:00
Guillaume Hivert 3951bb289c Merge remote-tracking branch 'origin/feature/ARSN-146-type-https' into w/8.1/feature/ARSN-146-type-https 2022-04-22 17:27:36 +02:00
Guillaume Hivert b97de6505c Merge remote-tracking branch 'origin/feature/ARSN-146-type-https' into w/8.1/feature/ARSN-146-type-https 2022-04-22 17:24:35 +02:00
Guillaume Hivert a5ad298c3b Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-146-type-https 2022-04-22 17:23:45 +02:00
bert-e 6919af95f2 Merge branches 'w/8.1/feature/ARSN-99-type-check-auth-folder' and 'q/1797/7.10/feature/ARSN-99-type-check-auth-folder' into tmp/octopus/q/8.1 2022-04-22 13:56:53 +00:00
Guillaume Hivert b94c13a115 ARSN-99 Update yarn 2022-04-22 14:45:39 +02:00
Guillaume Hivert 666da6b1aa Merge remote-tracking branch 'origin/feature/ARSN-99-type-check-auth-folder' into w/8.1/feature/ARSN-99-type-check-auth-folder 2022-04-22 14:43:47 +02:00
Guillaume Hivert 7192d4bc93 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-99-type-check-auth-folder 2022-04-22 14:40:18 +02:00
bert-e 1523f6baa6 Merge branch 'feature/ARSN-158-type-policy' into tmp/octopus/w/8.1/feature/ARSN-158-type-policy 2022-04-20 14:04:48 +00:00
bert-e c517e4531a Merge branches 'w/8.1/bugfix/ARSN-155-export-network-http-utils' and 'q/1804/7.10/bugfix/ARSN-155-export-network-http-utils' into tmp/octopus/q/8.1 2022-04-20 13:17:43 +00:00
Kerkesni 7bcb81985a
feature: ARSN-152 add s3:ObjectAcl:put notification support 2022-04-20 14:11:52 +02:00
bert-e 68ac02ad54 Merge branch 'feature/ARSN-154-support-object-tagging-notifications' into q/8.1 2022-04-20 12:02:45 +00:00
Guillaume Hivert 0d479c82c5 Fix tests 2022-04-20 11:19:22 +02:00
Guillaume Hivert f958ed3204 Merge remote-tracking branch 'origin/feature/ARSN-147-type-metrics' into w/8.1/feature/ARSN-147-type-metrics 2022-04-20 11:12:17 +02:00
bert-e 7d80db5d7f Merge branch 'feature/ARSN-146-type-https' into tmp/octopus/w/8.1/feature/ARSN-146-type-https 2022-04-20 08:31:17 +00:00
bert-e 34ef6d0434 Merge branch 'bugfix/ARSN-155-export-network-http-utils' into tmp/octopus/w/8.1/bugfix/ARSN-155-export-network-http-utils 2022-04-19 16:10:28 +00:00
bert-e 0ce6a79961 Merge branch 'feature/ARSN-153/modify-errors-is' into tmp/octopus/w/8.1/feature/ARSN-153/modify-errors-is 2022-04-19 14:24:56 +00:00
Kerkesni 7477b881ed
feature: ARSN-154 add support for s3:ObjectTagging notifications 2022-04-19 15:49:51 +02:00
Guillaume Hivert 3874d16f42 Merge remote-tracking branch 'origin/feature/ARSN-99-type-check-auth-folder' into w/8.1/feature/ARSN-99-type-check-auth-folder 2022-04-19 10:53:32 +02:00
Guillaume Hivert fac5605a18 Bump version to 8.1.41 2022-04-15 11:19:36 +02:00
bert-e 72057b1efc Merge branch 'q/1736/7.10/feature/ARSN-98-migrate-errors-to-typescript' into tmp/normal/q/8.1 2022-04-15 09:03:24 +00:00
bert-e 529840fa37 Merge branch 'w/8.1/feature/ARSN-98-migrate-errors-to-typescript' into tmp/normal/q/8.1 2022-04-15 09:03:24 +00:00
Guillaume Hivert 0a5f7c4ea9 Fix JS linter 2022-04-15 10:56:24 +02:00
bert-e 0e4ac99d9d Merge branch 'feature/ARSN-142-add-replication-failed-notification-support' into q/8.1 2022-04-14 22:21:20 +00:00
KillianG 218d21b819
Merge remote-tracking branch 'origin/improvement/ARSN-148-release-arsenal-7-10-17' into w/8.1/improvement/ARSN-148-release-arsenal-7-10-17 2022-04-14 19:23:54 +02:00
bert-e 9333323301 Merge branches 'w/8.1/improvement/ARSN-140-add-get-bucket-tagging-to-action-map' and 'q/1790/7.10/improvement/ARSN-140-add-get-bucket-tagging-to-action-map' into tmp/octopus/q/8.1 2022-04-14 16:49:41 +00:00
bert-e e5929b9f91 Merge branch 'improvement/ARSN-140-add-get-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-140-add-get-bucket-tagging-to-action-map 2022-04-14 16:43:33 +00:00
bert-e 8998544c06 Merge branches 'w/8.1/improvement/ARSN-139-delete-bucket-tagging-to-action-map' and 'q/1789/7.10/improvement/ARSN-139-delete-bucket-tagging-to-action-map' into tmp/octopus/q/8.1 2022-04-14 16:37:07 +00:00
KillianG df33583aea
fixup lint 2022-04-14 18:30:56 +02:00
KillianG 050d649db5
fix conflict 2022-04-14 18:27:21 +02:00
bert-e de81f65306 Merge branch 'improvement/ARSN-138-add-put-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-138-add-put-bucket-tagging-to-action-map 2022-04-14 11:58:09 +00:00
bert-e 5eaf67ac93 Merge branch 'improvement/ARSN-138-add-put-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-138-add-put-bucket-tagging-to-action-map 2022-04-14 07:33:59 +00:00
bert-e 193a399ae2 Merge branch 'improvement/ARSN-138-add-put-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-138-add-put-bucket-tagging-to-action-map 2022-04-14 07:30:21 +00:00
bert-e 4de18e5b26 Merge branch 'w/8.1/improvement/ARSN-144-release-7.10.16' into tmp/octopus/q/8.1 2022-04-13 16:36:57 +00:00
Jordi Bertran de Balanda c7e2743bf9 Merge remote-tracking branch 'origin/improvement/ARSN-144-release-7.10.16' into w/8.1/improvement/ARSN-144-release-7.10.16 2022-04-13 18:23:52 +02:00
Jordi Bertran de Balanda a8029d8779 ARSN-145 - release 8.1.40 2022-04-13 18:19:27 +02:00
bert-e d639f4cffe Merge branches 'w/8.1/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo' and 'q/1773/7.10/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo' into tmp/octopus/q/8.1 2022-04-13 12:53:23 +00:00
Guillaume Hivert b2ec34c8f2 Merge remote-tracking branch 'origin/feature/ARSN-98-migrate-errors-to-typescript' into w/8.1/feature/ARSN-98-migrate-errors-to-typescript 2022-04-13 14:19:16 +02:00
KillianG fb31f93829
Merge remote-tracking branch 'origin/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo' into w/8.1/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo 2022-04-13 12:32:20 +02:00
bert-e 6c6ee31f34 Merge branch 'q/1776/7.10/feature/ARSN-128/update-package-version' into tmp/normal/q/8.1 2022-04-12 18:37:07 +00:00
Kerkesni 64351cf20d
feature: ARSN-142 add support for s3:Replication:OperationFailedReplication notification 2022-04-12 15:48:44 +02:00
Ronnie Smith b58b4d0773
Merge remote-tracking branch 'origin/feature/ARSN-128/update-package-version' into w/8.1/feature/ARSN-128/update-package-version 2022-04-11 18:21:13 -07:00
Ronnie Smith 9a0915d40e
feature: ARSN-128 fix linting issues 2022-04-06 14:47:38 -07:00
Ronnie Smith 36d3a67a68
Merge remote-tracking branch 'origin/feature/ARSN-128/move-metdata-data-from-8-to-7' into w/8.1/feature/ARSN-128/move-metdata-data-from-8-to-7 2022-04-06 14:44:26 -07:00
Xin LI 3d156a58dd bugfix: ARSN-129-downgrade-socketio 2022-03-31 14:26:00 +02:00
Xin LI 7737ec4904 bugfix: ARSN-129-upgrade-socketio-fix-critical 2022-03-31 11:39:04 +02:00
Kerkesni d18f4d10bd
bump version 8.1.38 2022-03-25 14:18:53 +01:00
Kerkesni e0bc4383cd
bugfix: ARSN-124 add missing vFormat param 2022-03-25 14:17:57 +01:00
bert-e de17f221bf Merge branch 'bugfix/ARSN-116-fix-listing-master-returning-phd' into q/8.1 2022-03-25 09:46:53 +00:00
Kerkesni d46301b498
bump version 8.1.37 2022-03-25 10:32:35 +01:00
Kerkesni 0bb2a44912
bugfix: ARSN-116 Fixed listing of master keys
When the last version of an object is deleted,
a placeholder (PHD) master key is created and is kept
for 15 seconds before it gets repaired.

To avoid listing the PHD keys, we added a transform
stream that replaces PHD master keys with the last
version of that object

Added tests for PHD cases + fixed unit tests
2022-03-25 10:32:35 +01:00
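A minimal sketch of the transform described in the ARSN-116 commit above; the entry shape and the `getLatestVersion` lookup are hypothetical stand-ins for the Mongo internals, not Arsenal's actual API:
```
import { Transform, TransformCallback } from 'stream';

type Entry = { key: string, value: string };

// Hypothetical lookup returning the newest version's metadata, or null.
type GetLatestVersion = (key: string) => Promise<string | null>;

class ReplacePHDStream extends Transform {
    constructor(private readonly getLatestVersion: GetLatestVersion) {
        super({ objectMode: true });
    }

    _transform(entry: Entry, _enc: BufferEncoding, cb: TransformCallback): void {
        let md: { isPHD?: boolean };
        try {
            md = JSON.parse(entry.value);
        } catch (err) {
            return cb(null, entry); // unparseable value: forward unchanged
        }
        if (!md.isPHD) {
            return cb(null, entry); // regular master key: forward unchanged
        }
        // PHD placeholder: substitute the latest version's metadata,
        // or drop the entry if no version survives.
        this.getLatestVersion(entry.key)
            .then(latest => (latest === null
                ? cb()
                : cb(null, { key: entry.key, value: latest })))
            .catch(err => cb(err));
    }
}
```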
Guillaume Hivert 2c1fb773fd ARSN-100 Forward merge 2022-03-24 15:10:11 +01:00
Xin.LI 3528c24276
Update errors/arsenalErrors.json
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2022-03-21 14:13:26 +01:00
Xin LI 6d8294d0c0 improvement: ARSN-123 bump version 2022-03-21 13:31:53 +01:00
Xin LI 23bfc17a26 improvement: ARSN-123 modify BucketAlreadyOwnedByYou error description 2022-03-21 13:27:17 +01:00
bert-e 0f6a1f2982 Merge branch 'w/8.1/bugfix/ARSN-105/locations' into tmp/octopus/q/8.1 2022-03-16 14:55:09 +00:00
Nicolas Humbert bff13f1190 ARSN-105 test: add properties to ObjectMD location property 2022-03-15 15:30:02 -04:00
bert-e c857e743c8 Merge branch 'w/7.10/bugfix/ARSN-105/locations' into tmp/octopus/w/8.1/bugfix/ARSN-105/locations 2022-03-15 18:47:38 +00:00
Kerkesni 5f8edd35e9
v8.1.34 2022-03-15 14:35:49 +01:00
Kerkesni 3c4359b696
bugfix: ARSN-115 Fix listing algo returning phd
In the metadata backend, the PHD master is not created in v1, hence
it wasn't considered in the skipping method. In the Mongo implementation,
however, the PHD keys still need to be taken into consideration as they
are still created
2022-03-15 14:35:49 +01:00
Kerkesni 8ecf1d9808
feature: ARSN-112 bump to v8.1.33 2022-03-14 18:14:43 +01:00
Kerkesni 74e4934654
feature: ARSN-83 update putObject function & tests
A new step is added to each of the putObjectVerCase functions
(except the ones that get called when versioning is off): we
check if the version we are putting is a delete marker and, if
it's the latest version, we delete the master.
2022-03-14 15:10:54 +01:00
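A sketch of that extra step; the helper names are illustrative, not the actual MongoClientInterface internals:
```
type VersionMD = { versionId: string, isDeleteMarker?: boolean };

// Hypothetical helpers standing in for the metadata backend internals.
declare function isLatestVersion(bucket: string, key: string, versionId: string): Promise<boolean>;
declare function deleteMaster(bucket: string, key: string): Promise<void>;

// Run after a versioned put: if the version just written is a delete
// marker and is the latest, drop the master key.
async function maybeDeleteMaster(bucket: string, key: string, version: VersionMD): Promise<void> {
    if (version.isDeleteMarker
        && await isLatestVersion(bucket, key, version.versionId)) {
        await deleteMaster(bucket, key);
    }
}
```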
Kerkesni eac87fc9de
feature: ARSN-83 update deleteObject functions & tests
The master version gets deleted when the last version is a delete marker.
When deleting a versioned object, we add a new step where we look for
the latest version if the master was not found.
When a version is deleted, the master is automatically created/updated
and set to a PHD value; we then look for the latest version. If it's
a delete marker, or if no other versions are available, the PHD master
gets deleted; otherwise it gets repaired asynchronously.
2022-03-14 15:10:44 +01:00
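A sketch of that PHD resolution logic, again with illustrative helper names:
```
type VersionMD = { versionId: string, isDeleteMarker?: boolean };

// Hypothetical helpers standing in for the metadata backend internals.
declare function getLatestVersion(bucket: string, key: string): Promise<VersionMD | null>;
declare function deleteMaster(bucket: string, key: string): Promise<void>;
declare function repairMaster(bucket: string, key: string, latest: VersionMD): void;

// After a version delete leaves the master as a PHD placeholder, decide
// whether the placeholder should be removed or repaired.
async function resolvePHDMaster(bucket: string, key: string): Promise<void> {
    const latest = await getLatestVersion(bucket, key);
    if (latest === null || latest.isDeleteMarker) {
        await deleteMaster(bucket, key); // nothing valid left to point at
    } else {
        repairMaster(bucket, key, latest); // repaired asynchronously
    }
}
```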
Kerkesni e2be4d895d
feature: ARSN-83 update getObject function & tests
The master version is deleted when the last version is a delete marker,
so we no longer return a NoSuchKey error when we don't find a master.
Now, when the master is not found, we look for the latest version and
return it if it exists.
2022-03-14 15:10:32 +01:00
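A sketch of the resulting read path, assuming hypothetical `findMaster` and `getLatestVersion` lookups:
```
type ObjectMD = { isDeleteMarker?: boolean, [field: string]: unknown };

// Hypothetical lookups standing in for the metadata backend internals.
declare function findMaster(bucket: string, key: string): Promise<ObjectMD | null>;
declare function getLatestVersion(bucket: string, key: string): Promise<ObjectMD | null>;

// A missing master no longer implies NoSuchKey: fall back to the newest version.
async function getObjectMD(bucket: string, key: string): Promise<ObjectMD> {
    const master = await findMaster(bucket, key);
    if (master !== null) {
        return master;
    }
    const latest = await getLatestVersion(bucket, key);
    if (latest === null) {
        throw new Error('NoSuchKey'); // truly nothing left under this key
    }
    return latest;
}
```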
bert-e c0f7ebbaa9 Merge branch 'feature/ARSN-76-support-new-bucket-format' into q/8.1 2022-03-14 11:00:03 +00:00
Kerkesni 60fcedc251
feature: ARSN-76 Updated the MongoClientInterface unit tests
Updated tests to use the correct object keys when querying the db
2022-03-14 11:54:31 +01:00
Kerkesni 10ef395501
feature: ARSN-76 Updated deleteObjectWithCond & putObjectWithCond functions
Updated functions to use the new object key format
Added unit test + updated functional tests to use vFormat
2022-03-14 11:54:31 +01:00
Kerkesni d1c8e67901
feature: ARSN-76 Updated listObject function
Added support for the listing algorithm returning parameters that
request dual synchronized listings, since master and version keys
now have different prefixes in v1 buckets

Added unit tests and updated functional tests to list in both bucket formats

Updated listMultipartUploads to call the internal listing function with the
correct params
2022-03-14 11:54:31 +01:00
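A sketch of how one logical listing can expand into two synchronized range scans for v1 buckets; the prefix bytes are an assumption for illustration, and the prefix is assumed non-empty:
```
// Illustrative v1 prefixes; treat the exact bytes as an assumption.
const DbPrefixes = { Master: '\x7fM', Version: '\x7fV' };

// Exclusive upper bound for a non-empty prefix scan.
function inc(str: string): string {
    return str.slice(0, -1)
        + String.fromCharCode(str.charCodeAt(str.length - 1) + 1);
}

// With v1 buckets, one logical listing maps to two synchronized range
// scans: one over master keys and one over version keys.
function listingRanges(vFormat: 'v0' | 'v1', prefix: string) {
    if (vFormat === 'v0') {
        return [{ gte: prefix, lt: inc(prefix) }];
    }
    return [
        { gte: `${DbPrefixes.Master}${prefix}`, lt: inc(`${DbPrefixes.Master}${prefix}`) },
        { gte: `${DbPrefixes.Version}${prefix}`, lt: inc(`${DbPrefixes.Version}${prefix}`) },
    ];
}
```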
Kerkesni 266aabef37
feature: ARSN-76 Updated deleteObject functions
Updated the deleteObject function and its internal
functions to support the new object key format

Added unit tests + updated functional tests to test
both formatting versions
2022-03-14 11:54:31 +01:00
Kerkesni b63c909808
feature: ARSN-76 updated getObject function
Updated getObject function to get bucket vFormat
and use it to correctly format the object key

Added unit tests & updated functional tests to test
both formatting versions

Updated functional tests to use proper versions to avoid
confusion
2022-03-14 11:54:30 +01:00
Kerkesni 02ee339214
feature: ARSN-76 Updated putObject functions
Updated the putObject function to first get the bucket vFormat
attribute and pass it to internal functions

Updated the putObjectVerCase functions to use formatting helper
functions to take into account the new vFormat bucket attribute

Updated getLatestVersion to take into account the new key formatting

Added unit tests & updated functional tests to use both key formats
2022-03-14 11:54:30 +01:00
Kerkesni 5ca7f86350
feature: ARSN-76 Added support for new bucket key format attribute
Bucket metadata now has a new attribute, vFormat, that stores
the version of key formatting used for the bucket's objects

Added utility helper functions that format the keys according to
the new attribute + unit tests

Added a cache for the vFormat attribute, because of the additional
db calls that will be added

Updated putBucketAttributes so that it only updates the required
values, otherwise it overwrites the vFormat

Added a helper function that gets the bucket vFormat
2022-03-14 11:54:30 +01:00
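A sketch of what such formatting helpers and the vFormat cache could look like; the prefixes and separator byte are illustrative assumptions, not a spec:
```
// Illustrative v1 prefixes; the exact bytes are an assumption.
const DbPrefixes = { Master: '\x7fM', Version: '\x7fV' };
const VID_SEP = '\0'; // separator between object name and versionId

function formatMasterKey(vFormat: 'v0' | 'v1', objName: string): string {
    return vFormat === 'v1' ? `${DbPrefixes.Master}${objName}` : objName;
}

function formatVersionKey(vFormat: 'v0' | 'v1', objName: string, versionId: string): string {
    const base = vFormat === 'v1' ? `${DbPrefixes.Version}${objName}` : objName;
    return `${base}${VID_SEP}${versionId}`;
}

// Simple per-bucket cache for the vFormat attribute, filled lazily to
// avoid one extra metadata read per operation.
const vFormatCache = new Map<string, 'v0' | 'v1'>();
```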
Kerkesni 50a4fd8dc1
improvement: ARSN-110 document new bucket key format 2022-03-14 11:43:45 +01:00
bert-e 5de0c2a7da Merge branch 'improvement/ARSN-88-add-MongoClientInterface-tests' into q/8.1 2022-03-01 15:34:24 +00:00
Kerkesni b942516dca
improvement: ARSN-88 Fix withCond tests 2022-03-01 16:29:29 +01:00
Kerkesni 54181af522
improvement: ARSN-88 Add deleteObjectMD tests 2022-03-01 16:28:04 +01:00
Kerkesni 21af204956
improvement: ARSN-88 Add getObjectMD tests 2022-03-01 16:25:27 +01:00
Kerkesni 68a27be345
improvement: ARSN-88 Add listObject tests 2022-03-01 16:25:17 +01:00
Kerkesni 06350ffe15
improvement: ARSN-88 Add putObjectMD tests 2022-03-01 16:24:19 +01:00
Taylor McKinnon 5da4cd88ff v8.1.32 2022-02-24 11:26:58 -08:00
bert-e 6bb68ee0e3 Merge branch 'feature/ARSN-75/support_abortmpu_put' into q/8.1 2022-02-24 19:09:16 +00:00
Taylor McKinnon 9a4bae40e6 ft(ARSN-75): Add support for AbortMPU PUT 2022-02-24 10:15:15 -08:00
bert-e 54e9635cab Merge branch 'feature/ARSN-84-switch-to-jest' into q/8.1 2022-02-24 09:52:09 +00:00
Vianney Rancurel b8f803338b ft: ARSN-95 Skip missing in index.js 2022-02-23 15:32:17 -08:00
Guillaume Hivert 4a1215adb5 ARSN-84 Fix coverage by using Istanbul and Jest
Jest coverage is dysfunctional because it's not able to cover code in
spawned subprocesses. Istanbul can, so the final setup is to launch
nyc and jest together: jest emits some coverage, and nyc merges that
coverage with its own to emit the proper final coverage files.
2022-02-23 14:36:34 +01:00
Guillaume Hivert fc8d7532c6 ARSN-84 Correct Jest configuration for test suites and coverage
Thanks to the file renaming, we can follow the jest default
configuration as much as possible. The custom options are gone; we
specify only maxWorkers (because the test suite is linear and bugs out
if run in parallel) and the files to collect coverage from.
The coverage script itself is joined into one command instead of three
to leverage the Jest builtin coverage.
2022-02-23 14:36:34 +01:00
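A sketch of a matching jest configuration (file paths are illustrative, and running a `.ts` config assumes ts-node is available):
```
// jest.config.ts - minimal setup matching the commit above.
import type { Config } from '@jest/types';

const config: Config.InitialOptions = {
    // The test suite is linear and misbehaves when parallelized.
    maxWorkers: 1,
    // Rely on the *.spec.js naming convention for test discovery and
    // restrict coverage collection to the library sources.
    collectCoverageFrom: ['lib/**/*.{js,ts}', 'index.js'],
};

export default config;
```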
Guillaume Hivert 1818bfe6c8 ARSN-84 Rename all test files from [name].js to [name].spec.js
In order to simplify jest configuration, we have to remane the files to
follow the jest convention (to have a .spec.js extension for test
files).
2022-02-23 14:36:34 +01:00
Guillaume Hivert 5cd929ea8a ARSN-84 Fix Jest timeout for long HealthProbeServer 2022-02-23 14:36:34 +01:00
Guillaume Hivert 1138ce43af ARSN-84 Fix Jest bug in _arsenalError
You can check out the bug at
https://github.com/facebook/jest/issues/2549.
The bug is inherent to jest and has been known for years: jest
switches the VM from node to its own custom VM and injects its own
set of globals. The Error provided by jest is different from the
Error provided by node, so the test `err instanceof Error` is false.
Error:
```
 Expected value to be equal to:
      true
 Received:
      false
```
2022-02-23 14:36:34 +01:00
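A realm-safe check that sidesteps the `instanceof` pitfall, as a sketch:
```
// A realm-safe alternative to `err instanceof Error`, which fails under
// jest because jest's VM supplies a different Error constructor than node's.
function isError(value: unknown): value is Error {
    return Object.prototype.toString.call(value) === '[object Error]';
}

// isError(new Error('boom')) === true, even when the Error object
// crossed a VM boundary.
```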
Guillaume Hivert 8b4e9cc0aa ARSN-84 Fix redis commands in functional tests
The switch from mocha to jest introduces some test bugs.
As far as we can tell, jest is quicker than mocha, creating some
weird behaviour: some commands sent to redis (with ioredis)
work, and some don't. Our conclusion is that redis needs
to queue requests offline to avoid micro-disconnections from
redis in development. Otherwise, we got the following error:
```
  - StatsModel class › should correctly record a new request by default
one increment

    assert.ifError(received, expected)

    Expected value ifError to:
      null
    Received:
      [Error: Stream isn't writeable and enableOfflineQueue options is
false]

    Message:
      ifError got unwanted exception: Stream isn't writeable and
enableOfflineQueue options is false
```
Switching enableOfflineQueue to true makes the test suite
succeed.
2022-02-23 14:36:34 +01:00
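A sketch of the corresponding ioredis client configuration:
```
import Redis from 'ioredis';

// With enableOfflineQueue set to true, ioredis buffers commands issued
// while the connection is (re)establishing instead of failing them
// immediately, which absorbs the micro-disconnections observed under jest.
const redis = new Redis({
    host: 'localhost',
    port: 6379,
    enableOfflineQueue: true,
});
```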
Guillaume Hivert ff6ea2a6d5 ARSN-84 Fix linting with correct indentation and trailing commas 2022-02-23 14:36:34 +01:00
Guillaume Hivert 3b3600db92 ARSN-84 Introduce Jest and reconfigure ESLint
Add Jest as a test runner as a mocha replacement to have the
TS compiling on the fly and allowing mixed sources TS/JS in the
sources (and replacing the before and after of mocha with beforeAll
and afterAll of Jest), and adding some ESLint configuration to make
ESLint happy.
2022-02-23 14:35:49 +01:00
bert-e 51c5247d01 Merge branch 'bugfix/ARSN-86-reactivate-ft-tests' into q/8.1 2022-02-19 16:17:26 +00:00
Vianney Rancurel 7813a312b5 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-87-versioning-exports-missing' into w/8.1/feature/ARSN-87-versioning-exports-missing 2022-02-18 17:15:32 -08:00
Thomas Carmet 35a4552c0f ARSN-86 reactivate functional tests
We discovered by accident that the functional tests were removed
from arsenal's 8.1 branch a while back.
This reactivates those tests
2022-02-18 16:17:07 -08:00
Vianney Rancurel 0dbdff3a00 ft: ARSN-85 export http utils for Armory 2022-02-18 15:11:16 -08:00
bert-e 80b91d724d Merge branches 'w/8.1/feature/ARSN-64-sorted-set' and 'q/1714/7.10/feature/ARSN-64-sorted-set' into tmp/octopus/q/8.1 2022-02-16 23:00:46 +00:00
bert-e 40843d4bed Merge branch 'w/7.10/improvement/ARSN-46/rollback_unneeded_changes_stab' into tmp/octopus/w/8.1/improvement/ARSN-46/rollback_unneeded_changes_stab 2022-02-16 22:56:41 +00:00
bert-e b3fd77d08f Merge branch 'feature/ARSN-64-sorted-set' into tmp/octopus/w/8.1/feature/ARSN-64-sorted-set 2022-02-16 22:49:07 +00:00
Taylor McKinnon ed6bc63e75 Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-46/rollback_unneeded_changes' into w/8.1/improvement/ARSN-46/rollback_unneeded_changes 2022-02-14 11:23:46 -08:00
Rached Ben Mustapha c95f84e887 Merge remote-tracking branch 'origin/feature/ARSN-62-awsv4-signature-session-token' into w/8.1/feature/ARSN-62-awsv4-signature-session-token 2022-02-08 22:55:47 +00:00
Nicolas Humbert 3c9ab1bb99 update linter 2022-02-07 20:50:40 +01:00
Nicolas Humbert 3c30adaf85 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-21/version' into w/8.1/feature/ARSN-21/version 2022-02-07 19:34:12 +01:00
bert-e 98edeae3f2 Merge branches 'w/8.1/feature/ARSN-21/UpgradeToNode16' and 'q/1687/7.10/feature/ARSN-21/UpgradeToNode16' into tmp/octopus/q/8.1 2022-02-07 17:06:52 +00:00
bert-e 4f15e4f267 Merge branches 'development/8.1' and 'w/7.10/feature/ARSN-21/UpgradeToNode16' into tmp/octopus/w/8.1/feature/ARSN-21/UpgradeToNode16 2022-02-07 17:04:19 +00:00
Xin LI 68c5b42e6f feature: ARSN-58-add-MD-actionMap 2022-02-02 23:55:36 +01:00
Xin LI 6933bb8422 Merge remote-tracking branch 'origin/development/8.1' into development/8.1
# Conflicts:
#	package.json
2022-02-02 23:55:08 +01:00
Xin LI 7e180fcad8 feature: ARSN-58-bump-version 2022-02-02 23:51:36 +01:00
Naren 41d482cf7d bf: ARSN-61 fix merge issues
Fix merge issues from ARSN-57.
2022-02-02 14:31:52 -08:00
Nicolas Humbert 1e334924f9 fix "Update document requires atomic operators" 2022-02-01 15:52:53 +01:00
Naren 49239cc76e Merge remote-tracking branch 'origin/w/7.10/bugfix/ARSN-57-correct-logging-client-ip' into w/8.1/bugfix/ARSN-57-correct-logging-client-ip 2022-01-28 17:23:48 -08:00
williamlardier 8d17fcac0f
ARSN-56: bump arsenal version to v8.1.23 2022-01-26 11:48:24 +01:00
williamlardier 1c3fcc5a65
ARSN-56: fix uppercase at the beginning of actionmap 2022-01-26 11:48:05 +01:00
Ronnie Smith f5b0f1e082
Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-21/UpgradeToNode16' into w/8.1/feature/ARSN-21/UpgradeToNode16 2022-01-24 14:34:29 -08:00
williamlardier 708aab707d
ARSN-55: v8.1.22 as tag 8.1.21 already exists 2022-01-24 16:22:40 +01:00
williamlardier 3a1cbdeedb
ARSN-55: add new action map 2022-01-24 16:22:09 +01:00
bert-e faf5701248 Merge branch 'w/7.10/feature/ARSN-54/RevertNode16Changes' into tmp/octopus/w/8.1/feature/ARSN-54/RevertNode16Changes 2022-01-20 23:21:12 +00:00
Ronnie Smith 4cbb5a5dd6
feature: ARSN-54 Revert Node16 changes 2022-01-20 13:07:05 -08:00
bert-e 22eca9b61c Merge branch 'w/7.10/feature/ARSN-54/RevertNode16Changes' into tmp/octopus/w/8.1/feature/ARSN-54/RevertNode16Changes 2022-01-20 20:19:44 +00:00
Naren 59a679831b Merge remote-tracking branch 'origin/improvement/ARSN-53-bump-to-7-10-6' into w/8.1/improvement/ARSN-53-bump-to-7-10-6 2022-01-19 19:54:13 -08:00
bert-e 26da124e27 Merge branches 'w/8.1/bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails' and 'q/1674/7.10/bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails' into tmp/octopus/q/8.1 2022-01-20 01:05:54 +00:00
bert-e 47b121c17b Merge branches 'w/8.1/improvement/ARSN-21-Upgrade-Node-to-16' and 'q/1649/7.10/improvement/ARSN-21-Upgrade-Node-to-16' into tmp/octopus/q/8.1 2022-01-20 00:09:24 +00:00
Ronnie Smith c605c1e1a2
feature: ARSN-21 add missing abort method 2022-01-19 15:17:57 -08:00
bert-e 994bd0a6be Merge branch 'bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails' into tmp/octopus/w/8.1/bugfix/ARSN-50-object-retention-date-with-sub-seconds-fails 2022-01-19 22:41:56 +00:00
Ronnie Smith 1e2a6c387e
improvement: ARSN-21 remove close listener 2022-01-19 12:33:45 -08:00
Ronnie Smith 1348fc820f
Merge branch 'w/8.1/improvement/ARSN-21-Upgrade-Node-to-16' of github.com:scality/Arsenal into w/8.1/improvement/ARSN-21-Upgrade-Node-to-16 2022-01-19 12:03:38 -08:00
Ronnie Smith 79a363786f
feature: ARSN-21 update node-fnctl 2022-01-19 10:07:35 -08:00
bert-e 86e3c02126 Merge branches 'w/8.1/bugfix/ARSN-35/add-http-header-too-large-error' and 'q/1611/7.10/bugfix/ARSN-35/add-http-header-too-large-error' into tmp/octopus/q/8.1 2022-01-19 00:48:16 +00:00
bert-e 8f6731aa6a Merge branch 'w/7.10/bugfix/ARSN-35/add-http-header-too-large-error' into tmp/octopus/w/8.1/bugfix/ARSN-35/add-http-header-too-large-error 2022-01-18 17:43:38 +00:00
Artem Bakalov ea2f8ebd01 v8.1.19 2022-01-14 16:17:07 -08:00
Artem Bakalov b640bbb45e S3C-2818 - forwards 408 errors as 400 to client 2022-01-14 15:33:51 -08:00
Taylor McKinnon d9fcf275ce Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-46/add_isAborted_flag' into w/8.1/improvement/ARSN-46/add_isAborted_flag 2022-01-13 13:55:05 -08:00
Ronnie Smith 66b03695c3
feature: ARSN-21 update node-fnctl 2022-01-12 18:25:02 -08:00
Rahul Padigela 3575e651e3 clean yarn cache 2022-01-11 19:01:50 -08:00
Rahul Padigela fa19a34306 Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-21-Upgrade-Node-to-16' into w/8.1/improvement/ARSN-21-Upgrade-Node-to-16 2022-01-11 18:38:44 -08:00
Xin LI 3ab7ef4e8d bugfix: ARSN-45 bump version 8.1.18 2022-01-06 10:07:21 +01:00
Xin LI e531d3eae1 bugfix: ARSN-45-mergePolicy-ignore-versionId 2022-01-05 23:24:25 +01:00
Nicolas Humbert 9ebcc9690e bump version 8.1.17 2021-12-14 10:36:50 -05:00
Nicolas Humbert 95759509cb ARSN-44 Expose backbeat metrics on standard path 2021-12-14 10:34:40 -05:00
williamlardier 6cdae52d57
improvement: ARSN-43 bump package version 2021-11-29 16:17:14 +01:00
williamlardier 995cb59db4
improvement: ARSN-43 support encrypted STS tokens 2021-11-29 15:35:37 +01:00
Alexander Chan 385e34b472 Merge remote-tracking branch 'origin/feature/ARSN-33/addExpirationHeaders' into w/8.1/feature/ARSN-33/addExpirationHeaders 2021-11-19 18:30:14 -08:00
Jonathan Gramain f102c5ec8c Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-42-addNullUploadIdField' into w/8.1/improvement/ARSN-42-addNullUploadIdField 2021-11-18 18:26:32 -08:00
bert-e e912617f02 Merge branch 'improvement/ARSN-39-support-OIDC-principals-and-conditions' into q/8.1 2021-11-12 16:02:39 +00:00
williamlardier 3abde0bc74
improvement: ARSN-39 support keycloak roles condition 2021-11-12 09:28:48 +01:00
bert-e cf49c7d8bf Merge branch 'bugfix/ARSN-40/countItemsParseContentLength' into q/8.1 2021-11-09 21:29:26 +00:00
Alexander Chan e6e49a70c9 ARSN-40: fix count-items helper to parse content-length as number
Update `MongoClientInterface::getObjectMDStats` to parse entries'
`content-length` as numbers. This is needed to avoid performing
calculations with possible mixed types.

The ticket [ZENKO-3711](https://scality.atlassian.net/browse/ZENKO-3711)
tracks the source of the string typed `content-length` insert.
2021-11-09 11:07:27 -08:00
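A sketch of the coercion, as a hypothetical aggregation helper rather than the actual `getObjectMDStats` code:
```
// Coerce content-length to a number before aggregating, since some
// entries were inserted with a string-typed value (tracked in ZENKO-3711).
function addContentLength(total: number, entry: { 'content-length'?: number | string }): number {
    const size = Number(entry['content-length'] ?? 0);
    return total + (Number.isFinite(size) ? size : 0);
}
```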
Rached Ben Mustapha 77f971957b feature: support mongodb collection sharding 2021-11-09 00:57:47 +00:00
Ronnie Smith ed1d6c12c2
feature: ARSN-34 Add patch location constraints to index
* also update package.json for new version
2021-11-05 09:35:42 -07:00
williamlardier 27f17f9535
improvement: ARSN-39 support keycloak groups condition 2021-11-05 14:58:29 +01:00
williamlardier 4658651593
improvement: ARSN-39 support OIDC principals 2021-11-05 14:54:38 +01:00
Jonathan Gramain 7af6a73b3b Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-38-replayPrefixHiddenInListings' into w/8.1/feature/ARSN-38-replayPrefixHiddenInListings 2021-11-04 15:23:54 -07:00
bert-e 8728ff5c80 Merge branch 'feature/ARSN-34_AddPatchLocations' into q/8.1 2021-11-04 21:12:47 +00:00
Ronnie Smith 7c16652e57
feature: ARSN-34 Add patch locations from cloudserver 2021-11-03 16:07:16 -07:00
bert-e 5a9d667936 Merge branch 'w/7.10/feature/ARSN-37-addUploadId' into tmp/octopus/w/8.1/feature/ARSN-37-addUploadId 2021-11-02 00:28:00 +00:00
Rahul Padigela 29dd069a5f chore: update version 2021-10-26 14:56:56 -07:00
Rahul Padigela f1793bfe51 Merge remote-tracking branch 'origin/w/7.10/improvement/ARSN-31-update-version' into w/8.1/improvement/ARSN-31-update-version 2021-10-26 14:55:38 -07:00
Rahul Padigela b42f1d3943 Merge remote-tracking branch 'origin/w/7.10/bugfix/ARSN-31-invalid-query-params' into w/8.1/bugfix/ARSN-31-invalid-query-params 2021-10-25 17:29:33 -07:00
Naren c27b359fba improvement: ARSN-30 update arsenal version
update arsenal version to 8.1.7
2021-10-22 13:47:46 -07:00
Alexandre Lavigne bb8bdbc6ea ZENKO-36446 - use bucket name in delete object tagging
(cherry picked from commit 3205ecf8c7)
2021-10-22 13:26:39 -07:00
Nicolas Humbert 413f0c9433 ARSN-27 update version to 8.1.6 2021-10-19 16:01:09 -04:00
Nicolas Humbert ab3fa2f13d ARSN-26 interrogate the default region for getBucketLocation 2021-10-19 11:01:17 -04:00
Naren bfbda5d38b improvement: ARSN-25 update version to 8.1.5 2021-10-12 13:18:12 -07:00
Naren 2e6b1791bb improvement: ARSN-24 use azure-storage@2.10.3 2021-10-11 21:42:17 -07:00
Naren 1f8cfecf43 Revert "ARSN-22 - Fix bug in `onPutOgjectTaggig` and `getObjectTagging`"
This reverts commit 6a250feea9.
2021-10-11 21:38:33 -07:00
Alexandre Lavigne 6a250feea9
ARSN-22 - Fix bug in `onPutOgjectTaggig` and `getObjectTagging`
Use actual string instead of object when building object key
(for the bucket part).
2021-10-05 11:09:00 +02:00
Thomas Carmet 0a33d4b74e ARSN-20 remove condition regarding CI behavior 2021-09-23 11:42:36 -07:00
Thomas Carmet 9a544b9890 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-20-migrate-github-actions' into w/8.1/feature/ARSN-20-migrate-github-actions 2021-09-23 11:42:11 -07:00
Ronnie Smith a2b6846e2e
bugfix: ARSN-19 Add probe utils to index.js 2021-09-17 13:38:18 -07:00
Ronnie Smith 3fdfc7196b
feature: ARSN-19 bump version 2021-09-17 11:08:20 -07:00
Ronnie Smith f602fb9601
feature: ARSN-18 and ARSN-19
* Probe server should not check for strings to handle 500
* Move sendError and sendSuccess to Util.js
* Export sendError and sendSuccess for public uses
2021-09-16 16:11:32 -07:00
Thomas Carmet c237a25448 Merge remote-tracking branch 'origin/feature/ARSN-17-fixup-7-10-mistake' into w/8.1/feature/ARSN-17-fixup-7-10-mistake 2021-08-31 10:57:53 -07:00
Thomas Carmet 5aaec6a4e6 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-17-setup-package.json' into w/8.1/feature/ARSN-17-setup-package.json 2021-08-31 10:00:01 -07:00
Thomas Carmet 11278e7334 ARSN-16 pin sproxydclient version 2021-08-30 13:44:17 -07:00
bert-e c0fe2efbc2 Merge branch 'w/7.10/feature/ARSN-12-bumpArsenalVersion-stabilization' into tmp/octopus/w/8.1/feature/ARSN-12-bumpArsenalVersion-stabilization 2021-08-26 21:48:37 +00:00
Jonathan Gramain b0633d8a13 Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-12-bumpArsenalVersion' into w/8.1/feature/ARSN-12-bumpArsenalVersion 2021-08-26 14:38:09 -07:00
bert-e b27caf5814 Merge branch 'w/7.10/feature/ARSN-12-condition-put-backport' into tmp/octopus/w/8.1/feature/ARSN-12-condition-put-backport 2021-08-25 21:07:37 +00:00
bert-e f5f6cb5692 Merge branch 'w/7.10/feature/ARSN-12-introduce-cond-put' into tmp/octopus/w/8.1/feature/ARSN-12-introduce-cond-put 2021-08-25 20:54:21 +00:00
bert-e 87ba4a7b4a Merge branches 'w/8.1/feature/ARSN-11-bump-werelogs' and 'q/1538/7.10/feature/ARSN-11-bump-werelogs' into tmp/octopus/q/8.1 2021-08-13 17:56:09 +00:00
bert-e 9ff605f875 Merge branch 'w/7.10/improvement/ARSN-13-expose-isResourceApplicable-evaluator' into tmp/octopus/w/8.1/improvement/ARSN-13-expose-isResourceApplicable-evaluator 2021-08-13 03:08:53 +00:00
Thomas Carmet 4e160db87d Merge remote-tracking branch 'origin/w/7.10/feature/ARSN-11-bump-werelogs' into w/8.1/feature/ARSN-11-bump-werelogs 2021-08-12 10:08:43 -07:00
bert-e dc698f4d5c Merge branch 'improvement/ARSN-9-kmipDeepHealthcheck' into tmp/octopus/w/8.1/improvement/ARSN-9-kmipDeepHealthcheck 2021-08-04 20:07:07 +00:00
bert-e 8c7907f753 Merge branch 'bugfix/ARSN-8_RemoveHTTPCodeAndMessageFromLog' into tmp/octopus/w/8.1/bugfix/ARSN-8_RemoveHTTPCodeAndMessageFromLog 2021-08-03 18:12:57 +00:00
bert-e 395a881d92 Merge branch 'w/7.10/bugfix/ARSN-7_SkipHeadersOn304' into tmp/octopus/w/8.1/bugfix/ARSN-7_SkipHeadersOn304 2021-07-30 23:46:09 +00:00
bert-e 3d6306d2a3 Merge branches 'w/8.1/feature/ARSN-5/addBucketInfoUIDField' and 'q/1521/7.10/feature/ARSN-5/addBucketInfoUIDField' into tmp/octopus/q/8.1 2021-07-28 16:58:33 +00:00
bert-e 681740fbe7 Merge branch 'bugfix/ARSN-6/reverse-ARSN_3' into tmp/octopus/w/8.1/bugfix/ARSN-6/reverse-ARSN_3 2021-07-28 15:10:07 +00:00
Alexander Chan d381ec14d8 Merge remote-tracking branch 'origin/feature/ARSN-5/addBucketInfoUIDField' into w/8.1/feature/ARSN-5/addBucketInfoUIDField 2021-07-27 17:05:23 -07:00
bert-e 0bdcd866bc Merge branch 'bugfix/ARSN-4-exceptionWhenKMSIsDown' into tmp/octopus/w/8.1/bugfix/ARSN-4-exceptionWhenKMSIsDown 2021-07-23 18:49:46 +00:00
Jonathan Gramain 856a1634d4 Merge remote-tracking branch 'origin/improvement/S3C-4312-backbeatEncryptionSupport-fixup' into w/8.1/improvement/S3C-4312-backbeatEncryptionSupport-fixup 2021-07-21 13:32:59 -07:00
Jonathan Gramain 2921864aac feature: ARSN-2 fix renamed encryption helper
The name of the Cloudserver config helper "isAWSServerSideEncrytion"
has been renamed to "isAWSServerSideEncryption" to fix a typo, in commit
f336541f6a

This new name needs to be fixed in the AwsBackend data location.
2021-07-20 17:31:41 -07:00
bert-e 4665f3da5c Merge branch 'feature/ARSN-2_support_per_object_sse_in_data_wrapper' into q/8.1 2021-07-20 23:51:37 +00:00
Jonathan Gramain 0df0d952d2 Merge remote-tracking branch 'origin/improvement/S3C-4312-backbeatEncryptionSupport' into w/8.1/improvement/S3C-4312-backbeatEncryptionSupport 2021-07-20 14:48:13 -07:00
bert-e 54eb3ede5f Merge branch 'bugfix/ARSN-3/remove-dirty-old-expect-header-fix' into tmp/octopus/w/8.1/bugfix/ARSN-3/remove-dirty-old-expect-header-fix 2021-07-12 14:19:53 +00:00
bert-e be4dea481d Merge branch 'bugfix/ARSN-3/remove-dirty-old-expect-header-fix' into tmp/octopus/w/8.1/bugfix/ARSN-3/remove-dirty-old-expect-header-fix 2021-07-12 14:11:54 +00:00
Rached Ben Mustapha d15e2d5df6 Merge remote-tracking branch 'origin/feature/S3C-4505-fix-user-arn-validation' into w/8.1/feature/S3C-4505-fix-user-arn-validation 2021-07-09 00:25:16 +00:00
Taylor McKinnon 93503cf505 ft(ARSN-2): Support per object encryption in data wrapper 2021-07-07 11:12:04 -07:00
bert-e 0f63de2f05 Merge branch 'bugfix/ARSN-1/remove-contentmd5-check-azure' into tmp/octopus/w/8.1/bugfix/ARSN-1/remove-contentmd5-check-azure 2021-07-06 23:35:39 +00:00
bert-e 16a5e6a550 Merge branches 'w/8.1/feature/S3C-4614/assumerole' and 'q/1495/7.10/feature/S3C-4614/assumerole' into tmp/octopus/q/8.1 2021-06-29 21:40:05 +00:00
Rached Ben Mustapha 864d2e8a28 Merge remote-tracking branch 'origin/feature/S3C-4614/assumerole' into w/8.1/feature/S3C-4614/assumerole 2021-06-29 20:35:14 +00:00
vrancurel 15703aafca Merge remote-tracking branch 'origin/feature/S3C-4552-remove-test-duplicate' into w/8.1/feature/S3C-4552-remove-test-duplicate 2021-06-29 13:04:53 -07:00
bert-e db000bc5e1 Merge branches 'w/8.1/feature/S3C-4552-tiny-version-ids' and 'q/1480/7.10/feature/S3C-4552-tiny-version-ids' into tmp/octopus/q/8.1 2021-06-29 19:27:16 +00:00
vrancurel 06c35c15a5 Merge remote-tracking branch 'origin/feature/S3C-4552-tiny-version-ids' into w/8.1/feature/S3C-4552-tiny-version-ids 2021-06-29 11:43:26 -07:00
bert-e 68c8189f53 Merge branches 'w/8.1/improvement/S3C-4110/backport' and 'q/1479/7.10/improvement/S3C-4110/backport' into tmp/octopus/q/8.1 2021-06-29 12:15:38 +00:00
bert-e 041731e6eb Merge branch 'bugfix/S3C-3744-fixEncryptionActions' into tmp/octopus/w/8.1/bugfix/S3C-3744-fixEncryptionActions 2021-06-21 23:17:35 +00:00
Nicolas Humbert d51361ce06 S3C-4110 add lifecycle tests 2021-06-10 12:55:00 -05:00
Nicolas Humbert 453fd8b722 Merge remote-tracking branch 'origin/improvement/S3C-4110/backport' into w/8.1/improvement/S3C-4110/backport 2021-06-09 18:50:31 -05:00
bert-e 2621aa7e53 Merge branches 'w/8.1/bugfix/S3C-4257_StartSeqCanBeNull' and 'q/1472/7.10/bugfix/S3C-4257_StartSeqCanBeNull' into tmp/octopus/q/8.1 2021-06-08 08:18:01 +00:00
bert-e b4aeab77b9 Merge branch 'w/7.10/bugfix/S3C-4257_StartSeqCanBeNull' into tmp/octopus/w/8.1/bugfix/S3C-4257_StartSeqCanBeNull 2021-06-08 02:49:44 +00:00
bert-e e1a3b05330 Merge branches 'w/8.1/feature/S3C-3754_add_bucketDeleteEncryption_route' and 'q/1445/7.10/feature/S3C-3754_add_bucketDeleteEncryption_route' into tmp/octopus/q/8.1 2021-05-17 17:31:25 +00:00
bert-e 0151504158 Merge branch 'feature/S3C-3754_add_bucketDeleteEncryption_route' into tmp/octopus/w/8.1/feature/S3C-3754_add_bucketDeleteEncryption_route 2021-05-17 17:28:31 +00:00
bert-e 048e8b02bc Merge branch 'bugfix/S3C-4358-add-versioned-obj-lock-actions' into tmp/octopus/w/8.1/bugfix/S3C-4358-add-versioned-obj-lock-actions 2021-05-12 23:15:43 +00:00
bert-e 1d899efec8 Merge branches 'w/8.1/improvement/S3C-4336_add_BucketInfoModelVersion' and 'q/1436/7.10/improvement/S3C-4336_add_BucketInfoModelVersion' into tmp/octopus/q/8.1 2021-05-10 20:18:36 +00:00
Taylor McKinnon 4cb8f715e9 Merge remote-tracking branch 'origin/w/7.10/improvement/S3C-4336_add_BucketInfoModelVersion' into w/8.1/improvement/S3C-4336_add_BucketInfoModelVersion 2021-05-10 13:15:26 -07:00
bert-e 580dda4d48 Merge branch 'w/7.10/improvement/S3C-4336_add_BucketInfoModelVersion' into tmp/octopus/w/8.1/improvement/S3C-4336_add_BucketInfoModelVersion 2021-05-10 20:02:51 +00:00
bert-e a17054e3a4
Merge branch 'w/7.10/feature/S3C-4073_AddProbeServerToIndex' into tmp/octopus/w/8.1/feature/S3C-4073_AddProbeServerToIndex 2021-05-07 10:34:05 -07:00
bert-e a8df2b7b96 Merge branch 'w/7.10/feature/S3C-4073_add-new-probe-server' into tmp/octopus/w/8.1/feature/S3C-4073_add-new-probe-server 2021-04-30 19:56:03 +00:00
Taylor McKinnon d572fc953b Merge remote-tracking branch 'origin/feature/S3C-3748_add_PutBucketEncyption_handler' into w/8.1/feature/S3C-3748_add_PutBucketEncyption_handler 2021-04-29 10:10:04 -07:00
Alexander Chan 2a78d4f413 ZENKO-3368: add auth chain backend 2021-04-24 12:47:55 -07:00
Alexander Chan d2c7165214 ZENKO-3368: reorganize auth backend files 2021-04-23 12:11:09 -07:00
bert-e 1999a586fd Merge branch 'feature/S3C-3751_add_GetBucketEncryption_route' into tmp/octopus/w/8.1/feature/S3C-3751_add_GetBucketEncryption_route 2021-04-21 18:42:12 +00:00
bert-e a1c0dd2472 Merge branches 'w/8.1/bugfix/S3C-4275-versionListingWithDelimiterInefficiency' and 'q/1399/7.10/bugfix/S3C-4275-versionListingWithDelimiterInefficiency' into tmp/octopus/q/8.1 2021-04-14 01:17:38 +00:00
bert-e a22032f9a5 Merge branch 'bugfix/S3C-4245_enforce_bypassgovernancemode_policy' into tmp/octopus/w/8.1/bugfix/S3C-4245_enforce_bypassgovernancemode_policy 2021-04-13 20:25:57 +00:00
bert-e dd38e32797 Merge branch 'bugfix/S3C-4239-log-consumer-readrecords-callback-error' into tmp/octopus/w/8.1/bugfix/S3C-4239-log-consumer-readrecords-callback-error 2021-04-12 17:49:16 +00:00
bert-e 274bf80720 Merge branch 'w/7.10/bugfix/S3C-4275-versionListingWithDelimiterInefficiency' into tmp/octopus/w/8.1/bugfix/S3C-4275-versionListingWithDelimiterInefficiency 2021-04-10 00:16:31 +00:00
Ronnie Smith 25bd1f6111
Merge remote-tracking branch 'origin/w/7.10/feature/S3C-4262_BackportZenkoMetrics' into w/8.1/feature/S3C-4262_BackportZenkoMetrics 2021-04-06 02:46:29 -07:00
Jonathan Gramain 2d41b034aa Merge remote-tracking branch 'origin/w/7.10/dependabot/npm_and_yarn/development/7.4/mocha-8.0.1' into w/8.1/dependabot/npm_and_yarn/development/7.4/mocha-8.0.1 2021-04-02 13:13:53 -07:00
Rached Ben Mustapha bb8ec629bf bugfix: revert azure-storage to known working version 2021-03-31 23:15:37 -07:00
Rached Ben Mustapha 4bbaa83b87 bf: upgrade to latest hdclient 2021-03-26 15:02:02 -07:00
bert-e 58697f7915 Merge branch 'feature/S3C-4172-custom-filter' into tmp/octopus/w/8.1/feature/S3C-4172-custom-filter 2021-03-19 00:20:08 +00:00
Ronnie Smith bf4a6fe01b
feature: ZENKO-3266 Code Coverage Tracking 2021-02-19 14:21:30 -08:00
alexandre merle c703ba66e7 bugfix: S3C-2804: Use ordered bulk write and refacto condition
BulkWrite was using a value of 1 for ordered, which was
not correctly understood and led to using unordered
batch writes.
Refactor the catch of the 11000 error to allow a more
specific catch, and use a retry before sending back
an internal error in case there is a
race condition between multiple S3 connectors on the
same object versionId.
2021-02-10 18:57:38 +01:00
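A sketch of the ordered write with a duplicate-key retry, using the modern mongodb driver's types; the function name and retry count are illustrative:
```
import { Collection, AnyBulkWriteOperation } from 'mongodb';

// `ordered` must be the boolean true: the previous value of 1 was not
// interpreted as ordered and produced unordered batches. On a
// duplicate-key race (error code 11000) between S3 connectors writing
// the same versionId, retry once before surfacing an internal error.
async function orderedBulkWriteWithRetry(
    coll: Collection,
    ops: AnyBulkWriteOperation[],
    retries = 1,
): Promise<void> {
    try {
        await coll.bulkWrite(ops, { ordered: true });
    } catch (err: any) {
        if (err && err.code === 11000 && retries > 0) {
            return orderedBulkWriteWithRetry(coll, ops, retries - 1);
        }
        throw err; // anything else becomes an internal error upstream
    }
}
```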
alexandre merle 20c77f9f85 improv: ZENKO-2153: upgrade azure node sdk
Upgrade azure node sdk in order to get fixes
2021-02-10 18:41:52 +01:00
alexandre merle edb27cc9a8 bugfix: ZENKO-3240: return error when validateAndFilterMpuParts return an error 2021-02-10 18:41:52 +01:00
alexandre merle 79e0dfa38f bugfix: ZENKO-3241: considering rangeStart 0 to be valid 2021-02-10 18:41:52 +01:00
alexandre merle e1118803e6 bugfix: ZENKO-3242: wrong check for backend data 2021-02-10 18:41:52 +01:00
bert-e 1230e72c49 Merge branch 'w/7.10/bugfix/S3C-3962-zero-size-stream' into tmp/octopus/w/8.1/bugfix/S3C-3962-zero-size-stream 2021-02-10 17:31:08 +00:00
bert-e 372df634c4 Merge branch 'bugfix/S3C-3904-more-s3-action-logs' into tmp/octopus/w/8.1/bugfix/S3C-3904-more-s3-action-logs 2021-02-05 20:01:33 +00:00
bert-e 2b96888eb7 Merge branch 'w/7.9/bugfix/S3C-3904-better-s3-action-logs' into tmp/octopus/w/8.1/bugfix/S3C-3904-better-s3-action-logs 2021-02-05 18:15:28 +00:00
bert-e a0909885f1 Merge branch 'w/7.9/bugfix/S3C-3904-better-s3-action-logs' into tmp/octopus/w/8.1/bugfix/S3C-3904-better-s3-action-logs 2021-02-05 01:10:09 +00:00
alexandre merle 5d100645aa
Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-2201-econnreset-rest-client-keep-alive' into w/8.1/bugfix/S3C-2201-econnreset-rest-client-keep-alive 2021-01-25 20:58:32 +01:00
bert-e 356edf8478 Merge branches 'w/8.1/bugfix/S3C-3425-client-ip-extraction-logic' and 'q/1325/7.9/bugfix/S3C-3425-client-ip-extraction-logic' into tmp/octopus/q/8.1 2020-12-31 20:26:19 +00:00
bert-e 1cfb869631 Merge branch 'bugfix/S3C-3554-bucket-notif-iam-policy-eval' into tmp/octopus/w/8.1/bugfix/S3C-3554-bucket-notif-iam-policy-eval 2020-12-22 18:37:47 +00:00
bert-e 0403ca65fc Merge branch 'w/7.9/bugfix/S3C-3425-client-ip-extraction-logic' into tmp/octopus/w/8.1/bugfix/S3C-3425-client-ip-extraction-logic 2020-12-17 18:01:26 +00:00
Rahul Padigela 269e005198 improvement: S3C-3727 update deprecated Buffer usage 2020-12-16 17:52:15 -08:00
bert-e 10627f51d1 Merge branch 'w/7.9/improvement/S3C-3653-add-fields' into tmp/octopus/w/8.1/improvement/S3C-3653-add-fields 2020-12-02 07:29:04 +00:00
bert-e aa5f714081 Merge branch 'w/7.9/improvement/S3C-3475-add-actions-in-logs' into tmp/octopus/w/8.1/improvement/S3C-3475-add-actions-in-logs 2020-11-14 00:03:52 +00:00
Jonathan Gramain d27c0577ee bugfix: ZENKO-2866 abort request on backend if S3 client disconnects
Call request.abort() on the backend side when an S3 client
disconnects, to avoid leaking sockets. Also make sure request.abort(),
triggered through the stream destroy() call, is called asynchronously
from the stream creation.
2020-11-10 11:10:19 -08:00
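A sketch of the wiring, with hypothetical stream and request names:
```
import type { ClientRequest } from 'http';
import type { Readable } from 'stream';

// When the S3 client's stream closes early, tear down the in-flight
// backend request so its socket is released; defer to the next tick so
// the teardown never fires synchronously during stream creation.
function abortBackendOnClientClose(clientStream: Readable, backendReq: ClientRequest): void {
    clientStream.once('close', () => {
        process.nextTick(() => backendReq.destroy());
    });
}
```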
Jonathan Gramain ff539645ea bugfix: ZENKO-2866 unit test showing the bug
Add a unit test counting the number of open sockets after an S3 client
closes the connection before the data backend has sent a GET request:
with the fix, there should be none remaining open.
2020-11-09 17:36:23 -08:00
Jonathan Gramain e5c3bb188a test: ZENKO-2866 fix DummyObjectStream
- fix the DummyObjectStream test helper _read implementation

- separate tests of the helper itself into a separate file to ease
  reuse outside the mocha test framework
2020-11-09 17:13:41 -08:00
Jonathan Gramain 2461b5c2f7 bugfix: ZENKO-2905 avoid error callback call in external AWS GET
Don't call the callback a second time on a stream error during the
execution of a GET from the AWS external backend, as that would break
the logic higher up in the code and crash a worker.
2020-11-03 16:20:02 -08:00
Jonathan Gramain 747307cac2 bugfix: ZENKO-2905 failing unit test showing the double callback 2020-11-03 16:19:53 -08:00
Jonathan Gramain 5942d9d70c test: ZENKO-2905 unit test for external backend streamed GET
Add a new unit test that does a GET on external backends with a
range. For this, introduced mocking for the backend functions
getObject() (AWS style) and getBlobToStream (Azure style) that return
bytes from a virtual 1GB object.
2020-11-03 16:19:44 -08:00
bert-e 8ed84786fc Merge branch 'w/7.9/bugfix/S3C-3388-httpServerKeepAliveTimeoutOption' into tmp/octopus/w/8.1/bugfix/S3C-3388-httpServerKeepAliveTimeoutOption 2020-10-15 19:29:57 +00:00
bert-e 1e40e76bb2 Merge branches 'w/8.1/feature/S3C-3185-CredentialReport-policy-check' and 'q/1268/7.9/feature/S3C-3185-CredentialReport-policy-check' into tmp/octopus/q/8.1 2020-10-08 22:24:21 +00:00
bert-e f4058dd6ef Merge branch 'w/7.9/bugfix/S3C-3402-removeWrongErrorLog' into tmp/octopus/w/8.1/bugfix/S3C-3402-removeWrongErrorLog 2020-10-08 20:48:44 +00:00
bert-e 04f7692bad Merge branch 'w/7.9/feature/S3C-3185-CredentialReport-policy-check' into tmp/octopus/w/8.1/feature/S3C-3185-CredentialReport-policy-check 2020-10-08 18:33:33 +00:00
bert-e 32752ac504 Merge branch 'feature/S3C-1801-policy-tag-condition-keys' into tmp/octopus/w/8.1/feature/S3C-1801-policy-tag-condition-keys 2020-09-30 19:17:28 +00:00
vrancurel 549f187893 bf: ZENKO-2768 encode tags properly
- Force LogReader to use MongoUtils.unescape().
- Change MongoUtils escape/unescape to encode only the property names
and not the values.
- Add a unit test to check that the escape/unescape works.
2020-09-03 09:52:55 -07:00
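A sketch of name-only escaping; the substitution code points are an assumption for illustration:
```
// Escape only property *names*, never values, so tag keys containing
// '.' or '$' become legal MongoDB field names.
function escapeKeys(obj: Record<string, unknown>): Record<string, unknown> {
    const out: Record<string, unknown> = {};
    for (const [key, value] of Object.entries(obj)) {
        out[key.replace(/\$/g, '\uFF04').replace(/\./g, '\uFF0E')] = value;
    }
    return out;
}

function unescapeKeys(obj: Record<string, unknown>): Record<string, unknown> {
    const out: Record<string, unknown> = {};
    for (const [key, value] of Object.entries(obj)) {
        out[key.replace(/\uFF04/g, '$').replace(/\uFF0E/g, '.')] = value;
    }
    return out;
}
```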
bert-e 93cd582e3a Merge branch 'bugfix/S3C-3303-put-empty-notif-config' into tmp/octopus/w/8.1/bugfix/S3C-3303-put-empty-notif-config 2020-09-02 22:11:45 +00:00
vrancurel 2582108f97 bf: reserialize tags in putobjectver4
This function obtains the old tags by calling getLatestVersion(), which
automatically unserializes the tags, so we should reserialize them before
the update, as done e.g. in repair().
2020-09-01 15:12:39 -07:00
bert-e b25867f9c2 Merge branch 'bugfix/ZENKO-2702_hardcodedReplicaSetName' into q/8.1 2020-09-01 18:25:37 +00:00
bert-e 7b60166d08 Merge branch 'feature/S3C-3183-getAccessKeyLastUsed-policy-support' into tmp/octopus/w/8.1/feature/S3C-3183-getAccessKeyLastUsed-policy-support 2020-08-31 11:50:48 +00:00
bert-e 8887a67261 Merge branch 'feature/S3C-2798-get-bucket-notif-queuearn' into tmp/octopus/w/8.1/feature/S3C-2798-get-bucket-notif-queuearn 2020-08-26 20:19:06 +00:00
Ronnie Smith 437ecc57f9 bugfix: Use replica set config instead of rs0 2020-08-25 14:03:06 -07:00
bert-e 759f0ef949 Merge branch 'feature/S3C-2797-queue-arn-parsing' into tmp/octopus/w/8.1/feature/S3C-2797-queue-arn-parsing 2020-08-21 20:18:48 +00:00
bert-e 0014aa3467 Merge branch 'feature/S3C-2797-export-notification-configuration' into tmp/octopus/w/8.1/feature/S3C-2797-export-notification-configuration 2020-08-20 20:50:09 +00:00
Dora Korpar 1727f4bd3f ft:S3C-2797 add bucketinfo test 2020-08-20 13:13:40 -07:00
Dora Korpar d71c8eac86 Merge remote-tracking branch 'origin/feature/S3C-2797-bucketinfo-update' into w/8.1/feature/S3C-2797-bucketinfo-update 2020-08-20 11:33:10 -07:00
bert-e 7eb6304956 Merge branch 'feature/S3C-2798-get-bucket-notification' into tmp/octopus/w/8.1/feature/S3C-2798-get-bucket-notification 2020-08-20 17:29:34 +00:00
bert-e ce98e9d104 Merge branches 'w/8.1/feature/S3C-2797-put-bucket-notifications' and 'q/1231/7.8/feature/S3C-2797-put-bucket-notifications' into tmp/octopus/q/8.1 2020-08-20 17:15:22 +00:00
bert-e 36d932bbce Merge branch 'feature/S3C-2797-put-bucket-notifications' into tmp/octopus/w/8.1/feature/S3C-2797-put-bucket-notifications 2020-08-20 17:02:39 +00:00
bert-e 7f2c40cf6d Merge branch 'feature/S3C-3229-bucketnotif-objmd-update' into tmp/octopus/w/8.1/feature/S3C-3229-bucketnotif-objmd-update 2020-08-10 20:33:35 +00:00
bert-e 6a78af0f39 Merge branch 'q/1160/7.8/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0' into tmp/normal/q/8.1 2020-07-21 00:44:31 +00:00
bert-e f73dc3dd68 Merge branch 'w/8.1/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0' into tmp/normal/q/8.1 2020-07-21 00:44:31 +00:00
Jonathan Gramain 8ec0611d08 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/lolex-6.0.0 2020-07-20 17:41:42 -07:00
Jonathan Gramain 6baca6f1e2 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/debug-2.6.9' into w/8.1/dependabot/npm_and_yarn/development/7.4/debug-2.6.9 2020-07-20 15:50:38 -07:00
bert-e 78d62636c3 Merge branch 'w/7.8/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into tmp/octopus/w/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket 2020-07-07 23:38:26 +00:00
Dora Korpar 9b8f813d02 S3C-3118 remove redundant test 2020-07-01 15:56:16 -07:00
Dora Korpar 0f70366774 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3118-flatten-retention-objmd' into w/8.1/feature/S3C-3118-flatten-retention-objmd 2020-07-01 15:50:49 -07:00
bert-e fb8cf65091 Merge branches 'w/8.1/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2' and 'q/1157/7.8/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2' into tmp/octopus/q/8.1 2020-07-01 19:33:59 +00:00
Jonathan Gramain 7792f7c603 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2' into w/8.1/dependabot/npm_and_yarn/development/7.4/ajv-6.12.2 2020-07-01 11:47:02 -07:00
bert-e 668d90b7d0 Merge branches 'w/8.1/feature/S3C-3112_ObjectLockEnabledSetterForBucket' and 'q/1174/7.8/feature/S3C-3112_ObjectLockEnabledSetterForBucket' into tmp/octopus/q/8.1 2020-06-30 21:39:40 +00:00
bert-e c1cfc59a0e Merge branches 'w/8.1/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' and 'q/1156/7.8/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' into tmp/octopus/q/8.1 2020-06-30 20:09:44 +00:00
bert-e f956b02387 Merge branch 'w/7.8/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' into tmp/octopus/w/8.1/dependabot/npm_and_yarn/development/7.4/temp-0.9.1 2020-06-30 20:07:06 +00:00
Jonathan Gramain 86bca2502e Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/temp-0.9.1' into w/8.1/dependabot/npm_and_yarn/development/7.4/temp-0.9.1 2020-06-30 13:05:00 -07:00
bert-e 3aa49eed1d Merge branches 'w/8.1/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1' and 'q/1155/7.8/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1' into tmp/octopus/q/8.1 2020-06-30 19:59:56 +00:00
Jonathan Gramain a9c3b2218f Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1' into w/8.1/dependabot/npm_and_yarn/development/7.4/ipaddr.js-1.9.1 2020-06-30 12:18:18 -07:00
Jonathan Gramain f459498e18 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/socket.io-2.3.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/socket.io-2.3.0 2020-06-29 19:22:19 -07:00
bert-e 55323aa7a2 Merge branch 'q/1153/7.8/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0' into tmp/normal/q/8.1 2020-06-30 00:53:55 +00:00
bert-e a20e875908 Merge branch 'w/8.1/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0' into tmp/normal/q/8.1 2020-06-30 00:53:55 +00:00
bert-e a3a83f5ec8 Merge branch 'q/1152/7.8/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0' into tmp/normal/q/8.1 2020-06-30 00:52:18 +00:00
bert-e 51d3312de8 Merge branch 'w/8.1/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0' into tmp/normal/q/8.1 2020-06-30 00:52:18 +00:00
Ilke 6383d14d49 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3112_ObjectLockEnabledSetterForBucket' into w/8.1/feature/S3C-3112_ObjectLockEnabledSetterForBucket 2020-06-29 14:49:35 -07:00
Jonathan Gramain 0e4035d45b Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/socket.io-client-2.3.0 2020-06-29 12:11:09 -07:00
Jonathan Gramain a18285ced8 Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0' into w/8.1/dependabot/npm_and_yarn/development/7.4/simple-glob-0.2.0 2020-06-29 12:02:12 -07:00
Rahul Padigela dc4e1829fc Merge remote-tracking branch 'origin/w/7.8/dependabot/npm_and_yarn/development/7.4/xml2js-0.4.23' into w/8.1/dependabot/npm_and_yarn/development/7.4/xml2js-0.4.23 2020-06-28 21:15:18 -07:00
bert-e 3b438e03cd Merge branch 'w/7.8/dependabot/add-v2-config-file' into tmp/octopus/w/8.1/dependabot/add-v2-config-file 2020-06-29 03:14:01 +00:00
bert-e f2787ec013 Merge branches 'w/8.1/feature/S3C-3040-object-lock-iam-policies' and 'q/1133/7.8/feature/S3C-3040-object-lock-iam-policies' into tmp/octopus/q/8.1 2020-06-26 23:12:29 +00:00
bert-e 560ccef3ec Merge branch 'w/7.8/feature/S3C-3040-object-lock-iam-policies' into tmp/octopus/w/8.1/feature/S3C-3040-object-lock-iam-policies 2020-06-26 23:09:40 +00:00
Dora Korpar 3f4ed31153 Merge remote-tracking branch 'origin/improvement/bump-version' into w/8.1/improvement/bump-version 2020-06-26 15:59:54 -07:00
Jonathan Gramain fc23f68d0f Merge remote-tracking branch 'origin/w/7.8/bugfix/S3C-2987-helperForJsonStreamParsing' into w/8.1/bugfix/S3C-2987-helperForJsonStreamParsing 2020-06-24 17:38:24 -07:00
bert-e 2a4da20c0a Merge branch 'w/7.8/feature/S3C-3069-iam-policy-support-new-apis' into tmp/octopus/w/8.1/feature/S3C-3069-iam-policy-support-new-apis 2020-06-19 22:05:37 +00:00
bert-e 14c4696482 Merge branches 'w/8.1/bugfix/S3C-2987-add-v0v1-vFormat' and 'q/1108/7.8/bugfix/S3C-2987-add-v0v1-vFormat' into tmp/octopus/q/8.1 2020-06-17 20:31:04 +00:00
bert-e 275226278f Merge branch 'w/7.8/bugfix/S3C-2987-add-v0v1-vFormat' into tmp/octopus/w/8.1/bugfix/S3C-2987-add-v0v1-vFormat 2020-06-17 18:34:04 +00:00
bert-e b4b5712df7 Merge branch 'w/7.8/improvement/S3C-3044-add-audit-log-from-vault' into tmp/octopus/w/8.1/improvement/S3C-3044-add-audit-log-from-vault 2020-06-15 13:21:44 +00:00
bert-e 750c021c37 Merge branch 'w/7.8/feature/S3C-2787-retention-parsing' into tmp/octopus/w/8.1/feature/S3C-2787-retention-parsing 2020-06-12 23:21:12 +00:00
bert-e ee4d94c0fb Merge branch 'w/7.8/feature/S3C-2788-get-retention-route' into tmp/octopus/w/8.1/feature/S3C-2788-get-retention-route 2020-06-12 19:37:11 +00:00
bert-e 98f1d219a9 Merge branch 'w/7.8/feature/S3C-2788-get-object-retention' into tmp/octopus/w/8.1/feature/S3C-2788-get-object-retention 2020-06-06 04:18:21 +00:00
Dora Korpar fb363030c0 fix linter 2020-06-05 20:49:13 -07:00
Dora Korpar 7aeb32e223 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2787-objmd-retention' into w/8.1/feature/S3C-2787-objmd-retention 2020-06-05 20:43:05 -07:00
bert-e 5bdee7eb8a Merge branch 'w/7.8/bugfix/S3C-2945_fixGetLegalHoldRoute' into tmp/octopus/w/8.1/bugfix/S3C-2945_fixGetLegalHoldRoute 2020-06-05 01:45:31 +00:00
bert-e b8fd646097 Merge branch 'w/7.8/bugfix/S3C-2899-mergeStreamDestroy' into tmp/octopus/w/8.1/bugfix/S3C-2899-mergeStreamDestroy 2020-06-01 05:41:07 +00:00
bert-e a9d6e05c6e Merge branch 'w/7.8/feature/S3C-2945_getObjectLegalHoldRoute' into tmp/octopus/w/8.1/feature/S3C-2945_getObjectLegalHoldRoute 2020-05-29 18:44:01 +00:00
Ilke dc412e8953 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2944_putObjectLegalHold' into w/8.1/feature/S3C-2944_putObjectLegalHold 2020-05-29 09:00:37 -07:00
bert-e 36b68be051 Merge branches 'w/8.1/bugfix/S3C-2899-vformatV1delimiterVersions' and 'q/1031/7.7/bugfix/S3C-2899-vformatV1delimiterVersions' into tmp/octopus/q/8.1 2020-05-21 22:39:44 +00:00
bert-e 3f19a00b32 Merge branch 'feature/S3C-2790_SupportGetObjectLockConfig' into tmp/octopus/w/8.1/feature/S3C-2790_SupportGetObjectLockConfig 2020-05-21 22:28:12 +00:00
bert-e ea8166cf7a Merge branches 'w/8.1/bugfix/S3C-2899-vformatV1delimiterMaster' and 'q/1028/7.7/bugfix/S3C-2899-vformatV1delimiterMaster' into tmp/octopus/q/8.1 2020-05-20 22:39:27 +00:00
bert-e c06f735e82 Merge branches 'w/8.1/bugfix/S3C-2899-vformatV1MPU' and 'q/1017/7.7/bugfix/S3C-2899-vformatV1MPU' into tmp/octopus/q/8.1 2020-05-20 21:03:53 +00:00
bert-e b8c4ae4203 Merge branch 'w/8.1/bugfix/S3C-2899-helperForListingAlgoGenMDParams' into tmp/octopus/q/8.1 2020-05-20 04:18:48 +00:00
Dora Korpar 0cf9a9cdd5 bf: ZENKO-2610 fromObj extra param 2020-05-19 17:51:47 -07:00
bert-e d201e572fd Merge branch 'w/7.7/bugfix/S3C-2899-vformatV1delimiterVersions' into tmp/octopus/w/8.1/bugfix/S3C-2899-vformatV1delimiterVersions 2020-05-19 23:47:34 +00:00
bert-e 400dc24281 Merge branch 'w/7.7/bugfix/S3C-2899-vformatV1delimiterMaster' into tmp/octopus/w/8.1/bugfix/S3C-2899-vformatV1delimiterMaster 2020-05-19 23:47:21 +00:00
bert-e f59cea6b34 Merge branch 'w/7.7/bugfix/S3C-2899-vformatV1MPU' into tmp/octopus/w/8.1/bugfix/S3C-2899-vformatV1MPU 2020-05-19 23:47:08 +00:00
bert-e f19feb949d Merge branch 'w/7.7/bugfix/S3C-2899-helperForListingAlgoGenMDParams' into tmp/octopus/w/8.1/bugfix/S3C-2899-helperForListingAlgoGenMDParams 2020-05-19 23:46:02 +00:00
Jonathan Gramain bbef1964d7 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2899-passVformatToListingParams' into w/8.1/bugfix/S3C-2899-passVformatToListingParams 2020-05-18 11:52:02 -07:00
bert-e 43cd5f59b0 Merge branch 'feature/S3C-2789-put-object-lock-configuration' into tmp/octopus/w/8.1/feature/S3C-2789-put-object-lock-configuration 2020-05-15 21:49:57 +00:00
bert-e dd7390ade6 Merge branches 'w/8.1/feature/S3C-2789-put-objlock-bucketinfo' and 'q/995/7.7/feature/S3C-2789-put-objlock-bucketinfo' into tmp/octopus/q/8.1 2020-05-15 18:16:23 +00:00
Dora Korpar a3739cc836 Merge remote-tracking branch 'origin/feature/S3C-2789-put-objlock-bucketinfo' into w/8.1/feature/S3C-2789-put-objlock-bucketinfo 2020-05-15 11:03:53 -07:00
bert-e 97682f56bf Merge branch 'bugfix/ZENKO-2591-correctly-encode' into q/8.1 2020-05-14 23:25:09 +00:00
bert-e ce4ca533e2 Merge branch 'w/7.7/bugfix/S3C-2899-mergeStreamTooling' into tmp/octopus/w/8.1/bugfix/S3C-2899-mergeStreamTooling 2020-05-13 22:40:45 +00:00
bert-e 26bff09887 Merge branch 'w/7.7/bugfix/S3C-2899-versioningKeyFormatConstants' into tmp/octopus/w/8.1/bugfix/S3C-2899-versioningKeyFormatConstants 2020-05-11 22:30:25 +00:00
Pepijn Van Eeckhoudt f6165146ec Correct UTF-16 surrogates URI encoding
Signed-off-by: Pepijn Van Eeckhoudt <pepijn.vaneeckhoudt@datadobi.com>
(cherry picked from commit 2c3b10521ce99129d84c9ed600d14c67ee5e41ab)
2020-05-09 09:52:04 -07:00
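The pitfall behind this fix, sketched under the assumption that the broken code percent-encoded one UTF-16 code unit at a time: iterating by code point keeps surrogate pairs together, so the encoding stays valid UTF-8:

    const s = 'a\u{1F600}b'; // one code point, two UTF-16 code units
    let encoded = '';
    for (const ch of s) { // for...of walks code points, not code units
        encoded += encodeURIComponent(ch);
    }
    // encoded === 'a%F0%9F%98%80b'; encoding charAt(i) separately
    // would throw on the lone surrogate halves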
Ilke 9f580444f3 fix 2020-05-04 21:32:37 -07:00
Ilke 93fe6fa94d Merge remote-tracking branch 'origin/feature/S3C-2785_ObjectLockCheckToBucketInfoModel' into w/8.1/feature/S3C-2785_ObjectLockCheckToBucketInfoModel 2020-05-04 18:34:19 -07:00
Jonathan Gramain d9ff2c2060 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2726-removeSomeDefaultAttributesFromObjectMD' into w/8.1/bugfix/S3C-2726-removeSomeDefaultAttributesFromObjectMD 2020-04-22 14:35:20 -07:00
bert-e e553342616 Merge branch 'w/7.7/bugfix/S3C-2668_allow_utf8_characters_in_tags' into tmp/octopus/w/8.1/bugfix/S3C-2668_allow_utf8_characters_in_tags 2020-04-14 19:46:07 +00:00
Ilke 8a9dbc4de7 Merge remote-tracking branch 'origin/improvement/S3C-2749_UnitTestsForUrlDuration' into w/8.1/improvement/S3C-2749_UnitTestsForUrlDuration 2020-04-13 13:07:28 -07:00
Jonathan Gramain 81d05b6ea8 improvement: ZENKO-2535 add microVersionId to ObjectMD
Add a new microVersionId field: a hex-encoded, randomly generated
64-bit value.

Updating the microVersionId field can be useful to:

- force updates in MongoDB when no other metadata changes

- detect a change when fields change but object version does not
  change e.g. when ingesting a putObjectTagging coming from S3C to
  Zenko

- manage concurrent updates, by adding a condition on the
  microVersionId and updating it for each metadata update to perform

In order for the change to be less intrusive, it is an optional field:
if ObjectMD.updateMicroVersionId() is not called by the client, the
metadata will not contain a microVersionId field. Clients will call
this function when needed on a case-by-case basis.
2020-04-10 15:21:50 -07:00
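A sketch of generating such a value with Node's crypto module (the actual setter is ObjectMD.updateMicroVersionId()):

    const crypto = require('crypto');

    function generateMicroVersionId() {
        // 64 random bits, hex-encoded as 16 characters
        return crypto.randomBytes(8).toString('hex');
    }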
bert-e 44b8de565f Merge branch 'feature/S3C-2729-customize-s3-presign-url-expiry' into tmp/octopus/w/8.1/feature/S3C-2729-customize-s3-presign-url-expiry 2020-04-04 00:23:49 +00:00
vrancurel 3ed66c50f6 bugfix: update master if version is gte
This change is a workaround for the fact that we do not have
micro-versions to manage small changes such as ACL or tag updates. In
the AWS S3 specification, such changes do not trigger a new version
but update the version (and the master) in place.
2020-03-17 16:26:41 -07:00
bert-e 90e1cff9f9 Merge branch 'bugfix/ZENKO-2352-httpError424IfLocationDoesNotExist' into q/8.1 2020-02-27 22:55:48 +00:00
Jonathan Gramain 9f323b32ea bugfix: ZENKO-2352 send back HTTP 424 when location does not exist
Send back an HTTP error 424 (Failed Dependency, a WebDAV extension)
instead of HTTP 503 (Service Unavailable), when the backend client
cannot retrieve the location of the data and gets a 404 error from the
server when issuing a HEAD request. This HEAD request is triggered on
the backend when a client issues a GET request on Zenko for an object
stored in a cloud backend. The error returned to the client also
contains a more specific error code "LocationNotFound", which is not
part of the AWS standard.

The immediate purpose is to have backbeat not retry on such errors, as
they might arise if e.g. a Zenko bucket is backed by an S3C location,
and the out-of-band updates lag behind, so a user might have deleted
versions on S3C before Zenko got notified of the deletion, thinking
the object is still available.

More generally, even in the hypothetical case where a server-side bug
had deleted the data, it's better not to have the client retry: this
is a definite failure and the client would just retry in vain until it
times out. Sending back a 4xx error makes it clear to the
client that it should not retry (not until something changes on its
side, like writing the same key again).

The patch applies on all supported backends: AWS, Azure, GCP.
2020-02-27 14:47:11 -08:00
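A sketch of the translation described above; the error shape and field names here are assumed for illustration, not the actual Arsenal error model:

    function translateHeadError(err) {
        if (err && err.statusCode === 404) {
            // 424 Failed Dependency, plus a Zenko-specific error code
            const error = new Error('location does not exist');
            error.code = 424;
            error.customCode = 'LocationNotFound'; // assumed field
            return error;
        }
        return err;
    }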
bert-e dee53c8ad8 Merge branches 'w/8.1/bugfix/S3C-2502-vault-req-ip-header-port' and 'q/953/7.7/bugfix/S3C-2502-vault-req-ip-header-port' into tmp/octopus/q/8.1 2020-02-26 17:51:07 +00:00
bert-e 9680071e1a Merge branch 'w/7.7/bugfix/S3C-2604-listMultipleBucketMetrics' into tmp/octopus/w/8.1/bugfix/S3C-2604-listMultipleBucketMetrics 2020-02-26 09:27:06 +00:00
bert-e 6dd3aa92a4 Merge branch 'w/7.7/bugfix/S3C-2502-vault-req-ip-header-port' into tmp/octopus/w/8.1/bugfix/S3C-2502-vault-req-ip-header-port 2020-02-25 21:39:00 +00:00
bert-e a9618bc0bb Merge branches 'w/8.1/bugfix/S3C-2604-list-multiple-bucket-metrics' and 'q/949/7.7/bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/q/8.1 2020-02-25 19:25:14 +00:00
bert-e b6042035c0 Merge branch 'w/7.7/bugfix/S3C-2604-list-multiple-bucket-metrics' into tmp/octopus/w/8.1/bugfix/S3C-2604-list-multiple-bucket-metrics 2020-02-24 15:45:41 +00:00
bert-e d2fafe8ef3 Merge branch 'w/7.7/bugfix/S3C-2623_Explicit_socket_destroyed_check-port' into tmp/octopus/w/8.1/bugfix/S3C-2623_Explicit_socket_destroyed_check-port 2020-02-24 05:28:56 +00:00
bert-e fb18cba367 Merge branch 'w/7.7/bugfix/S3C-2623_Explicit_socket_destroyed_check' into tmp/octopus/w/8.1/bugfix/S3C-2623_Explicit_socket_destroyed_check 2020-02-23 21:21:49 +00:00
bert-e bab9d5dc24 Merge branch 'w/7.7/bugfix/S3C-2502-vault-req-ip-header' into tmp/octopus/w/8.1/bugfix/S3C-2502-vault-req-ip-header 2020-02-05 22:59:34 +00:00
Alexander Chan e531e5e711 improvement: ZENKO-2278 count items code reorg 2020-01-16 21:17:21 +00:00
bert-e f54d356669 Merge branch 'w/8.1/bugfix/S3C-2541-algo-LRUCache' into tmp/octopus/q/8.1 2020-01-03 21:18:22 +00:00
Jonathan Gramain c1bb2ac058 bugfix: ZENKO-2261 effectively reuse sproxyd connections
Dependency update to pick up the sproxydclient fix (S3C-2527).
2019-12-30 11:49:35 -08:00
Jonathan Gramain d76eeeea89 Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2541-algo-LRUCache' into w/8.1/bugfix/S3C-2541-algo-LRUCache 2019-12-27 15:38:23 -08:00
Alexander Chan ad58f66981 feature: ZENKO-2089 add mongodb conditionals
add conditional put/delete operations to allow for correct blobserver
behaviors
2019-12-11 16:02:26 -08:00
bert-e 85b5599ce2 Merge branch 'bugfix/ZENKO-2250-reworkChunkedUploadStreamHandling' into q/8.1 2019-12-11 22:24:48 +00:00
Dora Korpar 3121d29140 bf: ZENKO 2219 mongo socket timeout option 2019-12-10 14:59:55 -08:00
Jonathan Gramain a75db3122f bugfix: ZENKO-2250 rework chunked upload stream handling
Adapt changes extracted from CloudServer S3C pull request (ticket
S3C-2504) to Zenko:

- Original pull request: https://github.com/scality/cloudserver/pull/2247

- Commit: 53d382f5c3
2019-12-10 12:10:02 -08:00
bert-e d994e2ae60 Merge branch 'bugfix/ZENKO-2230-crash-on-bucket-creation-error' into q/8.1 2019-11-18 19:19:45 +00:00
Rached Ben Mustapha c443793968 feature: Abstract out locations from item count
The item count scan should not really be aware of how specifically
locations are stored and configured; this allows for flexibility.
2019-11-16 14:34:44 -08:00
Rached Ben Mustapha 517a034291 bugfix: crash on createBucket error during setup 2019-11-16 12:29:27 -08:00
Rached Ben Mustapha cc6671f37c bugfix: use default auth DB in mongo log reader 2019-11-08 11:38:29 -08:00
Rached Ben Mustapha 87bb3126a3 bugfix: generalize mongodb authentication 2019-11-05 14:39:54 -08:00
bert-e cedd08686a Merge branch 'w/7.6/bugfix/S3C-2269/ArnMatch_case_sensitive_check' into tmp/octopus/w/8.1/bugfix/S3C-2269/ArnMatch_case_sensitive_check 2019-10-08 19:39:03 +00:00
bert-e 635d2fe6d9 Merge branch 'w/7.6/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens' into tmp/octopus/w/8.1/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens 2019-10-03 22:20:29 +00:00
Jianqin Wang 9557e36438 bugfix: prevent stack err for !metaHeaders 2019-09-30 14:09:57 -07:00
bert-e 2bb0e171d8 Merge branch 'bugfix/S3C-2440-get-policy-xml-error' into tmp/octopus/w/8.1/bugfix/S3C-2440-get-policy-xml-error 2019-09-23 19:30:33 +00:00
bert-e 68f5d3c9f2 Merge branch 'bugfix/S3C-2435-fix-object-action-parse' into tmp/octopus/w/8.1/bugfix/S3C-2435-fix-object-action-parse 2019-09-17 22:11:28 +00:00
vrancurel 71caf08c19 bugfix: in some cases oplog value can be undefined
E.g. collections that do not use a value field.
2019-09-17 11:15:29 -07:00
Guillaume Gimenez 38403b84aa feature: ZENKO-2088: blob-issued-etag 2019-09-09 14:06:11 -07:00
Jianqin Wang 21610dd88d feature: helper functions for blob services 2019-09-06 15:40:08 -07:00
bbuchanan9 7566d1f0a9 Revert "bugfix: S3C-2052 Delete orphaned data"
This reverts commit 5de85713ef.
2019-08-28 16:58:31 -07:00
bbuchanan9 28415a5c9b Revert "bugfix: S3C-2052 Add error functions"
This reverts commit 9d02f86cf5.
2019-08-28 14:55:28 -07:00
Taylor McKinnon 506a9ad37d improv(ZENKO-2068): Improve MongoClientInterface checkHealth 2019-08-26 11:25:53 -07:00
bert-e 1c6e56e8ef Merge branch 'bugfix/S3C-2052/delete-orphaned-data-remaining-APIs' into q/8.1 2019-08-20 18:10:33 +00:00
bbuchanan9 9d02f86cf5 bugfix: S3C-2052 Add error functions 2019-08-20 10:31:29 -07:00
bert-e 5c4547a3a9 Merge branch 'bugfix/S3C-2396-fix-bucket-policy-parsing' into tmp/octopus/w/8.1/bugfix/S3C-2396-fix-bucket-policy-parsing 2019-08-19 19:02:28 +00:00
bbuchanan9 5de85713ef bugfix: S3C-2052 Delete orphaned data 2019-08-13 14:16:38 -07:00
Rahul Padigela 68defde532 bugfix: S3C-2369 bump sproxydclient 2019-08-09 14:51:59 -07:00
Dora Korpar 9e5d4ae95b fix bucketinfo tests 2019-08-09 14:11:33 -07:00
Dora Korpar 633ce2c069 Merge remote-tracking branch 'origin/bugfix/S3C-2276-bucketinfo-update' into w/8.1/bugfix/S3C-2276-bucketinfo-update 2019-08-09 13:40:16 -07:00
Dora Korpar 08ddc07d1c Merge remote-tracking branch 'origin/feature/S3C-2276-bucket-policy-model' into w/8.1/feature/S3C-2276-bucket-policy-model 2019-08-08 16:44:00 -07:00
Katherine Laue bc6c9c8c36 update yarn.lock file 2019-08-08 11:24:10 -07:00
bert-e 3dc9b958f7 Merge branch 'w/7.5/improvement/S3C-2352-install-yarn-frozen-lockfile' into tmp/octopus/w/8.1/improvement/S3C-2352-install-yarn-frozen-lockfile 2019-08-08 18:18:51 +00:00
vrancurel 4b5c0ff923 bf: fix a typo introduced in the improvement
The improvement was missing an underscore; this was not caught by tests.
2019-08-06 13:58:03 -07:00
vrancurel 62536f66df improvement: filter out special collections
Create a function to identify special collections.
Exclude collections starting with __ .
Nevertheless, keep explicit naming on collections
that are used directly by cloudserver, even though they start with __,
for the sake of clarity.
Include a unit test.
2019-08-06 11:04:03 -07:00
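A sketch of the check this commit describes; cloudserver's own '__'-prefixed collections keep being referenced by their explicit names rather than discovered through it:

    function isSpecialCollection(name) {
        return name.startsWith('__');
    }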
bert-e 9032b89e6f Merge branch 'feature/S3C-2282-bucket-policy-validation' into tmp/octopus/w/8.1/feature/S3C-2282-bucket-policy-validation 2019-08-01 20:17:09 +00:00
vrancurel 9014761c70 bf: deserialize dots and dollars from oplog
To allow dots and dollars in tags we serialize them into a unicode
version. We need to properly deserialize them when reading the oplog.
2019-08-01 10:41:42 -07:00
bert-e 8d9864264d Merge branch 'w/7.5/improvement/S3C-2352-install-yarn' into tmp/octopus/w/8.1/improvement/S3C-2352-install-yarn 2019-07-30 11:35:10 -07:00
Rahul Padigela 839182292c Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2351-update-joi' into w/8.1/improvement/S3C-2351-update-joi 2019-07-29 16:05:13 -07:00
Rahul Padigela a197b2b6a4 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2351-update-joi' into w/8.0/improvement/S3C-2351-update-joi 2019-07-29 15:59:45 -07:00
bert-e adf6cfc8e4 Merge branch 'w/8.0/feature/S3C-2216-bump-tags-limit' into tmp/octopus/w/8.1/feature/S3C-2216-bump-tags-limit 2019-07-26 23:34:38 +00:00
bert-e 40aa7d836f Merge branch 'w/7.5/feature/S3C-2216-bump-tags-limit' into tmp/octopus/w/8.0/feature/S3C-2216-bump-tags-limit 2019-07-26 23:34:38 +00:00
bert-e 4fa15fce2a Merge branch 'w/8.0/feature/S3C-2346-bucket-policy-routes' into tmp/octopus/w/8.1/feature/S3C-2346-bucket-policy-routes 2019-07-26 17:14:33 +00:00
bert-e 279f08c870 Merge branch 'feature/S3C-2346-bucket-policy-routes' into tmp/octopus/w/8.0/feature/S3C-2346-bucket-policy-routes 2019-07-26 17:14:33 +00:00
anurag4dsb 05a8475f1c
Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2335-fixDataServerCloseSync' into w/8.1/bugfix/S3C-2335-fixDataServerCloseSync 2019-07-17 16:19:21 -07:00
anurag4dsb 8c664d9076
Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2335-fixDataServerCloseSync' into w/8.0/bugfix/S3C-2335-fixDataServerCloseSync 2019-07-17 16:16:06 -07:00
Jianqin Wang 77172f33f8 ft: ZENKO-1640 set blob metadata (support user md overwrites) 2019-07-15 10:51:48 -07:00
Guillaume Gimenez 0a0fe7f1da feature: ZENKO-1892: export azure models 2019-07-09 13:36:06 -07:00
Salim 6d7437a776 bf: allow delete markers on NFS files 2019-07-03 17:57:42 -07:00
bert-e 1a6174dadf Merge branch 'bugfix/ZENKO-1930' into q/8.1 2019-07-03 17:11:53 +00:00
vrancurel c57cde88bb fix the design of the putObjectVerCase4
- Repair master instead of creating PHD.
  - Note that backbeat has to be modified to use
  params.repairMaster instead of params.usePHD.
2019-06-28 15:06:08 -07:00
Rahul Padigela 6e97c01edd Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2127-upgrade-node' into w/8.1/improvement/S3C-2127-upgrade-node 2019-06-27 16:11:39 -07:00
Rahul Padigela dd6fde61bb Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2127-upgrade-node' into w/8.0/improvement/S3C-2127-upgrade-node 2019-06-27 16:06:24 -07:00
Benoit A 3e8c43e05b ZENKO-1930 don't call batchDelete unconditionally
If the backend does not expose the API, don't call batchDelete.

Also require a minimum of 2 keys to delete at once for the batch
delete to qualify.
2019-06-22 09:49:38 +02:00
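A sketch of the two guards, assuming a generic backend client object:

    function canBatchDelete(client, keys) {
        // only batch when the backend exposes the API and there are
        // at least 2 keys to delete at once
        return typeof client.batchDelete === 'function'
            && keys.length >= 2;
    }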
Nicolas Humbert 633efcbc50 OB-1840 Secure MongoDb access 2019-06-19 11:42:05 -07:00
Alexander Chan d99b430ac4 bugfix: ZENKO-1908 update sproxydclient 2019-06-18 13:19:21 -07:00
philipyoo 8f71d4ff03 bf: ZENKO-1736 count items scan rework
Count items scan, called by cloudserver reportHandler,
returns metrics by aggregate counts of mongo objects
and buckets. The scan is triggered each hour but blocks the
triggering request for its whole duration.

The change here is to separate the aggregate scan
from the countItems call made by reportHandler.
The scan will instead be called by a kubernetes
cronjob. Results of the scan will be saved in infostore.

Bucket info and bucket counts will still be collected on every call;
this should not take long.
2019-06-14 16:40:40 -07:00
Rahul Padigela d0f77cee75 bugfix: S3C-2243 fix check for location type
This fixes the check where the logic should be looking at the type
of the location instead of the name to leverage batch delete. It also
fixes the format sent to the sproxydclient, which expects an object
with a keys attribute whose value is an array of sproxyd keys.
2019-06-11 19:29:51 -07:00
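The corrected payload shape described above, with made-up key values:

    // sproxydclient expects an object with a `keys` attribute whose
    // value is an array of sproxyd keys, not a bare array of keys
    const payload = {
        keys: ['5e8d4f6a1c0b2d3e4f5a6b7c', '8d9e0f1a2b3c4d5e6f7a8b9c'],
    };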
bert-e 4419db7b23 Merge branch 'feature/ZENKO-1842/azure-info-models' into q/8.1 2019-06-04 20:42:38 +00:00
Rahul Padigela 3672df0fc4 bugfix: S3C-1139 return success for non-existing object deletes 2019-06-03 23:30:48 -07:00
Dora Korpar 9b223bea87 improvement: S3C 1139 implement batch delete for sproxyd client 2019-06-03 10:20:17 -07:00
Guillaume Gimenez b7dfc3a9c0 feature: ZENKO-1842: azure-info-models
Added Azure info models for storage accounts, containers and blobs
2019-05-29 17:39:18 -07:00
Dora Korpar 787f66458f bf: ZENKO 1728 sproxyd put fix 2019-05-23 17:24:38 -07:00
Dora Korpar 618b179d5c bf: ZENKO 1728 sproxyd put error tests 2019-05-23 17:01:05 -07:00
bert-e e6ddad1193 Merge branch 'w/7.5/bugfix/S3C-2172-bucket-error' into tmp/octopus/w/8.0/bugfix/S3C-2172-bucket-error 2019-05-22 23:59:47 +00:00
bert-e 6575be0050 Merge branch 'w/8.0/bugfix/S3C-2172-bucket-error' into tmp/octopus/w/8.1/bugfix/S3C-2172-bucket-error 2019-05-22 23:59:47 +00:00
Jianqin Wang 1f7263c320 Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2034-bump-ioredis' into w/8.1/improvement/S3C-2034-bump-ioredis
Update package-lock.json file
2019-05-20 16:19:31 -07:00
Jianqin Wang 9da1a8e1f7 Update package-lock.json file with ioredis 4.9.5 upgrade 2019-05-20 16:16:11 -07:00
Jianqin Wang 14f8690a9a Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2034-bump-ioredis' into w/8.1/improvement/S3C-2034-bump-ioredis 2019-05-20 15:05:07 -07:00
Jianqin Wang 700cb4eb48 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2034-bump-ioredis' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-20 14:52:03 -07:00
philipyoo 7dd4dca7e5 bf: ZENKO-1718 ingestion mongo putObjectVerCase4
Add a putObjectVerCase to MongoClientInterface for ingestion
use-cases. Remove management of master versions in the
ingestion process and rely on the natural ordering of
objects stored in mongo. Get and Delete operations will
rely on internal MongoClientInterface methods for performing
relevant operations. To do this, we set PHD on master for
each object version ingested.
2019-05-08 14:53:52 -07:00
bert-e a5d248000e Merge branch 'bugfix/ZENKO-1585-zenkoUserMDTagConstant' into q/8.1 2019-05-08 21:12:51 +00:00
Taylor McKinnon dae12b245b ft(ZENKO-1652): Update MetadataWrapper for List Blobs 2019-05-07 11:29:02 -07:00
bert-e c0129eb0d7 Merge branch 'feature/ZENKO-1755-hdclient-support' into q/8.1 2019-05-07 13:44:11 +00:00
philipyoo bd0d6c1942 bf: ZENKO-1585 zenko user-metadata header constant
This constant will be used in backbeat and cloudserver as a
user-metadata header indicating whether an object has been created in
a Zenko deployment.
2019-05-02 13:33:05 -07:00
Jonathan Gramain ed2d393e98 refactor: ZENKO-1110 remove backbeat API routes+Metrics
Remove backbeat API routes and Metrics class, as they have been moved
to backbeat repository.
2019-04-30 15:05:10 -07:00
bert-e 886110138a Merge branch 'feature/ZENKO-1760-exposePodMetricsToPrometheus' into q/8.1 2019-04-30 20:07:52 +00:00
Jonathan Gramain 397eecb370 feature: ZENKO-1760 expose prometheus metrics in HealthProbeServer
- add a class ZenkoMetrics to manage metrics process-wide, as a thin
  layer on top of prom-client module

- add a new route '/_/monitoring/metrics' exposed by the
  HealthProbeServer that runs on pods, to expose default metrics
  (nodejs etc) and custom metrics in prometheus format. Then,
  prometheus will be able to scrape them for each pod. Ideally the
  class should be renamed, maybe to MonitoringServer; this is kept for
  later as it involves a larger refactor.
2019-04-30 12:46:48 -07:00
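A minimal sketch of such a route handler, assuming a prom-client version where register.metrics() is synchronous:

    const promClient = require('prom-client');

    function handleMetricsRequest(req, res) {
        res.writeHead(200, {
            'Content-Type': promClient.register.contentType,
        });
        res.end(promClient.register.metrics());
    }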
bert-e 3623b992da Merge branch 'bugfix/ZENKO-1749-exceptionInPutBucketLifecycleConfiguration' into q/8.1 2019-04-25 23:07:44 +00:00
Jonathan Gramain 78b64bebed bugfix: ZENKO-1749 fix exception with invalid lifecycle config
An empty Filter attribute along with an invalid rule caused an exception
in LifecycleConfiguration._getRuleFilter(): make it return a proper
description string.

Also renamed the function to _getRuleFilterDesc().
2019-04-24 15:13:20 -07:00
Dora Korpar e857bb5f5a bf: S3C 2120 abort mpu timeout 2019-04-24 12:38:23 -07:00
Benoit A 9c1dab1055 ZENKO-1755 HD-97 add support hdclient
* import module hdclient
* add support for hdclient in parseLC
2019-04-24 20:39:02 +02:00
bert-e e18850911e Merge branch 'bugfix/ZENKO-1738-bucketNamesWithPeriodsTrimmed' into tmp/octopus/w/8.1/bugfix/ZENKO-1738-bucketNamesWithPeriodsTrimmed 2019-04-23 23:55:56 +00:00
Jonathan Gramain 2ff9cf866d bugfix: ZENKO-1738 bucket names with period trimmed by backbeat
Fix mongoclient.ListRecordStream to properly pass on bucket names with
periods (like foo.bar), instead of truncating the name after the first
period.

Down the line, that fixes replication for objects contained in such
buckets.
2019-04-23 16:54:27 -07:00
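A sketch of the fix: split the oplog 'ns' field on the first period only, so a collection for bucket "foo.bar" keeps its full name:

    function parseNs(ns) {
        const firstDot = ns.indexOf('.');
        return {
            dbName: ns.slice(0, firstDot),
            collectionName: ns.slice(firstDot + 1), // keeps 'foo.bar'
        };
    }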
bbuchanan9 cc6ed165dd bugfix: ZENKO-1606 MPU tagging during replication 2019-04-18 14:08:38 -07:00
Dora Korpar a6b5c21e5d bf: ZENKO 1512 metastore circular json 2019-04-17 17:02:28 -07:00
bbuchanan9 64426b1450 bugfix: ZENKO-1606 Update AWS SDK dependency 2019-04-10 09:36:14 -07:00
bert-e 160fe96b18 Merge branch 'feature/ZENKO-1616_Update_ObjectMD_model_for_azure_blob_api' into q/8.1 2019-04-02 18:39:32 +00:00
Taylor McKinnon 59290513e3 ft(ZENKO-1616): Update ObjectMD model for azure blob api 2019-04-01 14:08:03 -07:00
Rahul Padigela 6b9be35d8e bugfix: ZENKO-1681 remove deprecation warnings
Removes the warning "(node:24) [DEP0013] DeprecationWarning: Calling
an asynchronous function without callback is deprecated."
2019-04-01 13:42:58 -07:00
bbuchanan9 dffcbefe9b bugfix: ZENKO-1583 Allow options for Azure delete 2019-03-29 10:07:06 -07:00
bbuchanan9 c470cfb5b1 bugfix: ZENKO-1583 Return data in head operation 2019-03-29 10:07:06 -07:00
philipyoo abcff1b04e ft: ZENKO-1661 add ingestion all metric route 2019-03-26 12:12:50 -07:00
bbuchanan9 6791d1b561 bugfix: ZENKO-1610 Non-current version transition 2019-03-21 13:13:00 -07:00
bert-e a8e0a30918 Merge branch 'feature/ZENKO-1566-addIngestionMetricRoute' into q/8.1 2019-03-15 22:17:37 +00:00
philipyoo 487fe8bf35 ft: ZENKO-1566 add ingestion metrics routes
Add ingestion metric routes to backbeat routes.
We need to add a conditional on the response object to not include
bytes in the response.
2019-03-15 13:06:53 -07:00
bert-e b7c84ef7d3 Merge branch 'feature/S3C-2031/kmip-arsenal-errors' into tmp/octopus/w/8.0/feature/S3C-2031/kmip-arsenal-errors 2019-03-14 23:11:34 +00:00
bert-e b55295818f Merge branch 'w/8.0/feature/S3C-2031/kmip-arsenal-errors' into tmp/octopus/w/8.1/feature/S3C-2031/kmip-arsenal-errors 2019-03-14 23:11:34 +00:00
philipyoo 0213bcfd25 rf: ZENKO-1566 backbeat route dataPoints by id
Do not pass redis keys to the backbeat routes function. Instead,
data points should be identified by a common identifier (as strings),
and we can map these data points to their respective redis keys
for a given service.
2019-03-12 12:20:23 -07:00
bert-e 32b0946679 Merge branch 'feature/ZENKO-1560-addClientGetterMDWrapper' into q/8.1 2019-03-12 18:11:14 +00:00
JianqinWang bef886d8ad ZENKO-1377: Metadata mock moved back into scality/backbeat repo 2019-03-11 15:21:59 -07:00
philipyoo d44c2f123e ft: ZENKO-1560 add mongo get ingestion buckets
Add method to MongoClientInterface for fetching
ingestion buckets. Extend through MetadataWrapper.
2019-03-11 15:02:11 -07:00
bert-e f199d52c54 Merge branches 'w/8.1/feature/S3C-2002-admin-service' and 'q/722/8.0/feature/S3C-2002-admin-service' into tmp/octopus/q/8.1 2019-03-08 00:17:06 +00:00
bert-e b9c419dde7 Merge branches 'w/8.0/feature/S3C-2002-admin-service' and 'q/722/7.5/feature/S3C-2002-admin-service' into tmp/octopus/q/8.0 2019-03-08 00:17:05 +00:00
bert-e 5cf3948ba2 Merge branch 'w/8.0/feature/S3C-2002-admin-service' into tmp/octopus/w/8.1/feature/S3C-2002-admin-service 2019-03-07 19:29:21 +00:00
bert-e 226088c8fb Merge branch 'w/7.5/feature/S3C-2002-admin-service' into tmp/octopus/w/8.0/feature/S3C-2002-admin-service 2019-03-07 19:29:21 +00:00
Rahul Padigela bca10414bc Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2017-berte-fix' into w/8.1/bugfix/S3C-2017-berte-fix 2019-03-07 10:16:36 -08:00
bert-e 8f0cab8d91 Merge branch 'w/7.5/bugfix/S3C-2017-berte-fix' into tmp/octopus/w/8.0/bugfix/S3C-2017-berte-fix 2019-03-07 18:15:10 +00:00
Jonathan Gramain 40c234bb5f feature: ZENKO-1420 createAggregateETag helper cleanup
To increase reusability of createAggregateETag() helper function, pass
it a single argument which is an array of individual part ETags.
2019-03-06 15:57:17 -08:00
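A sketch of the standard S3 multipart aggregate ETag computation with that single-array signature (illustrative; the real helper lives in Arsenal):

    const crypto = require('crypto');

    function createAggregateETag(partETags) {
        // hash the concatenation of the binary MD5s of each part,
        // then append the number of parts
        const md5 = crypto.createHash('md5');
        partETags.forEach(etag => md5.update(Buffer.from(etag, 'hex')));
        return `${md5.digest('hex')}-${partETags.length}`;
    }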
bert-e 26e2b5e425 Merge branch 'w/8.1/feature/S3C-1968/kmip-highlevel-driver' into tmp/octopus/q/8.1 2019-03-05 19:57:25 +00:00
bert-e df5a61cb8d Merge branch 'feature/ZENKO-1402-move-data-wrapper' into q/8.1 2019-03-04 19:26:38 +00:00
bert-e b01a390c46 Merge branch 'w/8.0/feature/S3C-1968/kmip-highlevel-driver' into tmp/octopus/w/8.1/feature/S3C-1968/kmip-highlevel-driver 2019-03-02 00:55:37 +00:00
Guillaume Gimenez 87103f83e1 Merge remote-tracking branch 'origin/feature/S3C-1968/kmip-highlevel-driver' into w/8.0/feature/S3C-1968/kmip-highlevel-driver 2019-03-01 16:53:03 -08:00
bert-e 9ba5d64cd2 Merge branches 'w/8.1/feature/S3C-1967/kmip-lowlevel-driver' and 'q/705/8.0/feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/q/8.1 2019-03-02 00:46:13 +00:00
bert-e f4d4c9b76e Merge branches 'w/8.0/feature/S3C-1967/kmip-lowlevel-driver' and 'q/705/7.5/feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/q/8.0 2019-03-02 00:46:12 +00:00
bert-e 2c149ea9b1 Merge branch 'w/8.1/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/q/8.1 2019-03-02 00:02:51 +00:00
philipyoo 735ad74bda bf: ZENKO-1424 add bucket info to MD mock
Changes in this commit:
- Add the bucket information route to the Metadata mock.
  This is to support changes to tests in backbeat. The route
  only fetches the cseq of the bucket.
2019-03-01 12:20:44 -08:00
bert-e 1636c87556 Merge branch 'feature/ZENKO-1452-mdMockStripVersionIds' into q/8.1 2019-03-01 20:16:09 +00:00
bert-e 8e2d6d42a8 Merge branch 'w/8.0/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/w/8.1/bugfix/S3C-2006-listing-filter-value-fix 2019-03-01 19:23:52 +00:00
bert-e f11d6e223d Merge branch 'w/7.5/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/w/8.0/bugfix/S3C-2006-listing-filter-value-fix 2019-03-01 19:23:51 +00:00
philipyoo ebe2d1f24d ft: ZENKO-1452 md mock remove version id in url
For metadata mock, remove the version id in the url. The
expected response is the same.
2019-02-28 15:34:50 -08:00
bert-e 6a1bc69336 Merge branch 'w/8.0/feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/w/8.1/feature/S3C-1967/kmip-lowlevel-driver 2019-02-28 20:49:53 +00:00
bert-e 0144158a37 Merge branch 'feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/w/8.0/feature/S3C-1967/kmip-lowlevel-driver 2019-02-28 20:49:53 +00:00
bert-e aea19c9cc2 Merge branch 'w/8.0/feature/S3C-1966/kmip-tls-transport' into tmp/octopus/w/8.1/feature/S3C-1966/kmip-tls-transport 2019-02-28 19:50:13 +00:00
bert-e daaeb5637a Merge branch 'feature/S3C-1966/kmip-tls-transport' into tmp/octopus/w/8.0/feature/S3C-1966/kmip-tls-transport 2019-02-28 19:50:12 +00:00
Dora Korpar c479933448 ft: ZENKO 1402 move data wrapper 2019-02-25 16:48:40 -08:00
JianqinWang f804aa9657 ZENKO-1377: update Arsenal mock for ingestion reader tests 2019-02-25 09:38:40 -08:00
Jonathan Gramain ad35b9ec78 Merge remote-tracking branch 'origin/bugfix/ZENKO-1522-isMultipartUploadHelper' into w/8.1/bugfix/ZENKO-1522-isMultipartUploadHelper 2019-02-21 17:57:42 -08:00
Jonathan Gramain 9fe0ba5c8c bugfix: ZENKO-1522 helper ObjectMD.isMultipartUpload()
Created this helper to check what kind of CRR to execute depending on
if the object is a MPU or not.
2019-02-21 17:53:45 -08:00
bert-e 2fe1e4da3c Merge branches 'w/8.1/feature/S3C-1925/kmip-ttlv-codec' and 'q/641/8.0/feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/q/8.1 2019-02-22 00:49:10 +00:00
bert-e 6a4784417f Merge branches 'w/8.0/feature/S3C-1925/kmip-ttlv-codec' and 'q/641/7.5/feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/q/8.0 2019-02-22 00:49:09 +00:00
bert-e 0ed8c750c9 Merge branch 'w/8.0/feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/w/8.1/feature/S3C-1925/kmip-ttlv-codec 2019-02-22 00:31:26 +00:00
bert-e 0d33e5a69f Merge branch 'feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/w/8.0/feature/S3C-1925/kmip-ttlv-codec 2019-02-22 00:31:26 +00:00
bert-e ac470f4233 Merge branch 'w/8.0/bugfix/S3C-1985/listing-filter-value' into tmp/octopus/w/8.1/bugfix/S3C-1985/listing-filter-value 2019-02-19 23:45:28 +00:00
bert-e 23d406dc81 Merge branch 'w/7.5/bugfix/S3C-1985/listing-filter-value' into tmp/octopus/w/8.0/bugfix/S3C-1985/listing-filter-value 2019-02-19 23:45:28 +00:00
JianqinWang f11ccbfefa ZENKO-1377: update md mock for ingestion reader raft log tests
- update raft id expected for test bucket `bucket1`
 - clone response object before editing the base mock data
2019-02-11 16:16:54 -08:00
bert-e c8c0527f65 Merge branch 'feature/ZENKO-1446-metadata-mock' into q/8.1 2019-02-08 18:48:32 +00:00
JianqinWang d81d309420 ZENKO-1451: updating mock for use in bb 2019-02-08 08:53:40 -08:00
Dora Korpar c657b4b469 update Arsenal version 2019-02-07 18:03:48 -08:00
Dora Korpar 65c99ff86d ft: ZENKO 1402 move data backends 2019-02-07 18:03:48 -08:00
Jonathan Gramain 645433ed0c refactor: ZENKO-1420 ObjectMD.getUserMetadata()
Copy this helper from backbeat ObjectQueueEntry class since it's
related to ObjectMD and will be used by bare ObjectMD instances.

Add a short unit test too.
2019-02-06 18:50:59 -08:00
JianqinWang f9bb82ce43 ZENKO-1446: metadata-mock for bb 2019-01-30 14:37:43 -08:00
bert-e ab4500b842 Merge branch 'w/8.0/feature/S3C-1561-accountQuotas' into tmp/octopus/w/8.1/feature/S3C-1561-accountQuotas 2019-01-24 21:10:54 +00:00
bert-e 40a802b715 Merge branch 'feature/S3C-1561-accountQuotas' into tmp/octopus/w/8.0/feature/S3C-1561-accountQuotas 2019-01-24 21:10:53 +00:00
Giacomo Guiulfo 84bf7bd511 Merge remote-tracking branch 'origin/bugfix/ZENKO-1369-no-cache-option' into w/8.1/bugfix/ZENKO-1369-no-cache-option 2019-01-07 16:23:17 -08:00
Giacomo Guiulfo b5fa54ec11 bugfix(DataFileStore): add noCache option 2019-01-07 16:20:55 -08:00
Bennett Buchanan 58e9f26ae0 feature: ZENKO-1399 Check transitions time gap 2018-12-20 13:13:04 -08:00
Giacomo Guiulfo d6fdd153aa feat: add unit tests for parseURL 2018-12-18 19:15:10 -05:00
Giacomo Guiulfo 1e05f0f54e chore: move parseURL to utils 2018-12-18 18:17:01 -05:00
Giacomo Guiulfo 9c66b7ceba bugfix(ZENKO-1362): request path encoding 2018-12-18 16:56:02 -05:00
bert-e 0555d0b41a Merge branch 'feature/ZENKO-1389/zenko-md-parallel' into tmp/octopus/w/8.1/feature/ZENKO-1389/zenko-md-parallel 2018-12-18 19:42:52 +00:00
Guillaume Gimenez 39f2a53beb ft: ZENKO-1389: md proxy parallel route 2018-12-18 11:41:37 -08:00
Bennett Buchanan 0a75792ca6 feature: ZENKO-1317 AWS lifecycle compat 2018-12-17 12:45:42 -08:00
bert-e 5225fc231d Merge branch 'feature/ZENKO-1384/zenko-md-healthcheck' into tmp/octopus/w/8.1/feature/ZENKO-1384/zenko-md-healthcheck 2018-12-14 01:31:08 +00:00
Guillaume Gimenez 30c3ce1e2b ft: ZENKO-1384: md proxy healthcheck 2018-12-13 17:30:25 -08:00
Taylor McKinnon aa157c6d13 bf(ZENKO-1310): Fix HealthProbeServer flaky test 2018-12-12 17:01:04 -08:00
Bennett Buchanan 699890d2d7 feature: ZENKO-732 Lifecycle transition policies 2018-12-11 10:07:33 -08:00
Jonathan Gramain ea1a7d4d87 ZENKO-557 extend ObjectMDLocation with dataStoreVersionId
The backend version ID is part of the location for cloud backends, and
it's needed for the GC service to work on cloud locations.
2018-12-10 13:46:09 -08:00
bert-e a9297e707a Merge branch 'feature/ZENKO-717-add-replicationBackends-constant' into q/8.1 2018-12-10 18:43:41 +00:00
Bennett Buchanan 75dccc528d feature: ZENKO-733 Add setReplicationStorageType 2018-12-10 10:33:02 -08:00
bert-e 5d7cf78eda Merge branch 'feature/ZENKO-1351-pfsd-delete' into q/8.1 2018-12-07 22:49:28 +00:00
Giacomo Guiulfo 0a364fe379 feat(DataFileStore): add passthrough delete functionality 2018-12-05 11:33:56 -08:00
Rahul Padigela 345031f2bd chore: ignore a few dependencies for now 2018-12-04 17:39:01 -08:00
greenkeeper[bot] 0bc1fe1a71 chore(package): update lockfile package-lock.json 2018-12-05 01:08:29 +00:00
greenkeeper[bot] f23e457b83 docs(readme): add Greenkeeper badge 2018-12-05 01:08:25 +00:00
greenkeeper[bot] 09aca2dcf4 chore(package): update dependencies 2018-12-05 01:08:23 +00:00
greenkeeper[bot] d304334e92 chore(package): update dependencies 2018-12-05 01:08:20 +00:00
greenkeeper[bot] 7955b97810 chore: add Greenkeeper config file 2018-12-05 01:08:17 +00:00
Rahul Padigela d14cef843b feature: greenkeeper.io dependency manager boT 2018-12-04 16:57:38 -08:00
Dora Korpar f2b39fb3d7 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-1678-ipv6' into w/8.1/bugfix/S3C-1678-ipv6 2018-11-15 16:18:47 -08:00
Dora Korpar 9a009746be Merge remote-tracking branch 'origin/bugfix/S3C-1678-ipv6' into w/8.0/bugfix/S3C-1678-ipv6 2018-11-15 16:15:14 -08:00
Jeremy Desanlis 3e08bad2da ft: ZENKO-717: move replicationBackends constant from cloudServer.
With the management code moved from cloud server to its own repository,
this constant should be shared in Arsenal constants.
2018-11-15 15:22:41 -08:00
philipyoo 13b156b226 ft: ZENKO-1282 add scheduled resume for ingestion 2018-11-15 12:40:12 -08:00
JianqinWang 07f655c2f8 improvement: bump arsenal ver. for 8.1 2018-11-13 11:35:03 -08:00
JianqinWang f496cec8bf feat: ZENKO-833 add ingestion param for BucketInfo 2018-11-13 11:34:57 -08:00
bert-e 7f5413699d Merge branch 'bugfix/ZENKO-1175-oplogSkipFix' into tmp/octopus/w/8.1/bugfix/ZENKO-1175-oplogSkipFix 2018-11-08 22:47:51 +00:00
Jonathan Gramain d620fef517 bf: ZENKO-1175 fix when no saved ID exists
In the case where there is no saved ID yet (initial deployment), do
process the very first entry in the log instead of skipping it. In
practice it should not have an impact, because the very first entry in
the log is normally not due to be processed for CRR, but it ensures
correctness.
2018-11-08 14:46:04 -08:00
Jonathan Gramain 8ac3cf5548 ft: ZENKO-1175 tailable cursor to consume mongo oplog
Use a tailable cursor to keep ordering guarantees for the records we
read. This also means we have to read from the beginning when we
reconnect (at startup), and start processing once we encounter the
unique ID previously stored in zookeeper.

Also removed dispatcher mode with MongoLogReader (was only used for
the short-lived Federation deployment of Zenko).
2018-11-08 14:45:07 -08:00
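A sketch of that consumption loop with the mongodb driver; the collection handle, query, and helpers are assumed for illustration:

    const cursor = oplog.find({ ns: /^metadata\./ },
        { tailable: true, awaitData: true });
    let reachedSavedId = false;
    cursor.forEach(entry => {
        if (!reachedSavedId) {
            // skip forward until the unique ID saved in zookeeper
            reachedSavedId = matchesSavedUniqueId(entry); // assumed helper
            return;
        }
        processLogEntry(entry); // assumed helper
    }, err => {
        // on error, reconnect and re-read from the beginning
    });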
Giacomo Guiulfo ebd9a74666 feat: passthroughFile service 2018-11-02 15:52:50 -07:00
bert-e a1f9bef60e Merge branch 'feature/ZENKO-1282-ingestionPauseResume' into q/8.1 2018-10-30 16:50:19 +00:00
philipyoo 899107913c ft: ZENKO-1282 extend pause/resume for ingestion
Extend the backbeat api routes to add pause/resume for
ingestion.
2018-10-25 12:01:20 -07:00
Jonathan Gramain 18dfc6b4fa Merge remote-tracking branch 'origin/feature/S3C-1640-CRRRetryBackport' into w/8.0/feature/S3C-1640-CRRRetryBackport 2018-10-19 17:31:22 -07:00
Rahul Padigela 9fe16c64fa Merge remote-tracking branch 'origin/improvement/bumpArsenal' into w/8.0/improvement/bumpArsenal 2018-10-15 14:57:30 -07:00
vrancurel 3dee6e2d0b bugfix: manage the 'close' event on dataStream
When the underlying socket of the dataStream is closed, this
  is not considered a stream error, so we have to hook the
  event and do the cleanup ourselves.
2018-10-15 11:13:47 -07:00
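A sketch of hooking the event, with a hypothetical cleanup helper:

    let streamEnded = false;
    dataStream.on('end', () => { streamEnded = true; });
    dataStream.on('close', () => {
        // 'close' without 'end' means the socket was torn down:
        // no 'error' event fires, so clean up explicitly
        if (!streamEnded) {
            cleanupPendingRequest(); // hypothetical helper
        }
    });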
vrancurel 3545eb4d62 bugfix: close socket on client error
When receiving this callback, sometimes the socket is already
  closed (e.g. upon RST), but sometimes we have to close it ourselves.
2018-10-15 11:13:47 -07:00
Dora Korpar 0a85eeb8b7 manual edit: add metastore changes 2018-09-25 16:17:42 -07:00
Dora Korpar 83759870f2 Merge remote-tracking branch 'origin/feature/S3C-1171-listv2' into w/8.0/feature/S3C-1171-listv2 2018-09-25 16:16:51 -07:00
Alexander Chan 0d4bf3c17f ft: ZENKO-1186 stalled sanity check 2018-09-20 15:40:07 -07:00
Alexander Chan 0117b39dcf bf: ZENKO-1155 add index restriction for mongo find call 2018-09-15 19:27:56 -07:00
Bennett Buchanan 549ca1f683 bugfix: ZENKO-1144 Update route and private method 2018-09-15 10:03:44 -07:00
bert-e e4a66343fb Merge branch 'bugfix/ZENKO-1144-fixSortedSetHelper' into q/8.0 2018-09-14 21:32:15 +00:00
philipyoo a89fdde6fd bf: ZENKO-1144 fix ttl of sorted set expires
Changes in this commit:
- Fix the TTL given to Redis#expire: it takes seconds, not ms
2018-09-14 14:29:55 -07:00
philipyoo 872a2d88e5 bf: ZENKO-1144 remove redis scan in crr metrics
Changes in this commit:
- Remove use of Redis#scan. Instead build query strings
  manually
2018-09-14 08:51:40 -07:00
philipyoo 0c9c462634 bf: ZENKO-1144 add sorted set support StatsModel
Changes in this commit:
- Helper method _normalizeTimestampByHour normalizes date to
  nearest hour
- Helper method _setDateToPreviousHour sets date back 1 hour
- method getSortedSetHours returns list of 24 normalized
  hourly timestamps
- method getSortedSetCurrentHour returns normalized
  hourly timestamp based on epoch passed
- method addToSortedSet adds to a sorted set and applies
  expiry if adding to new sorted set
2018-09-14 07:40:30 -07:00
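A sketch of the two date helpers named above:

    // normalize a date down to the start of its hour
    function normalizeTimestampByHour(date) {
        const d = new Date(date);
        d.setMinutes(0, 0, 0);
        return d.getTime();
    }

    // shift a date back by exactly one hour
    function setDateToPreviousHour(date) {
        const d = new Date(date);
        d.setHours(d.getHours() - 1);
        return d;
    }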
philipyoo a3973ac7d3 ft: ZENKO-1144 redis wrapper for sorted sets
Changes in this commit:
- Add wrapper for Redis sorted set methods: ZADD, ZCARD,
  ZRANGE, ZRANGEBYSCORE, ZREM, ZSCORE
- Add wrapper for Redis methods: EXISTS
2018-09-13 18:28:48 -07:00
bert-e d1a8693fe5 Merge branch 'bugfix/ZENKO-1124-mongo-listing-loop' into q/8.0 2018-09-11 01:02:10 +00:00
Jeremy Desanlis 5687a48599 ZENKO-1124: mongo listing, avoid to loop 2018-09-10 17:07:01 -07:00
Nicolas Humbert 9dca871e1b fx: ZENKO-1112 Management client error logging 2018-09-07 10:04:26 -07:00
philipyoo 7088812c80 bf: ZENKO-1024 fix fail metrics in all route
The all-metrics function queries redis once for all data.
With the change to failure metrics, we want to pass
the request details object to the getFailedMetrics function.
2018-09-04 08:48:05 -07:00
philipyoo 9f742d4921 bf: ZENKO-1024 use pending metrics for backlog
Pending metrics don't expire, which caused problems
with the current backlog. This quick fix uses pending
metrics in place of backlog, but keeps the same names
and routes in place to avoid regression.
2018-08-31 17:19:14 -07:00
bert-e 2c31728905 Merge branch 'bugfix/ZENKO-1024/add-global-counters' into q/8.0 2018-08-24 22:06:46 +00:00
Bennett Buchanan 125ccbbfa9 bugfix: ZENKO-1024 Add pending counters 2018-08-24 14:28:36 -07:00
bert-e 40c8b37b30 Merge branch 'feature/ZENKO-1019-cancelScheduleResume' into q/8.0 2018-08-23 22:59:12 +00:00
bert-e 879075e4ec Merge branch 'bugfix/ZENKO-945-delimitermaster-filter' into tmp/octopus/w/8.0/bugfix/ZENKO-945-delimitermaster-filter 2018-08-22 23:46:28 +00:00
philipyoo 79ed68ce9f ft: ZENKO-1019 add cancel scheduled resume route 2018-08-22 13:47:37 -07:00
bert-e cbfacb5ec0 Merge branch 'bugfix/ZENKO-945-delimitermaster-test' into tmp/octopus/w/8.0/bugfix/ZENKO-945-delimitermaster-test 2018-08-20 23:40:54 +00:00
philipyoo 06dfdd9612 rf: use single StatsModel, use explicit var names
Changes in this commit:
- Remove `OBJECT_MONITORING_EXPIRY` and use `EXPIRY` instead
  as values are now same
- Use single instance of `StatsModel`
- Remove extra interval in `StatsModel` expiry field. Not
  needed anymore as throughput uses a 15 minute window and
  the extra interval for it will be available by default
- Use explicit variable names when data is fetched from
  `StatsClient`
2018-08-08 14:53:30 -07:00
philipyoo bf95506495 ft: ZENKO-925 increase crr metrics expiry to 24hrs
Changes reflected in this commit:
- Increase metrics expiry, but keep throughput to 15 minute
  averages.
- Add helper method `_getMaxUptime` to find # of intervals
- Update tests to reflect the extra intervals fetched from
  Redis/StatsModel
2018-08-08 14:53:30 -07:00
Alexander Chan db743f8269 improvement: version increase 2018-08-08 10:03:19 -07:00
Alexander Chan a2311bb69c bf: ZENKO-922 add redis disconnect method
Adds disconnect method to allow closing of the backbeat metrics redis
client
2018-08-06 17:52:24 -07:00
Alexander Chan c8f323237f bf: ZENKO-903 retrieve new bucket list on report 2018-08-06 13:07:16 -07:00
Rahul Padigela 5cf55fcb68 improvement: update package-lock version 2018-08-01 17:16:23 -07:00
Rahul Padigela de94a0e62e improvement: update test to adjust to nodejs 8
Buffer.from no longer throws errors if most of the string is valid hex.
Since the test checks that an error is thrown for invalid hex, it has
been updated accordingly.
2018-08-01 17:12:29 -07:00
Rahul Padigela 2b13994795 improvement: run tests in eve with nodejs 8 2018-08-01 16:25:59 -07:00
Rahul Padigela 769a461178 improvement: move metrics tests to functional 2018-08-01 16:24:40 -07:00
Rahul Padigela c11fc1d9d8 bugfix: ZENKO-898 install node-fcntl module 2018-08-01 15:59:11 -07:00
bert-e b8ad86a1f1 Merge branch 'feature/ZENKO-785-add-checkHealth-to-mongodb' into q/8.0 2018-07-31 19:04:56 +00:00
Giacomo Guiulfo 12c4df722b feat: add checkHealth to mongodb interface 2018-07-31 11:40:55 -07:00
bert-e f566e32322 Merge branch 'bugfix/ZENKO-751-setMaxObjectKeyLimit' into q/8.0 2018-07-30 17:17:11 +00:00
philipyoo 6413c92fbc bf: ZENKO-751 tempfix set max object key limit
The max key length will be set to 915 bytes to account for different
situations. The default AWS key size is 1024 bytes, but mongo keys
allow up to 1012 bytes. Factoring in the version id
and bucket match false (bucket name prefix), for now
we will limit the key size to 915 bytes and return an error
right away if the object key byte size exceeds this limit.
2018-07-27 14:40:14 -07:00
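A sketch of the guard; the constant mirrors the limit above:

    const MAX_KEY_BYTE_LENGTH = 915;

    function isKeyTooLong(objectKey) {
        // measure bytes, not characters: multi-byte UTF-8 counts
        return Buffer.byteLength(objectKey, 'utf8') > MAX_KEY_BYTE_LENGTH;
    }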
bert-e 29182cce05 Merge branch 'bugfix/ZENKO-763-objectTagsAreNotReplicated' into q/8.0 2018-07-27 18:40:17 +00:00
Jonathan Gramain 9fb5b8b10d bf: ZENKO-763 rework mongo log consumer
- process individual mongo log entry types separately ('i', 'u',
  'd'). This is the main fix required to process updates coming from
  put-object-tagging or ACLs

- fix usage of uniqID:

  - previously it was ignored due to a typo (uniqId instead of
    uniqID), which meant we still processed entries from the same
    second multiple times

  - with typo fixed, it requires another change to make it useful:
    we have to emit the 'info' event at the end of the batch so that
    the last unique ID is presented

  - cleaner serialization of timestamp+uniqID: use JSON rather than
    custom parsing and pass it as an opaque string in info.end

- correct use of stream functions (e.g. end() was masked by a local
  variable called "this.end", fixed by prefixing private members with
  "_")

- fix timestamp output: do not use private member _high from
  Timestamp, use toNumber()

- fix mongo log flow control when reading from the mongo log by using
  pipe() instead of just calling write() then end(), so we don't
  buffer contents unnecessarily.

- removed some unnecessary special case handling and 'this.push()'
  calls

Add unit tests to check ListRecordStream with known mongo log entry
types (which required moving the class in a separate file from
LogConsumer.js)
2018-07-25 18:06:06 -07:00
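A sketch of dispatching on the oplog entry types named above ('i', 'u', 'd'); the emit helpers are assumed for illustration:

    switch (entry.op) {
    case 'i': // insert: entry.o holds the full document
        emitPut(entry.o);
        break;
    case 'u': // update: entry.o2 holds the key, entry.o the update
        emitPut(Object.assign({ _id: entry.o2._id }, entry.o));
        break;
    case 'd': // delete: entry.o holds the key of the deleted document
        emitDelete(entry.o._id);
        break;
    default: // other entry types (e.g. no-ops) are ignored
        break;
    }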
vrancurel 5631a892c6 bugfix: temporary fix for the s3-data pod ballooning issue
that consists of disabling file-level caching for the files
  we store in the file data backend.
2018-07-25 16:17:16 -07:00
Rahul Padigela dfcdea46fc improvement: ZENKO-760 use callback instead of throw
This lets CloudServer handle MongoClient issues more gracefully
2018-07-20 17:21:36 -07:00
Rahul Padigela be02e59bfe bugfix: ensure setup callback is called 2018-07-20 16:51:17 -07:00
Rahul Padigela fdbeed1c4e improvement: ZENKO-760 add connection timeout for monogoclient
MongoClient checks each node in the replica set to see which one is the
primary; this check has a default timeout of 30s, which delays the startup
of Cloudserver when one of the nodes is unavailable. Cutting down the
timeout makes it go through the list of nodes in the replica set quicker
to find the primary. The MONGO_CONNECT_TIMEOUT_MS env var is introduced to
adjust the timeout in deployments.
2018-07-20 14:30:38 -07:00
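A sketch of wiring the env var into the driver option; the URL and the fallback value are assumed:

    const { MongoClient } = require('mongodb');

    const connectTimeoutMS =
        Number(process.env.MONGO_CONNECT_TIMEOUT_MS) || 5000;
    MongoClient.connect(
        'mongodb://node1,node2,node3/?replicaSet=rs0',
        { connectTimeoutMS },
        (err, client) => { /* ... */ });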
bert-e 91fbc3fd23 Merge branch 'bugfix/ZENKO-642-multipleLifecycleConfigTags' into q/8.0 2018-07-17 20:42:38 +00:00
philipyoo 241338bcfa bf: apply multiple lifecycle filter tags if they exist 2018-07-17 13:22:16 -07:00
Rached Ben Mustapha 6db80e9411 bf: return timely on data diskUsage subresource 2018-07-17 10:46:40 -07:00
bert-e d701352635 Merge branch 'bugfix/ZENKO-693/fixNegativeValues' into q/8.0 2018-07-10 20:02:18 +00:00
Alexander Chan b291ccc03f bf: ZENKO-693 clamp negative values to 0 2018-07-10 12:11:16 -07:00
Bennett Buchanan 0426f44dee bugfix: ZENKO-621 Make _buildKey public method 2018-07-10 11:34:47 -07:00
Rahul Padigela 1b9242788a bugfix: ZENKO-632 check if destroy method is available
s3-data returns a Readable, unlike sproxydclient, which returns an instance
of http.IncomingMessage, which implements the Readable stream interface and
extends it with a destroy method.
2018-07-06 15:30:38 -07:00
Bennett Buchanan 1a2ea2f353 feature: ZENKO-483 Update Redis key schema 2018-07-05 15:31:01 -07:00
Bennett Buchanan c36280a6e8 feature: ZENKO-483 Monitor CRR upload 2018-07-05 10:51:48 -07:00
bert-e c749725410 Merge branch 'bugfix/ZENKO-579-skip-scan-fix' into q/8.0 2018-07-01 21:30:53 +00:00
Alexander Chan 3d06ec6230 bf: ZENKO-625 fix mongo aggregate params 2018-07-01 12:03:46 -07:00
Jonathan Gramain 159ebb4283 bugfix: ZENKO-433 fix when 'params' is undefined 2018-06-30 19:20:55 -07:00
Alexander Chan e17333b19e ft: ZENKO-597 account for transient source in TDM 2018-06-30 15:11:12 -07:00
philipyoo b3b22292c4 ft: ZENKO-584 add failed CRR metrics route 2018-06-30 08:14:38 -07:00
bert-e 68d27ed5bf Merge branch 'bugfix/ZENKO-603/mongoItemCount' into q/8.0 2018-06-30 04:58:41 +00:00
bert-e 1e79964253 Merge branch 'feature/ZENKO-239-scheduleResumeRoutes' into q/8.0 2018-06-29 22:54:40 +00:00
philipyoo 5f76343c2e ft: ZENKO-239 add schedule resume routes 2018-06-29 15:13:50 -07:00
Alexander Chan d907c9942d bf: use bucketName instead of c.s.name 2018-06-29 12:52:21 -07:00
Alexander Chan c63b0713c0 bf: add more tests 2018-06-29 12:50:49 -07:00
Alexander Chan 6a9a88800a rf: use mongodb aggregate method for item count 2018-06-29 11:56:30 -07:00
Dora Korpar 5834f15397 ft: ZENKO-582 preferred read location
Add preferred read location specification in replication configuration

E.g. <StorageClass>aws,gcp:preferred_read</StorageClass>
2018-06-28 14:31:35 -07:00
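A sketch of parsing that <StorageClass> value into the site list plus the preferred read location:

    function parseStorageClass(storageClass) {
        let preferredRead;
        const sites = storageClass.split(',').map(entry => {
            const [site, marker] = entry.split(':');
            if (marker === 'preferred_read') {
                preferredRead = site;
            }
            return site;
        });
        return { sites, preferredRead };
    }
    // parseStorageClass('aws,gcp:preferred_read')
    // -> { sites: ['aws', 'gcp'], preferredRead: 'gcp' }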
bert-e b50f6c4678 Merge branch 'feature/ZENKO-583-crrStatusRoute' into q/8.0 2018-06-28 17:20:54 +00:00
bert-e edeab02107 Merge branch 'feature/pensieve-stats' into q/8.0 2018-06-28 17:17:43 +00:00
David Pineau c64cccdf55 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-28 18:51:07 +02:00
vrancurel af2b3a4bc3 bugfix: fix versioningGeneral2 test failing with Mongo
When an object has been created without versioning and
versioning has since been enabled, we must, when creating a
version, consider the case where the object doesn't have
the versionId property.
2018-06-27 18:38:28 -07:00
philipyoo 1e9ad08830 ft: ZENKO-583 add crr status check route 2018-06-27 17:20:11 -07:00
David Pineau 9e66fda610 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-27 18:42:41 +02:00
Rahul Padigela 888e154f0e Merge remote-tracking branch 'origin/feature/ZENKO-267-Routes-MD-Ingestion' into w/8.0/feature/ZENKO-267-Routes-MD-Ingestion 2018-06-26 17:22:02 -07:00
Nicolas Humbert 8448f909e4 FT: push isVersioned and ownerCanonicalId stats 2018-06-26 17:07:16 -07:00
bert-e 2b16e84733 Merge branch 'feature/ZENKO-235-manualPauseResumeRoutes' into q/8.0 2018-06-27 00:00:25 +00:00
philipyoo a1a6f65364 ft: add crr pause/retry/status routes 2018-06-26 16:55:11 -07:00
bert-e 7cf0c97d8e Merge branch 'feature/ZENKO-437_Backbeat_Readiness_Probes' into q/8.0 2018-06-26 23:26:20 +00:00
Taylor McKinnon 10e7b976d5 feat(zenko-437): Add HealthProbeServer 2018-06-26 15:56:54 -07:00
vrancurel e80ea95ad8 bugfix: fix skip scan on Mongo
This allows skipping scans when jumping over a prefix takes
too long. It also has the side effect of batching more common
prefixes into one S3 list call with a delimiter.
2018-06-26 14:08:18 -07:00
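The skip-scan idea, reduced to its core (a sketch, not the actual listing code): once a common prefix has been emitted, seek directly past every key sharing it.

```typescript
// 'photos/' -> 'photos0': the last character incremented by one code
// point yields the smallest key sorting after all 'photos/...' keys,
// so the scan can jump there instead of reading them one by one.
function skipPastPrefix(commonPrefix: string): string {
    const last = commonPrefix.charCodeAt(commonPrefix.length - 1);
    return commonPrefix.slice(0, -1) + String.fromCharCode(last + 1);
}
```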
Jeremy Desanlis 7075318dd2 bf: ZENKO-578 mongoDB error replies
Do not raise an internalError to the upper layer when mongoDB fails to
update the master version with a specific error code. This fix is
related to the mongoDB issue SERVER-19600.

This commit also fixes the message field name of the mongoDB error: it
is 'errmsg', not 'message'.
2018-06-26 14:00:08 -07:00
bert-e 38f68fba1a Merge branch 'bugfix/ZENKO-308-listversion' into q/8.0 2018-06-25 17:45:24 +00:00
vrancurel 16f9a6f5f6 bugfix: list version is incorrect because sometimes
we replace the master with an incorrect last version,
because inserts are sometimes swapped. Add a check
to make sure we always replace the master with a
smaller (thus more recent) version.
2018-06-25 09:28:34 -07:00
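A sketch of the check (assuming, as the commit states, that version IDs sort so that smaller means more recent):

```typescript
// Replace the master only with a version at least as recent as the
// one currently stored (smaller version ID = more recent).
function shouldReplaceMaster(
    currentMasterVersionId: string,
    incomingVersionId: string,
): boolean {
    return incomingVersionId <= currentMasterVersionId;
}
```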
bert-e c48e4b89bd Merge branch 'feature/ZENKO-315/CRRWithoutVersioning' into q/8.0 2018-06-23 00:00:50 +00:00
Bennett Buchanan 2a8169e936 feature: ZENKO-315 Add NFS properties 2018-06-22 14:02:39 -07:00
Alexander Chan 1af67fffc7 bf: fix mongo counter 2018-06-21 19:58:42 -07:00
Guillaume Gimenez e9ac11b1fe ft: ZENKO-561: bucket attributes handling fixed
on putBucketAttributes and getBucketAttributes
2018-06-20 15:54:43 -07:00
bert-e 30dcd6ef86 Merge branch 'feature/ZENKO-433/countIncUpdateandRefresh' into q/8.0 2018-06-20 22:05:49 +00:00
Alexander Chan 2ce9db4e01 ft: ZENKO-433 add item count support incremental update and refresh 2018-06-20 10:00:57 -07:00
philipyoo 9e234e2b41 bf: zero-fill response for getAllStats 2018-06-13 15:05:38 -07:00
philipyoo 83a831f512 rf: edit monitoring route details 2018-06-13 14:11:45 -07:00
Guillaume Gimenez 32c2a6fe99 FT: Metadata Proxy Server 2018-06-13 10:06:05 -07:00
Rahul Padigela 063361377c chore: update version and dependencies 2018-05-30 16:44:17 -07:00
Rahul Padigela ea7f28c82d
Merge pull request #495 from scality/fwdport/z/1.0-master
Fwdport/z/1.0 master
2018-05-30 08:32:59 -07:00
Rahul Padigela a9e760b32e chore: use correct dependency branches 2018-05-29 17:01:49 -07:00
Rahul Padigela 3b16a307b8 Merge remote-tracking branch 'origin/z/1.0' into fwdport/z/1.0-master 2018-05-29 16:52:11 -07:00
Rahul Padigela f8dfa378a1
Merge pull request #494 from scality/bf/ZENKO-370-restoreMongoOpLogFilteringPerDb
restore mongo op log filtering per db
2018-05-29 09:28:54 -07:00
Jonathan Gramain e16eadb474 bf: ZENKO-370 restore mongo oplog db filtering
For Orbit, which has multiple instances per mongo database. This change
restores the filtering per db, but keeps publishing the internal DBs
that have '__' in their name.

Also attempts to fix the original regexp, which was matching the '$DB.'
pattern at any place, not only at the beginning of the 'ns' field.
2018-05-25 14:54:21 -07:00
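The anchoring bug can be shown in isolation:

```typescript
const db = 'metadata';

// unanchored: 'metadata.' matches anywhere in the ns field
const buggy = new RegExp(`${db}\\.`);
// anchored: only ns values that start with the database name match
const fixed = new RegExp(`^${db}\\.`);

console.log(buggy.test('backup.metadata.foo')); // true (wrong)
console.log(fixed.test('backup.metadata.foo')); // false
console.log(fixed.test('metadata.mybucket'));   // true
```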
Rahul Padigela 5bf7fef53c
Merge pull request #491 from scality/bf/ZENKO-355-byteToMBConversion
bf: ZENKO-355 crr stats byte conversion
2018-05-22 15:00:45 -07:00
philipyoo 659aee2fc2 bf: fix/change byte conversion 2018-05-21 16:07:15 -07:00
Rahul Padigela bde52ab89b
Merge pull request #492 from scality/bf/ZENKO-344-mongoLogEntriesDuplicated-fixInfoEvent
bf: ZENKO-344 don't shunt 'info' event production
2018-05-15 16:24:13 -07:00
Jonathan Gramain 0ddb4da8a9 bf: ZENKO-344 don't shunt 'info' event production
Make sure the mongo consumer produces the 'info' event from the LogConsumer
when the lastEndID has not been reached yet.
2018-05-14 17:31:05 -07:00
Rached Ben Mustapha 56e280236b
Merge pull request #490 from scality/fix/ZENKO-346-no-crash-on-empty-stats
fix: do not crash on empty backbeat stats
2018-05-11 15:32:04 -07:00
Rached Ben Mustapha f904f04401 fix: do not crash on empty backbeat stats 2018-05-11 11:51:12 -07:00
Rahul Padigela db45fee9e8
Merge pull request #487 from scality/bf/ZENKO-344-mongoLogEntriesDuplicated
Bf/zenko 344 mongo log entries duplicated
2018-05-11 10:16:27 -07:00
JianqinWang ecc431c715 bf: typo in oplogReplay 2018-05-11 10:12:03 -07:00
JianqinWang 6f694ae7f4 bf: ZENKO-344 Fix duplicate mongo logs 2018-05-11 10:12:03 -07:00
Rahul Padigela e7862d3922
Merge pull request #489 from scality/bf/ZENKO-343-dontFilterInternalMongoNs
bf: ZENKO-343 remove regexp-based 'ns' filtering
2018-05-10 21:16:59 -07:00
Jonathan Gramain de7ebf70d7 bf: ZENKO-343 remove regexp-based 'ns' filtering
Don't filter internal namespace entries from the mongo log, as backbeat
needs the metastore entries exposed to process lifecycle entries.
2018-05-10 16:00:07 -07:00
Rahul Padigela 1425f03c1e
Merge pull request #486 from scality/bf/ZENKO-323-relaxMongoCountError
bf: ZENKO-323 relax mongo count error
2018-05-10 09:55:20 -07:00
Alexander Chan ad527911a2 bf: ZENKO-323 relax mongo count error 2018-05-08 17:18:26 -07:00
Rahul Padigela 6c528688ee
Merge pull request #485 from scality/back-porting-master
FX: constructing v4 query auth signature with proxyPath
2018-05-08 15:03:17 -07:00
Nicolas Humbert e53aa2efd2 FX: constructing v4 query auth signature with proxyPath
(cherry picked from commit 160b960607)
2018-05-08 14:51:13 -07:00
Rahul Padigela 873bc9b647
Merge pull request #479 from scality/ft/proxy
FX: constructing v4 query auth signature with proxyPath
2018-05-08 12:01:12 -07:00
Nicolas Humbert 160b960607 FX: constructing v4 query auth signature with proxyPath 2018-05-08 11:55:24 -07:00
Rahul Padigela 843bd1fe13
Merge pull request #484 from scality/bf/ZENKO-314-removeMongoCursorLimit
bf: ZENKO-314 remove mongo cursor limit
2018-05-07 18:40:28 -07:00
Alexander Chan 93a2a79699 bf: remove mongo cursor limit
Removed the hard-coded mongo cursor limit, as it introduced undesired
behavior with small max-keys values.
2018-05-07 17:58:07 -07:00
Rahul Padigela ef32d5e94d
Merge pull request #481 from scality/bf/mongo-cursor-limit
bf: fix mongo cursor limit
2018-05-03 12:18:01 -07:00
Alexander Chan 45d9c3d999 bf: fix mongo cursor limit
use hard coded limit for the cursor limit
2018-05-02 19:30:39 -07:00
Rahul Padigela a2ce46d8d0
Merge pull request #478 from scality/ft/Zenko-21/prom-route
Zenko-21: FT: New Route for Prometheus Client
2018-05-02 12:38:34 -07:00
anurag4DSB 0c0bffa2c3 ft: ZENKO-21 add prometheus monitoring route
Signed-off-by: anurag4DSB <anurag.213@gmail.com>
2018-05-02 12:29:57 -07:00
ironman-machine d966c0bda9 merge #477 2018-05-02 01:36:01 +00:00
Rahul Padigela cb86a857cc
Merge pull request #476 from scality/bf/mongo-list-object-limit
Bf/mongo list object limit
2018-05-01 16:56:13 -07:00
Alexander Chan 55c9441bd7 bf: add mongo list-object max limit 2018-05-01 10:32:01 -07:00
David Pineau cae55a65c8 ft: interpret healthcheck status from bucketd
As discussed in S3C-1412, it is necessary for S3 to interpret Bucketd health
status in order to provide more flexibility (relative to the health check
mechanism) than failing due to a minor partial unavailability of the platform.

This is done in the bucketclient backend's healthcheck method, in order to
comply with all the other backends.

Fixes S3C-1412

Signed-off-by: David Pineau <david.pineau@scality.com>
2018-04-30 15:58:05 -07:00
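One way to picture such an interpretation layer (purely illustrative; the status codes and thresholds are not bucketd's actual contract):

```typescript
type Health = 'healthy' | 'degraded' | 'unhealthy';

// Map a bucketd healthcheck response onto a tri-state result so that
// a light partial unavailability degrades the check instead of
// failing it outright.
function interpretBucketdStatus(statusCode: number): Health {
    if (statusCode === 200) {
        return 'healthy';
    }
    if (statusCode < 500) {
        return 'degraded'; // partial unavailability: keep serving
    }
    return 'unhealthy';
}
```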
philipyoo 114cbf5571 bf: use expired interval to avg out throughput
When an interval of data in Redis expires, throughput will
abruptly drop. Given the data we collect, we can only
calculate the average throughput. To smooth out the erratic
decrease on expiration of an interval, instead combine the
data gathered so far in the newest interval with the
remaining part of that interval weighted by the average of
the just-expired interval.

In Redis, we need to save an extra interval to reference
the just-expired data.
2018-04-30 10:23:22 -07:00
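The smoothing can be written out as a small formula (names are assumptions; units are bytes and seconds):

```typescript
// Blend the just-expired interval's average rate over the portion of
// the newest interval that has not elapsed yet, so throughput decays
// gradually instead of dropping when an interval expires.
function smoothedThroughput(
    newestIntervalBytes: number,
    expiredIntervalAvgRate: number, // bytes/sec over the expired interval
    elapsedInNewestSec: number,
    intervalSizeSec: number,
): number {
    const remainingSec = intervalSizeSec - elapsedInNewestSec;
    const estimatedBytes =
        newestIntervalBytes + expiredIntervalAvgRate * remainingSec;
    return estimatedBytes / intervalSizeSec;
}
```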
Alexander Chan f2bab3b3d6 ft: ZENKO-262 update bucketInfo model 2018-04-30 10:23:22 -07:00
philipyoo 3276d235bb bf: do not include current UploadIdMarker in list
for in_memory only:
listMultipartUpload should not include the current marker in the
listing. Previously, it would include the marker as the
first item in the list.
2018-04-30 10:23:22 -07:00
philipyoo ee2aed10f3 ft: add uid property to all buckets 2018-04-24 10:07:58 -07:00
Rahul Padigela 19bee770ea chore: update scality dependencies 2018-04-23 12:23:58 -07:00
Rahul Padigela e0c5d03436 chore: update version and author 2018-04-23 12:18:42 -07:00
Rahul Padigela c8a7148645
Merge pull request #472 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-23 00:07:18 -07:00
Rahul Padigela 8ca5dce4fe Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-23 00:03:04 -07:00
Bennett Buchanan 599fb5709b
Merge pull request #470 from scality/rf/S3C-1399/refactor-backbeat-metrics-into-arsenal
rf: S3C-1399 Add Backbeat metrics and routes
2018-04-20 16:30:39 -07:00
Rahul Padigela 1161d5f75d
Merge pull request #471 from scality/fwdport/7.4-7.4-beta
Fwdport/7.4 7.4 beta
2018-04-19 11:04:15 -07:00
Rahul Padigela 26b6c5d1d9 Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-7.4-beta 2018-04-19 11:00:33 -07:00
Bennett Buchanan 8fd50cd20e rf: S3C-1399 Add Backbeat metrics and routes 2018-04-18 16:46:07 -07:00
Rahul Padigela 1f6b5bf2bd
Merge pull request #469 from scality/fix/less-verbose-report
Do not log report requests
2018-04-16 13:18:45 -07:00
Rached Ben Mustapha a7813daea9 Do not log report requests 2018-04-16 11:55:44 -07:00
Rahul Padigela 5d4eb84425
Merge pull request #468 from scality/ft/mongo-caching
add mongo client caching
2018-04-13 17:39:51 -07:00
Alexander Chan 9511fff479 add mongo client bucket/object metrics caching 2018-04-13 17:25:03 -07:00
Rahul Padigela d70f64a6d0
Merge pull request #465 from scality/fx/data-managed-count
fx: correct data managed count
2018-04-13 16:48:01 -07:00
Alexander Chan ee66dc811c fx: correct data managed count
fixes issue with mongoclient countItems
+ accounts for data stored directly to external backend
+ adds check to handle versioned bucket and non-versioned buckets differently
2018-04-13 16:32:43 -07:00
Rahul Padigela 2710471726
Merge pull request #467 from scality/backport/master-rel/7.4-beta
Backport: master to rel/7.4-beta
2018-04-10 17:57:54 -07:00
Dora Korpar 9aee9f6cf0 ft: extract function for date modified headers
(cherry picked from commit 92da4c90e5)
2018-04-10 17:44:22 -07:00
Rahul Padigela a168fab266
Merge pull request #435 from scality/ft/objdel-add-modified-header-check
Ft/objdel add modified header check
2018-04-10 17:30:54 -07:00
Dora Korpar 92da4c90e5 ft: extract function for date modified headers 2018-04-10 17:05:03 -07:00
Rahul Padigela a95d5ea15d
Merge pull request #464 from scality/fix/flaky-mongo
Fixes flakiness in S3 functional tests with mongo backend
2018-04-07 22:16:56 -07:00
Salim aad05faa12 Fixes flakiness in S3 functional tests with mongo backend 2018-04-06 17:27:24 -07:00
Rahul Padigela ab230ebfe7
Merge pull request #463 from scality/fix/mongo-tests
Fix/mongo tests
2018-04-06 16:48:53 -07:00
Salim b3103e1307 ZENKO-227 fix: mongodb versioning
This fixes the problem where, if a version ID was passed, an internal
error was raised because we were trying to create a new object with
the same key value. This adds a check to see whether the object exists
first, then updates or upserts accordingly.
2018-04-06 15:41:09 -07:00
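Sketch of the resulting write path (simplified; a single upsert is shown here, which has the same effect as the exists-check followed by update or insert):

```typescript
import { Collection } from 'mongodb';

type MDDoc = { _id: string; value: object };

// Overwrite the metadata if the key exists, create it otherwise,
// instead of a blind insert that fails with a duplicate-key error.
async function putObjectMD(
    c: Collection<MDDoc>,
    key: string,
    metadata: object,
): Promise<void> {
    await c.updateOne(
        { _id: key },
        { $set: { value: metadata } },
        { upsert: true },
    );
}
```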
Salim f3b0091210 feat: add error KeyAlreadyExists 2018-04-06 11:19:37 -07:00
Rahul Padigela f633b91072
Merge pull request #460 from scality/ft/add-data-managed
ft: add data managed metrics
2018-04-06 10:47:06 -07:00
Alexander Chan 87807462dc ft: add data managed metrics 2018-04-05 10:17:26 -07:00
Rahul Padigela d7f114d504
Merge pull request #461 from scality/fix/skip-mpu-bucket-prefix
Skip MPU shadow buckets
2018-04-03 14:42:28 -07:00
Rached Ben Mustapha 5ef168e654 Skip MPU shadow buckets 2018-04-03 13:20:57 -07:00
Rahul Padigela 82b4055c6c
Merge pull request #459 from scality/fix/stuck-replication
Fix/stuck replication
2018-04-02 14:55:15 -07:00
Rached Ben Mustapha 91ccccfe85 Remove use of global variable 2018-04-02 14:49:04 -07:00
Rached Ben Mustapha 696999874b Fix replication stream getting stuck
The mongodb transform stream would never actually emit any objects
to the extensions.
2018-04-02 14:31:54 -07:00
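The bug class, in miniature (illustrative, not the actual oplog stream):

```typescript
import { Transform } from 'stream';

// A Transform must push() its results (or hand them to the callback);
// otherwise downstream extensions never receive a single object.
const broken = new Transform({
    objectMode: true,
    transform(entry, _encoding, callback) {
        callback(); // entry is swallowed, pipeline appears stuck
    },
});

const working = new Transform({
    objectMode: true,
    transform(entry, _encoding, callback) {
        callback(null, entry); // emit the entry to consumers
    },
});
```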
Rached Ben Mustapha d2bed3bf9a Un-hardcode mongodb database name 2018-04-02 14:31:07 -07:00
Rahul Padigela ad42baa5ff
Merge pull request #458 from scality/fix/mongologreader-contract
Fix/mongologreader contract
2018-04-02 12:07:20 -07:00
Rached Ben Mustapha 6ac92b2ad2 Fix mongodb log consumer initial values
Pre-existing LogConsumer contract uses `null` for initial values;
`undefined` breaks client code assumptions.
2018-04-02 11:31:02 -07:00
Rahul Padigela 13dbf48867
Merge pull request #457 from scality/ft/initial-instance-id
Ft/initial instance
2018-03-30 16:59:11 -07:00
Rached Ben Mustapha e79ad68e96 S3C-1355 Use provided instance id 2018-03-30 16:24:51 -07:00
Rahul Padigela a4a5fe0db0
Merge pull request #456 from scality/ft/ZENKO-147/crr-retry-kafka
FT: Add objectMD setters for replicationInfo
2018-03-30 11:37:22 -07:00
Bennett Buchanan f838fcc31f FT: Add objectMD setters for replicationInfo 2018-03-29 16:27:48 -07:00
VR eb9dd23b14
Merge pull request #455 from scality/ZENKO-222-bf-mongo-url
ZENKO-222 bf: revert mongo url
2018-03-28 18:03:19 -07:00
JianqinWang edbf7ab650 ZENKO-222 bf: revert mongo url 2018-03-28 17:57:59 -07:00
Rahul Padigela e068950903
Merge pull request #453 from scality/forward/orbit
Forward/orbit
2018-03-28 16:25:55 -07:00
Rahul Padigela 1ceb7b264c chore: remove branch version from package.json 2018-03-28 16:03:42 -07:00
vrancurel 5a29aaa10c fixing metadata search broken by commit ea8d523501fcd996447986318e59a95e729563b0 2018-03-28 16:03:42 -07:00
Rahul Padigela 7587f7ba25 ft: update version 2018-03-28 16:03:42 -07:00
Rahul Padigela 795b145594
Merge pull request #452 from scality/add-bson-to-dependencies
add bson to dependencies
2018-03-28 11:16:01 -07:00
Jeremy Desanlis 58f027a693 add bson to dependencies 2018-03-27 18:36:13 -07:00
Rahul Padigela e09348d658
Merge pull request #451 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-03-27 17:22:15 -07:00
Alexander Chan bddb90c6a1 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-27 15:49:03 -07:00
Rahul Padigela 94efaaccc2
Merge pull request #446 from scality/ft/S3C-1327-add-bucketinfo-uid-prop
ft: Add uid property to BucketInfo
2018-03-26 07:00:50 -07:00
Rahul Padigela 463a8ebe15
Merge pull request #448 from scality/fwd/7.4-to-7.4-beta
Fwd: 7.4 to 7.4 beta
2018-03-26 07:00:19 -07:00
philipyoo f17ce17857 Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-to-7.4-beta 2018-03-23 10:24:36 -07:00
Rahul Padigela 3a5250e2e9
Merge pull request #437 from scality/ft/S3C-1148-statsclient-multiple-ids
Ft/S3C-1148 statsclient multiple ids
2018-03-22 14:56:05 -07:00
ironman-machine 48cb7b3b05 merge #447 2018-03-21 18:44:42 +00:00
Nicolas Humbert 84c4c147a2 FIX: Mongo Client - countItems 2018-03-20 17:54:59 -07:00
Rahul Padigela 958e818655
Merge pull request #445 from scality/fwd/7.4-beta-master
Fwd/7.4 beta master
2018-03-20 14:51:33 -07:00
philipyoo 91dd219c47 ft: Add uid property to BucketInfo
Needed for lifecycle processing in backbeat
2018-03-19 18:56:52 -07:00
Alexander Chan 5f3d478edb Merge remote-tracking branch 'origin/rel/7.4-beta' 2018-03-19 15:49:24 -07:00
Rahul Padigela 04d56cfdff ft: update version number 2018-03-14 13:28:10 -07:00
Rahul Padigela 73dd529c29 ft: update package.json dependencies 2018-03-14 13:08:44 -07:00
philipyoo a9aa40c168 ft: extend statsclient to query by list of ids
Extend support for querying a list of ids and
returning a total sum of results for that list.

Also add wrapper for redis method `keys`
2018-03-14 11:22:15 -07:00
ironman-machine 189194a4e7 merge #433 2018-03-08 20:39:29 +00:00
JianqinWang a9a6b2433d rf: remove use of util.format 2018-03-08 09:22:28 -08:00
JianqinWang fa19fc8859 rf: name change for replica set hosts 2018-03-08 09:22:28 -08:00
JianqinWang a269619698 ZENKO-15 ft: oplog tailer for MongoDB 2018-03-08 09:22:28 -08:00
Rahul Padigela da1da43597
Merge pull request #438 from scality/fwdport/7.4-master
Fwdport/7.4 master
2018-03-08 00:26:46 -08:00
Rahul Padigela caac4e4e7e Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-master 2018-03-07 19:08:15 -08:00
Rahul Padigela 67250133dc
Merge pull request #436 from scality/bf/missing-bucketclient-param
bf: fix missing param needed for BCI
2018-03-06 16:43:35 -08:00
JianqinWang d3f3be03ae bf: fix missing param needed for BCI 2018-03-06 16:39:54 -08:00
ironman-machine 1a9f1afd2c merge #425 2018-03-06 18:25:58 +00:00
JianqinWang 9a5afdbc5c rf: rename mongo replicaset hosts 2018-03-05 17:28:11 -08:00
JianqinWang 83cf54512b ZENKO-140 rf: extract metadata backends from S3 2018-03-05 16:33:38 -08:00
ironman-machine 7e3ad64456 merge #432 2018-02-24 01:27:51 +00:00
Nicolas Humbert eba0cb6116 FT: add proxy_path header 2018-02-22 17:16:43 -08:00
Lauren Spiegel fd23e82ab9
Merge pull request #419 from scality/fix/replaceUpdate
Fix/stopSwallowingErrors
2018-02-14 13:08:05 -08:00
Lauren Spiegel d7cf5e8ccf FIX: Stop swallowing errors 2018-02-14 13:02:22 -08:00
flavien-scality d0f4f95f0d
Merge pull request #417 from scality/fwd/7.4-master
Fwd/7.4 master (try succeeded)
2018-02-14 09:54:16 +01:00
Alexandre Merle 0e606b1061 Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-master 2018-02-14 04:24:23 +01:00
ironman-machine 44ead88d83 merge #420 2018-02-13 19:32:13 +00:00
vrancurel d8e1497940 use hosts config instead of host and port 2018-02-12 15:24:51 -08:00
ThibaultRiviere 4193394340
Merge pull request #407 from scality/fwdport_7.4_master
Fwdport 7.4 master
2018-02-07 13:42:22 +01:00
Thibault Riviere 0f1b0dad01 Merge branch 'rel/7.4' into fwdport_7.4_master 2018-02-07 13:34:03 +01:00
ironman-machine 393d6edc07 merge #408 2018-02-06 23:55:05 +00:00
vrancurel 70638eaf7a support search in Mongo 2018-02-06 14:18:51 -08:00
Lauren Spiegel 9d0156dfdf
Merge pull request #403 from scality/welcome/mongo
Welcome/mongo
2018-02-02 15:10:44 -08:00
Lauren Spiegel 8d8028b83f CHORE: Change filename 2018-02-02 12:08:49 -08:00
Lauren Spiegel b99fe2cd8d Changes to client due to move 2018-02-02 12:08:46 -08:00
Lauren Spiegel cc26f288be Move mongoclient from s3 to arsenal 2018-02-02 11:34:45 -08:00
157 changed files with 17299 additions and 8149 deletions

View File

@ -1 +1,6 @@
{ "extends": "scality" }
{
"extends": "scality",
"parserOptions": {
"ecmaVersion": 2020
}
}

View File

@ -25,24 +25,30 @@ jobs:
- 6379:6379
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/setup-node@v2
uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: 'yarn'
- name: install dependencies
run: yarn install --frozen-lockfile --prefer-offline
run: yarn install --frozen-lockfile --prefer-offline --network-concurrency 1
continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
- name: lint yaml
run: yarn --silent lint_yml
- name: lint javascript
run: yarn --silent lint -- --max-warnings 0
run: yarn --silent lint --max-warnings 0
- name: lint markdown
run: yarn --silent lint_md
- name: run unit tests
run: yarn test
- name: add hostname
run: |
sudo sh -c "echo '127.0.0.1 testrequestbucket.localhost' >> /etc/hosts"
- name: test and coverage
run: yarn --silent coverage
- name: run functional tests
run: yarn ft_test
- uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: run executables tests
run: yarn install && yarn test
working-directory: 'lib/executables/pensieveCreds/'

.swcrc (new file, 12 lines)
View File

@ -0,0 +1,12 @@
{
"$schema": "https://swc.rs/schema.json",
"jsc": {
"parser": {
"syntax": "typescript"
},
"target": "es2017"
},
"module": {
"type": "commonjs"
}
}

View File

@ -1,5 +1,7 @@
# Arsenal
[![codecov](https://codecov.io/gh/scality/Arsenal/branch/development/8.1/graph/badge.svg?token=X0esXhJSwb)](https://codecov.io/gh/scality/Arsenal)
Common utilities for the S3 project components
Within this repository, you will be able to find the shared libraries for the

View File

@ -85,6 +85,66 @@ Used to store the bucket lifecycle configuration info
### Properties Added
```javascript
this._uid = uid || uuid();
```
### Usage
Used to set a unique identifier on a bucket
## Model version 8
### Properties Added
```javascript
this._readLocationConstraint = readLocationConstraint || null;
```
### Usage
Used to store default read location of the bucket
## Model version 9
### Properties Added
```javascript
this._isNFS = isNFS || null;
```
### Usage
Used to determine whether the bucket may be accessed through NFS
## Model version 10
### Properties Added
```javascript
this._ingestion = ingestionConfig || null;
```
### Usage
Used to store the ingestion status of a bucket
## Model version 11
### Properties Added
```javascript
this._azureInfo = azureInfo || null;
```
### Usage
Used to store Azure storage account specific information
## Model version 12
### Properties Added
```javascript
this._objectLockEnabled = objectLockEnabled || false;
this._objectLockConfiguration = objectLockConfiguration || null;
@ -95,7 +155,7 @@ this._objectLockConfiguration = objectLockConfiguration || null;
Used to determine whether object lock capabilities are enabled on a bucket and
to store the object lock configuration of the bucket
## Model version 8
## Model version 13
### Properties Added
@ -107,7 +167,7 @@ this._notificationConfiguration = notificationConfiguration || null;
Used to store the bucket notification configuration info
## Model version 9
## Model version 14
### Properties Added
@ -119,19 +179,7 @@ this._serverSideEncryption.configuredMasterKeyId = configuredMasterKeyId || undefined;
Used to store the user's configured KMS key ID
## Model version 10
### Properties Added
```javascript
this._uid = uid || uuid();
```
### Usage
Used to set a unique identifier on a bucket
## Model version 11
## Model version 15
### Properties Added
@ -139,6 +187,74 @@ Used to set a unique identifier on a bucket
this._tags = tags || null;
```
The Tag Set of a bucket is an array of objects with Key and Value:
```javascript
[
{
Key: 'something',
Value: 'some_data'
}
]
```
## Model version 16
### Properties Added
```javascript
this._capabilities = capabilities || undefined;
```
For capacity-enabled buckets, contains the following data:
```javascript
{
_capabilities: {
VeeamSOSApi?: {
SystemInfo?: {
ProtocolVersion: String,
ModelName: String,
ProtocolCapabilities: {
CapacityInfo: Boolean,
UploadSessions: Boolean,
IAMSTS: Boolean,
},
APIEndpoints: {
IAMEndpoint: String,
STSEndpoint: String,
},
SystemRecommendations?: {
S3ConcurrentTaskLimit: Number,
S3MultiObjectDelete: Number,
StorageCurrentTasksLimit: Number,
KbBlockSize: Number,
},
LastModified?: String,
},
CapacityInfo?: {
Capacity: Number,
Available: Number,
Used: Number,
LastModified?: String,
},
}
},
}
```
### Usage
Used to store bucket tagging
## Model version 17
### Properties Added
```javascript
this._quotaMax = quotaMax || 0;
```
### Usage
Used to store bucket quota

greenkeeper.json (new file, 28 lines)
View File

@ -0,0 +1,28 @@
{
"groups": {
"default": {
"packages": [
"lib/executables/pensieveCreds/package.json",
"package.json"
]
}
},
"branchPrefix": "improvement/greenkeeper.io/",
"commitMessages": {
"initialBadge": "docs(readme): add Greenkeeper badge",
"initialDependencies": "chore(package): update dependencies",
"initialBranches": "chore(bert-e): whitelist greenkeeper branches",
"dependencyUpdate": "fix(package): update ${dependency} to version ${version}",
"devDependencyUpdate": "chore(package): update ${dependency} to version ${version}",
"dependencyPin": "fix: pin ${dependency} to ${oldVersionResolved}",
"devDependencyPin": "chore: pin ${dependency} to ${oldVersionResolved}",
"closes": "\n\nCloses #${number}"
},
"ignore": [
"ajv",
"eslint",
"eslint-plugin-react",
"eslint-config-airbnb",
"eslint-config-scality"
]
}

View File

@ -1,14 +1,19 @@
import * as evaluators from './lib/policyEvaluator/evaluator';
import evaluatePrincipal from './lib/policyEvaluator/principal';
import RequestContext from './lib/policyEvaluator/RequestContext';
import RequestContext, {
actionNeedQuotaCheck,
actionNeedQuotaCheckCopy,
actionWithDataDeletion } from './lib/policyEvaluator/RequestContext';
import * as requestUtils from './lib/policyEvaluator/requestUtils';
import * as actionMaps from './lib/policyEvaluator/utils/actionMaps';
import { validateUserPolicy } from './lib/policy/policyValidator'
import * as locationConstraints from './lib/patches/locationConstraints';
import * as userMetadata from './lib/s3middleware/userMetadata';
import convertToXml from './lib/s3middleware/convertToXml';
import escapeForXml from './lib/s3middleware/escapeForXml';
import * as objectLegalHold from './lib/s3middleware/objectLegalHold';
import * as tagging from './lib/s3middleware/tagging';
import { checkDateModifiedHeaders } from './lib/s3middleware/validateConditionalHeaders';
import { validateConditionalHeaders } from './lib/s3middleware/validateConditionalHeaders';
import MD5Sum from './lib/s3middleware/MD5Sum';
import NullStream from './lib/s3middleware/nullStream';
@ -16,8 +21,10 @@ import * as objectUtils from './lib/s3middleware/objectUtils';
import * as mpuUtils from './lib/s3middleware/azureHelpers/mpuUtils';
import ResultsCollector from './lib/s3middleware/azureHelpers/ResultsCollector';
import SubStreamInterface from './lib/s3middleware/azureHelpers/SubStreamInterface';
import { prepareStream } from './lib/s3middleware/prepareStream';
import * as processMpuParts from './lib/s3middleware/processMpuParts';
import * as retention from './lib/s3middleware/objectRetention';
import * as objectRestore from './lib/s3middleware/objectRestore';
import * as lifecycleHelpers from './lib/s3middleware/lifecycleHelpers';
export { default as errors } from './lib/errors';
export { default as Clustering } from './lib/Clustering';
@ -34,22 +41,15 @@ export * as stream from './lib/stream';
export * as jsutil from './lib/jsutil';
export { default as stringHash } from './lib/stringHash';
export * as db from './lib/db';
export * as errorUtils from './lib/errorUtils';
export { default as shuffle } from './lib/shuffle';
export * as models from './lib/models';
export const algorithms = {
list: {
Basic: require('./lib/algos/list/basic').List,
Delimiter: require('./lib/algos/list/delimiter').Delimiter,
DelimiterVersions: require('./lib/algos/list/delimiterVersions').DelimiterVersions,
DelimiterMaster: require('./lib/algos/list/delimiterMaster').DelimiterMaster,
MPU: require('./lib/algos/list/MPU').MultipartUploads,
DelimiterCurrent: require('./lib/algos/list/delimiterCurrent').DelimiterCurrent,
DelimiterNonCurrent: require('./lib/algos/list/delimiterNonCurrent').DelimiterNonCurrent,
DelimiterOrphanDeleteMarker: require('./lib/algos/list/delimiterOrphanDeleteMarker').DelimiterOrphanDeleteMarker,
},
list: require('./lib/algos/list/exportAlgos'),
listTools: {
DelimiterTools: require('./lib/algos/list/tools'),
Skip: require('./lib/algos/list/skip'),
},
cache: {
GapSet: require('./lib/algos/cache/GapSet'),
@ -70,6 +70,9 @@ export const policies = {
RequestContext,
requestUtils,
actionMaps,
actionNeedQuotaCheck,
actionWithDataDeletion,
actionNeedQuotaCheckCopy,
};
export const testing = {
@ -82,6 +85,7 @@ export const s3middleware = {
escapeForXml,
objectLegalHold,
tagging,
checkDateModifiedHeaders,
validateConditionalHeaders,
MD5Sum,
NullStream,
@ -91,8 +95,10 @@ export const s3middleware = {
ResultsCollector,
SubStreamInterface,
},
prepareStream,
processMpuParts,
retention,
objectRestore,
lifecycleHelpers,
};
@ -163,3 +169,7 @@ export const storage = {
export const pensieve = {
credentialUtils: require('./lib/executables/pensieveCreds/utils'),
};
export const patches = {
locationConstraints,
};

View File

@ -196,6 +196,9 @@ export class Delimiter extends Extension {
}
getCommonPrefix(key: string): string | undefined {
if (!this.delimiter) {
return undefined;
}
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
if (delimiterIndex === -1) {

View File

@ -183,6 +183,13 @@ export class DelimiterMaster extends Delimiter {
id: DelimiterFilterStateId.NotSkipping,
};
}
} else {
// save base implementation of the `NotSkipping` state in
// Delimiter before overriding it with ours, to be able to call it from there
this.keyHandler_NotSkipping_Delimiter = this.keyHandlers[DelimiterFilterStateId.NotSkipping];
this.setKeyHandler(
DelimiterFilterStateId.NotSkipping,
this.keyHandler_NotSkippingPrefixNorVersionsV1.bind(this));
}
// in v1, we can directly use Delimiter's implementation,
// which is already set to the proper state
@ -416,6 +423,20 @@ export class DelimiterMaster extends Delimiter {
return this.filter_onNewMasterKeyV0(key, value);
}
filter_onNewMasterKeyV1(key: string, value: string): FilterReturnValue {
// if this master key is a delete marker, accept it without
// adding the version to the contents
if (Version.isDeleteMarker(value)) {
return FILTER_ACCEPT;
}
// use base Delimiter's implementation
return this.keyHandler_NotSkipping_Delimiter(key, value);
}
keyHandler_NotSkippingPrefixNorVersionsV1(key: string, value: string): FilterReturnValue {
return this.filter_onNewMasterKeyV1(key, value);
}
keyHandler_SkippingVersionsV0(key: string, value: string): FilterReturnValue {
/* In the SkippingVersionsV0 state, skip all version keys
* (<key><versionIdSeparator><version>) */

View File

@ -396,6 +396,11 @@ export class DelimiterVersions extends Extension {
}
keyHandler_NotSkippingV1(key: string, versionId: string | undefined, value: string): FilterReturnValue {
// NOTE: this check on PHD is only useful for Artesca, S3C
// does not use PHDs in V1 format
if (Version.isPHD(value)) {
return FILTER_ACCEPT;
}
return this.filter_onNewKey(key, versionId, value);
}

View File

@ -14,7 +14,7 @@ function vaultSignatureCb(
err: Error | null,
authInfo: { message: { body: any } },
log: Logger,
callback: (err: Error | null, data?: any, results?: any, params?: any) => void,
callback: (err: Error | null, data?: any, results?: any, params?: any, infos?: any) => void,
streamingV4Params?: any
) {
// vaultclient API guarantees that it returns:
@ -38,7 +38,9 @@ function vaultSignatureCb(
}
// @ts-ignore
log.addDefaultFields(auditLog);
return callback(null, userInfo, authorizationResults, streamingV4Params);
return callback(null, userInfo, authorizationResults, streamingV4Params, {
accountQuota: info.accountQuota || {},
});
}
export type AuthV4RequestParams = {
@ -384,4 +386,19 @@ export default class Vault {
return callback(null, respBody);
});
}
report(log: Logger, callback: (err: Error | null, data?: any) => void) {
// call the report function of the client
if (!this.client.report) {
return callback(null, {});
}
// @ts-ignore
return this.client.report(log.getSerializedUids(), (err: Error | null, obj?: any) => {
if (err) {
log.debug(`error from ${this.implName}`, { error: err });
return callback(err);
}
return callback(null, obj);
});
}
}

View File

@ -9,10 +9,12 @@ import * as constants from '../constants';
import constructStringToSignV2 from './v2/constructStringToSign';
import constructStringToSignV4 from './v4/constructStringToSign';
import { convertUTCtoISO8601 } from './v4/timeUtils';
import * as vaultUtilities from './in_memory/vaultUtilities';
import * as backend from './in_memory/Backend';
import validateAuthConfig from './in_memory/validateAuthConfig';
import AuthLoader from './in_memory/AuthLoader';
import * as vaultUtilities from './backends/in_memory/vaultUtilities';
import * as inMemoryBackend from './backends/in_memory/Backend';
import baseBackend from './backends/base';
import chainBackend from './backends/ChainBackend';
import validateAuthConfig from './backends/in_memory/validateAuthConfig';
import AuthLoader from './backends/in_memory/AuthLoader';
import Vault from './Vault';
let vault: Vault | null = null;
@ -233,7 +235,7 @@ function generateV4Headers(
headerName.startsWith('x-amz-')
|| headerName.startsWith('x-scal-')
|| headerName === 'content-md5'
|| headerName === 'host'
|| headerName === 'host',
).sort().join(';');
const params = { request, signedHeaders, payloadChecksum,
credentialScope, timestamp, query: data,
@ -254,7 +256,8 @@ function generateV4Headers(
export const server = { extractParams, doAuth }
export const client = { generateV4Headers, constructStringToSignV2 }
export const inMemory = { backend, validateAuthConfig, AuthLoader }
export const inMemory = { backend: inMemoryBackend, validateAuthConfig, AuthLoader }
export const backends = { baseBackend, chainBackend }
export {
setAuthHandler as setHandler,
AuthInfo,

View File

@ -0,0 +1,233 @@
import assert from 'assert';
import async from 'async';
import errors from '../../errors';
import BaseBackend from './base';
/**
* Class that provides an authentication backend that will verify signatures
* and retrieve emails and canonical ids associated with an account using a
* given list of authentication backends and vault clients.
*
* @class ChainBackend
*/
export default class ChainBackend extends BaseBackend {
_clients: any[];
/**
* @constructor
* @param {string} service - service id
* @param {object[]} clients - list of authentication backends or vault clients
*/
constructor(service: string, clients: any[]) {
super(service);
assert(Array.isArray(clients) && clients.length > 0, 'invalid client list');
assert(clients.every(client =>
typeof client.verifySignatureV4 === 'function' &&
typeof client.verifySignatureV2 === 'function' &&
typeof client.getCanonicalIds === 'function' &&
typeof client.getEmailAddresses === 'function' &&
typeof client.checkPolicies === 'function' &&
typeof client.healthcheck === 'function',
), 'invalid client: missing required auth backend methods');
this._clients = clients;
}
/*
* try task against each client for one to be successful
*/
_tryEachClient(task: any, cb: any) {
// @ts-ignore
async.tryEach(this._clients.map(client => done => task(client, done)), cb);
}
/*
* apply task to all clients
*/
_forEachClient(task: any, cb: any) {
async.map(this._clients, task, cb);
}
verifySignatureV2(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
options: any,
callback: any,
) {
this._tryEachClient((client, done) => client.verifySignatureV2(
stringToSign,
signatureFromRequest,
accessKey,
options,
done,
), callback);
}
verifySignatureV4(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
region: string,
scopeDate: string,
options: any,
callback: any,
) {
this._tryEachClient((client, done) => client.verifySignatureV4(
stringToSign,
signatureFromRequest,
accessKey,
region,
scopeDate,
options,
done,
), callback);
}
static _mergeObjects(objectResponses: any) {
return objectResponses.reduce(
(retObj, resObj) => Object.assign(retObj, resObj.message.body),
{});
}
getCanonicalIds(emailAddresses: string[], options: any, callback: any) {
this._forEachClient(
(client, done) => client.getCanonicalIds(emailAddresses, options, done),
(err, res) => {
if (err) {
return callback(err);
}
// TODO: atm naive merge, better handling of conflicting email results
return callback(null, {
message: {
body: ChainBackend._mergeObjects(res),
},
});
});
}
getEmailAddresses(canonicalIDs: string[], options: any, callback: any) {
this._forEachClient(
(client, done) => client.getEmailAddresses(canonicalIDs, options, done),
(err, res) => {
if (err) {
return callback(err);
}
return callback(null, {
message: {
body: ChainBackend._mergeObjects(res),
},
});
});
}
/*
* merge policy responses into a single message
*/
static _mergePolicies(policyResponses: any) {
const policyMap: any = {};
policyResponses.forEach(resp => {
if (!resp.message || !Array.isArray(resp.message.body)) {
return;
}
const check = (policy) => {
const key = (policy.arn || '') + (policy.versionId || '') + (policy.action || '');
if (!policyMap[key] || !policyMap[key].isAllowed) {
policyMap[key] = policy;
}
// else is duplicate policy
};
resp.message.body.forEach(policy => {
if (Array.isArray(policy)) {
policy.forEach(authResult => check(authResult));
} else {
check(policy);
}
});
});
return Object.keys(policyMap).map(key => {
const policyRes: any = { isAllowed: policyMap[key].isAllowed };
if (policyMap[key].arn !== '') {
policyRes.arn = policyMap[key].arn;
}
if (policyMap[key].versionId) {
policyRes.versionId = policyMap[key].versionId;
}
if (policyMap[key].isImplicit !== undefined) {
policyRes.isImplicit = policyMap[key].isImplicit;
}
if (policyMap[key].action) {
policyRes.action = policyMap[key].action;
}
return policyRes;
});
}
/*
response format:
{ message: {
body: [{}],
code: number,
message: string,
} }
*/
checkPolicies(requestContextParams: any, userArn: string, options: any, callback: any) {
this._forEachClient((client, done) => client.checkPolicies(
requestContextParams,
userArn,
options,
done,
), (err, res) => {
if (err) {
return callback(err);
}
return callback(null, {
message: {
body: ChainBackend._mergePolicies(res),
},
});
});
}
healthcheck(reqUid: string, callback: any) {
this._forEachClient((client, done) =>
client.healthcheck(reqUid, (err, res) => done(null, {
error: !!err ? err : null,
status: res,
}),
), (err, res) => {
if (err) {
return callback(err);
}
const isError = res.some(results => !!results.error);
if (isError) {
return callback(errors.InternalError, res);
}
return callback(null, res);
});
}
report(reqUid: string, callback: any) {
this._forEachClient((client, done) =>
client.report(reqUid, done),
(err, res) => {
if (err) {
return callback(err);
}
const mergedRes = res.reduce((acc, val) => {
Object.keys(val).forEach(k => {
acc[k] = val[k];
});
return acc;
}, {});
return callback(null, mergedRes);
});
}
}

lib/auth/backends/base.ts (new file, 96 lines)
View File

@ -0,0 +1,96 @@
import errors from '../../errors';
/**
* Base backend class
*
* @class BaseBackend
*/
export default class BaseBackend {
service: string;
/**
* @constructor
* @param {string} service - service identifier for constructing the arn
*/
constructor(service: string) {
this.service = service;
}
/** verifySignatureV2
* @param stringToSign - string to sign built per AWS rules
* @param signatureFromRequest - signature sent with request
* @param accessKey - account accessKey
* @param options - contains algorithm (SHA1 or SHA256)
* @param callback - callback with either error or user info
* @return calls callback
*/
verifySignatureV2(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
options: any,
callback: any
) {
return callback(errors.AuthMethodNotImplemented);
}
/** verifySignatureV4
* @param stringToSign - string to sign built per AWS rules
* @param signatureFromRequest - signature sent with request
* @param accessKey - account accessKey
* @param region - region specified in request credential
* @param scopeDate - date specified in request credential
* @param options - options to send to Vault
* (just contains reqUid for logging in Vault)
* @param callback - callback with either error or user info
* @return calls callback
*/
verifySignatureV4(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
region: string,
scopeDate: string,
options: any,
callback: any
) {
return callback(errors.AuthMethodNotImplemented);
}
/**
* Gets canonical ID's for a list of accounts
* based on email associated with account
* @param emails - list of email addresses
* @param options - to send log id to vault
* @param callback - callback to calling function
* @returns callback with either error or
* object with email addresses as keys and canonical IDs
* as values
*/
getCanonicalIds(emails: string[], options: any, callback: any) {
return callback(errors.AuthMethodNotImplemented);
}
/**
* Gets email addresses (referred to as display names for getACL's)
* for a list of accounts based on canonical IDs associated with account
* @param canonicalIDs - list of canonicalIDs
* @param options - to send log id to vault
* @param callback - callback to calling function
* @returns callback with either error or
* an object from Vault containing account canonicalID
* as each object key and an email address as the value (or "NotFound")
*/
getEmailAddresses(canonicalIDs: string[], options: any, callback: any) {
return callback(errors.AuthMethodNotImplemented);
}
checkPolicies(requestContextParams: any, userArn: string, options: any, callback: any) {
return callback(null, { message: { body: [] } });
}
healthcheck(reqUid: string, callback: any) {
return callback(null, { code: 200, message: 'OK' });
}
}

View File

@ -4,7 +4,7 @@ import joi from 'joi';
import werelogs from 'werelogs';
import * as types from './types';
import { Account, Accounts } from './types';
import ARN from '../../models/ARN';
import ARN from '../../../models/ARN';
/** Load authentication information from files or pre-loaded account objects */
export default class AuthLoader {

View File

@ -1,7 +1,9 @@
import * as crypto from 'crypto';
import errors from '../../errors';
import crypto from 'crypto';
import { Logger } from 'werelogs';
import errors from '../../../errors';
import { calculateSigningKey, hashSignature } from './vaultUtilities';
import Indexer from './Indexer';
import BaseBackend from '../base';
import { Accounts } from './types';
function _formatResponse(userInfoToSend: any) {
@ -15,26 +17,32 @@ function _formatResponse(userInfoToSend: any) {
/**
* Class that provides a memory backend for verifying signatures and getting
* emails and canonical ids associated with an account.
*
* @class InMemoryBackend
*/
class Backend {
class InMemoryBackend extends BaseBackend {
indexer: Indexer;
service: string;
formatResponse: any;
constructor(service: string, indexer: Indexer) {
this.service = service;
/**
* @constructor
* @param service - service identifier for constructing the arn
* @param indexer - indexer instance for retrieving account info
* @param formatter - function which accepts user info to send
* back and returns it in an object
*/
constructor(service: string, indexer: Indexer, formatter: typeof _formatResponse) {
super(service);
this.indexer = indexer;
this.formatResponse = formatter;
}
// CODEQUALITY-TODO-SYNC Should be synchronous
verifySignatureV2(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
options: { algo: 'SHA256' | 'SHA1' },
callback: (
error: Error | null,
data?: ReturnType<typeof _formatResponse>
) => void
options: any,
callback: any,
) {
const entity = this.indexer.getEntityByKey(accessKey);
if (!entity) {
@ -50,27 +58,21 @@ class Backend {
accountDisplayName: this.indexer.getAcctDisplayName(entity),
canonicalID: entity.canonicalID,
arn: entity.arn,
// TODO Why?
// @ts-ignore
IAMdisplayName: entity.IAMdisplayName,
};
const vaultReturnObject = _formatResponse(userInfoToSend);
const vaultReturnObject = this.formatResponse(userInfoToSend);
return callback(null, vaultReturnObject);
}
// TODO Options not used. Why ?
// CODEQUALITY-TODO-SYNC Should be synchronous
verifySignatureV4(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
region: string,
scopeDate: string,
_options: { algo: 'SHA256' | 'SHA1' },
callback: (
err: Error | null,
data?: ReturnType<typeof _formatResponse>
) => void
options: any,
callback: any,
) {
const entity = this.indexer.getEntityByKey(accessKey);
if (!entity) {
@ -87,21 +89,14 @@ class Backend {
accountDisplayName: this.indexer.getAcctDisplayName(entity),
canonicalID: entity.canonicalID,
arn: entity.arn,
// TODO Why?
// @ts-ignore
IAMdisplayName: entity.IAMdisplayName,
};
const vaultReturnObject = _formatResponse(userInfoToSend);
const vaultReturnObject = this.formatResponse(userInfoToSend);
return callback(null, vaultReturnObject);
}
// TODO log not used. Why ?
// CODEQUALITY-TODO-SYNC Should be synchronous
getCanonicalIds(
emails: string[],
_log: any,
cb: (err: null, data: { message: { body: any } }) => void
) {
getCanonicalIds(emails: string[], log: Logger, cb: any) {
const results = {};
emails.forEach(email => {
const lowercasedEmail = email.toLowerCase();
@ -121,13 +116,7 @@ class Backend {
return cb(null, vaultReturnObject);
}
// TODO options not used. Why ?
// CODEQUALITY-TODO-SYNC Should be synchronous
getEmailAddresses(
canonicalIDs: string[],
_options: any,
cb: (err: null, data: { message: { body: any } }) => void
) {
getEmailAddresses(canonicalIDs: string[], options: any, cb: any) {
const results = {};
canonicalIDs.forEach(canonicalId => {
const foundEntity = this.indexer.getEntityByCanId(canonicalId);
@ -145,24 +134,17 @@ class Backend {
return cb(null, vaultReturnObject);
}
// TODO options not used. Why ?
// CODEQUALITY-TODO-SYNC Should be synchronous
/**
* Gets accountIds for a list of accounts based on
* the canonical IDs associated with the account
* @param canonicalIDs - list of canonicalIDs
* @param _options - to send log id to vault
* @param options - to send log id to vault
* @param cb - callback to calling function
* @returns The next is wrong. Here to keep archives.
* callback with either error or
* @returns callback with either error or
* an object from Vault containing account canonicalID
* as each object key and an accountId as the value (or "NotFound")
*/
getAccountIds(
canonicalIDs: string[],
_options: any,
cb: (err: null, data: { message: { body: any } }) => void
) {
getAccountIds(canonicalIDs: string[], options: any, cb: any) {
const results = {};
canonicalIDs.forEach(canonicalID => {
const foundEntity = this.indexer.getEntityByCanId(canonicalID);
@ -179,16 +161,34 @@ class Backend {
};
return cb(null, vaultReturnObject);
}
report(log: Logger, callback: any) {
return callback(null, {});
}
}
class S3AuthBackend extends Backend {
constructor(authdata: Accounts) {
super('s3', new Indexer(authdata));
class S3AuthBackend extends InMemoryBackend {
/**
* @constructor
* @param authdata - the authentication config file's data
* @param authdata.accounts - array of account objects
* @param authdata.accounts[].name - account name
* @param authdata.accounts[].email - account email
* @param authdata.accounts[].arn - IAM resource name
* @param authdata.accounts[].canonicalID - account canonical ID
* @param authdata.accounts[].shortid - short account ID
* @param authdata.accounts[].keys - array of key objects
* @param authdata.accounts[].keys[].access - access key
* @param authdata.accounts[].keys[].secret - secret key
*/
constructor(authdata?: Accounts) {
super('s3', new Indexer(authdata), _formatResponse);
}
refreshAuthData(authData: Accounts) {
refreshAuthData(authData?: Accounts) {
this.indexer = new Indexer(authData);
}
}
export { S3AuthBackend as s3 };
export { S3AuthBackend as s3 }

View File

@ -42,37 +42,40 @@ export default function awsURIencode(
if (typeof input !== 'string') {
return '';
}
// precalc slash and star based on configs
let encoded = "";
const slash = encodeSlash === undefined || encodeSlash ? '%2F' : '/';
const star = noEncodeStar !== undefined && noEncodeStar ? '*' : '%2A';
const encoded: string[] = [];
const charArray = Array.from(input);
for (const ch of charArray) {
switch (true) {
case ch >= 'A' && ch <= 'Z':
case ch >= 'a' && ch <= 'z':
case ch >= '0' && ch <= '9':
case ch === '-':
case ch === '_':
case ch === '~':
case ch === '.':
encoded.push(ch);
break;
case ch === '/':
encoded.push(slash);
break;
case ch === '*':
encoded.push(star);
break;
case ch === ' ':
encoded.push('%20');
break;
default:
encoded.push(_toHexUTF8(ch));
break;
for (let i = 0; i < input.length; i++) {
let ch = input.charAt(i);
if ((ch >= 'A' && ch <= 'Z') ||
(ch >= 'a' && ch <= 'z') ||
(ch >= '0' && ch <= '9') ||
ch === '_' || ch === '-' ||
ch === '~' || ch === '.') {
encoded = encoded.concat(ch);
} else if (ch === ' ') {
encoded = encoded.concat('%20');
} else if (ch === '/') {
encoded = encoded.concat(slash);
} else if (ch === '*') {
encoded = encoded.concat(star);
} else {
if (ch >= '\uD800' && ch <= '\uDBFF') {
// If this character is a high surrogate peek the next character
// and join it with this one if the next character is a low
// surrogate.
// Otherwise the encoded URI will contain the two surrogates as
// two distinct UTF-8 sequences which is not valid UTF-8.
if (i + 1 < input.length) {
const ch2 = input.charAt(i + 1);
if (ch2 >= '\uDC00' && ch2 <= '\uDFFF') {
i++;
ch += ch2;
}
}
}
encoded = encoded.concat(_toHexUTF8(ch));
}
}
return encoded.join('');
return encoded;
}

View File

@ -132,6 +132,17 @@ export function check(
return { err: errors.RequestTimeTooSkewed };
}
let proxyPath: string | undefined;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
} catch (err) {
log.debug('invalid proxy_path header', { proxyPath, err });
return { err: errors.InvalidArgument.customizeDescription(
'invalid proxy_path header') };
}
}
const stringToSign = constructStringToSign({
log,
request,
@ -141,6 +152,7 @@ export function check(
timestamp,
payloadChecksum,
awsService: service,
proxyPath,
});
log.trace('constructed stringToSign', { stringToSign });
if (stringToSign instanceof Error) {

View File

@ -56,6 +56,17 @@ export function check(request: any, log: Logger, data: { [key: string]: string }
return { err: errors.RequestTimeTooSkewed };
}
let proxyPath: string | undefined;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
} catch (err) {
log.debug('invalid proxy_path header', { proxyPath });
return { err: errors.InvalidArgument.customizeDescription(
'invalid proxy_path header') };
}
}
// In query v4 auth, the canonical request needs
// to include the query params OTHER THAN
// the signature so create a
@ -81,6 +92,7 @@ export function check(request: any, log: Logger, data: { [key: string]: string }
credentialScope:
`${scopeDate}/${region}/${service}/${requestType}`,
awsService: service,
proxyPath,
});
if (stringToSign instanceof Error) {
return { err: stringToSign };

View File

@ -3,7 +3,7 @@ import async from 'async';
import errors from '../../../errors';
import { Logger } from 'werelogs';
import Vault, { AuthV4RequestParams } from '../../Vault';
import { Callback } from '../../in_memory/types';
import { Callback } from '../../backends/in_memory/types';
import constructChunkStringToSign from './constructChunkStringToSign';

View File

@ -83,7 +83,7 @@ export type ResultObject = {
export type CommandPromise = {
resolve: (results?: ResultObject[]) => void;
reject: (error: Error) => void;
timeout: NodeJS.Timer | null;
timeout: NodeJS.Timeout | null;
};
export type HandlerCallback = (error: (Error & { code?: number }) | null | undefined, result?: any) => void;
export type HandlerFunction = (payload: object, uids: string, callback: HandlerCallback) => void;
@ -254,7 +254,7 @@ export async function sendWorkerCommand(
}
rpcLogger.info('sending command', { toWorkers, toHandler, uids, payload });
return new Promise((resolve, reject) => {
let timeout: NodeJS.Timer | null = null;
let timeout: NodeJS.Timeout | null = null;
if (timeoutMs) {
timeout = setTimeout(() => {
delete uidsToCommandPromise[uids];

View File

@ -2,18 +2,18 @@ import * as crypto from 'crypto';
// The min value here is to manage further backward compat if we
// need it
const iamSecurityTokenSizeMin = 128;
const iamSecurityTokenSizeMax = 128;
// Security token is an hex string (no real format from amazon)
const iamSecurityTokenPattern = new RegExp(
`^[a-f0-9]{${iamSecurityTokenSizeMin},${iamSecurityTokenSizeMax}}$`,
);
// Default value
export const vaultGeneratedIamSecurityTokenSizeMin = 128;
// Safe to assume that a typical token size is less than 8192 bytes
export const vaultGeneratedIamSecurityTokenSizeMax = 8192;
// Base-64
export const vaultGeneratedIamSecurityTokenPattern = /^[A-Za-z0-9/+=]*$/;
// info about the iam security token
export const iamSecurityToken = {
min: iamSecurityTokenSizeMin,
max: iamSecurityTokenSizeMax,
pattern: iamSecurityTokenPattern,
min: vaultGeneratedIamSecurityTokenSizeMin,
max: vaultGeneratedIamSecurityTokenSizeMax,
pattern: vaultGeneratedIamSecurityTokenPattern,
};
// PublicId is used as the canonicalID for a request that contains
// no authentication information. Requestor can access
@ -22,6 +22,7 @@ export const publicId = 'http://acs.amazonaws.com/groups/global/AllUsers';
export const zenkoServiceAccount = 'http://acs.zenko.io/accounts/service';
export const metadataFileNamespace = '/MDFile';
export const dataFileURL = '/DataFile';
export const passthroughFileURL = '/PassthroughFile';
// AWS states max size for user-defined metadata
// (x-amz-meta- headers) is 2 KB:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
@ -31,7 +32,16 @@ export const maximumMetaHeadersSize = 2136;
export const emptyFileMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
// Version 2 changes the format of the data location property
// Version 3 adds the dataStoreName attribute
export const mdModelVersion = 3;
// Version 4 add the Creation-Time and Content-Language attributes,
// and add support for x-ms-meta-* headers in UserMetadata
// Version 5 adds the azureInfo structure
// Version 6 adds a "deleted" flag that is updated to true before
// the object gets deleted. This is done to keep object metadata in the
// oplog when deleting the object, as oplog deletion events don't contain
// any metadata of the object.
// version 6 also adds the "isPHD" flag that is used to indicate that the master
// object is a placeholder and is not up to date.
export const mdModelVersion = 6;
/*
* Splitter is used to build the object name for the overview of a
* multipart upload and to build the object names for each part of a
@ -71,19 +81,45 @@ export const mpuBucketPrefix = 'mpuShadowBucket';
export const permittedCapitalizedBuckets = {
METADATA: true,
};
// Setting a lower object key limit to account for:
// - Mongo key limit of 1012 bytes
// - Version ID in Mongo Key if versioned of 33
// - Max bucket name length if bucket match false of 63
// - Extra prefix slash for bucket prefix if bucket match of 1
export const objectKeyByteLimit = 915;
/* delimiter for location-constraint. The location constraint will be able
* to include the ingestion flag
*/
export const zenkoSeparator = ':';
/* eslint-disable camelcase */
export const externalBackends = { aws_s3: true, azure: true, gcp: true, pfs: true }
export const hasCopyPartBackends = { aws_s3: true, gcp: true }
export const versioningNotImplBackends = { azure: true, gcp: true }
export const mpuMDStoredExternallyBackend = { aws_s3: true, gcp: true }
export const externalBackends = { aws_s3: true, azure: true, gcp: true, pfs: true };
export const replicationBackends = { aws_s3: true, azure: true, gcp: true };
// hex digest of sha256 hash of empty string:
export const emptyStringHash = crypto.createHash('sha256')
.update('', 'binary').digest('hex');
export const mpuMDStoredExternallyBackend = { aws_s3: true, gcp: true };
// AWS sets a minimum size limit for parts except for the last part.
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
export const minimumAllowedPartSize = 5242880;
// hex digest of sha256 hash of empty string:
export const emptyStringHash = crypto.createHash('sha256').update('', 'binary').digest('hex');
export const gcpMaximumAllowedPartCount = 1024;
// GCP Object Tagging Prefix
export const gcpTaggingPrefix = 'aws-tag-';
export const productName = 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko';
export const legacyLocations = ['sproxyd', 'legacy'];
// healthcheck default call from nginx is every 2 seconds
// for external backends, don't call unless at least 1 minute
// (60,000 milliseconds) since last call
export const externalBackendHealthCheckInterval = 60000;
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
export const clientsRequireStringKey = { sproxyd: true, cdmi: true };
export const hasCopyPartBackends = { aws_s3: true, gcp: true };
export const versioningNotImplBackends = { azure: true, gcp: true };
// user metadata applied on zenko-created objects
export const zenkoIDHeader = 'x-amz-meta-zenko-instance-id';
// Default expiration value of the S3 pre-signed URL duration
// 604800 seconds (seven days).
export const legacyLocations = ['sproxyd', 'legacy'];
export const defaultPreSignedURLExpiry = 7 * 24 * 60 * 60;
// Regex for ISO-8601 formatted date
export const shortIso8601Regex = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/;
@ -96,16 +132,21 @@ export const supportedNotificationEvents = new Set([
's3:ObjectRemoved:*',
's3:ObjectRemoved:Delete',
's3:ObjectRemoved:DeleteMarkerCreated',
's3:Replication:OperationFailedReplication',
's3:ObjectTagging:*',
's3:ObjectTagging:Put',
's3:ObjectTagging:Delete',
's3:ObjectAcl:Put',
's3:ObjectRestore:*',
's3:ObjectRestore:Post',
's3:ObjectRestore:Completed',
's3:ObjectRestore:Delete',
's3:LifecycleTransition',
's3:LifecycleExpiration:*',
's3:LifecycleExpiration:DeleteMarkerCreated',
's3:LifecycleExpiration:Delete',
]);
export const notificationArnPrefix = 'arn:scality:bucketnotif';
// HTTP server keep-alive timeout is set to a higher value than
// client's free sockets timeout to avoid the risk of triggering
// ECONNRESET errors if the server closes the connection at the
@ -122,10 +163,14 @@ export const supportedLifecycleRules = [
'expiration',
'noncurrentVersionExpiration',
'abortIncompleteMultipartUpload',
'transitions',
'noncurrentVersionTransition',
];
// Maximum number of buckets to cache (bucket metadata)
export const maxCachedBuckets = process.env.METADATA_MAX_CACHED_BUCKETS ?
Number(process.env.METADATA_MAX_CACHED_BUCKETS) : 1000;
export const validRestoreObjectTiers = new Set(['Expedited', 'Standard', 'Bulk']);
export const maxBatchingConcurrentOperations = 5;
/** For the policy resource ARN check, we allow an empty account ID to avoid breaking compatibility */

View File

@ -1042,3 +1042,15 @@ export const AuthMethodNotImplemented: ErrorFormat = {
description: 'AuthMethodNotImplemented',
code: 501,
};
// --------------------- quotaErrors ---------------------
export const NoSuchQuota: ErrorFormat = {
code: 404,
description: 'The specified resource does not have a quota.',
};
export const QuotaExceeded: ErrorFormat = {
code: 429,
description: 'The quota set for the resource is exceeded.',
};
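A hedged sketch of how these formats surface at a call site, assuming the generated errors map from the error module below (the import path and quota variables are illustrative):
import { errors } from 'arsenal'; // assumed import path

function checkQuota(usedBytes: number, incomingBytes: number, quotaMax: number) {
    if (quotaMax > 0 && usedBytes + incomingBytes > quotaMax) {
        throw errors.QuotaExceeded; // surfaces as HTTP 429 per the format above
    }
}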

View File

@ -2,7 +2,7 @@ import type { ServerResponse } from 'http';
import * as rawErrors from './arsenalErrors';
/** All possible errors names. */
export type Name = keyof typeof rawErrors;
/** Object containing all errors names. It has the format { [Name]: "Name" } */
export type Names = { [Name_ in Name]: Name_ };
/** Mapping used to determine an error type. It has the format { [Name]: boolean } */
@ -13,7 +13,7 @@ export type Errors = { [_ in Name]: ArsenalError };
// This object is reused constantly through createIs, we store it there
// to avoid recomputation.
const isBase = Object.fromEntries(
Object.keys(rawErrors).map((key) => [key, false])
) as Is;
// This allows us to conditionally add the old behavior of errors to properly
@ -32,7 +32,7 @@ export const allowUnsafeErrComp = (
// the Proxy will return false.
const createIs = (type: Name): Is => {
const get = (is: Is, value: string | symbol) => is[value] ?? false;
const final = Object.freeze({ ...isBase, [type]: true });
return new Proxy(final, { get });
};
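The frozen map plus the Proxy default yields constant-time type checks with no string comparison; a short illustration, assuming the module's generated errors map:
const err = errors.NoSuchKey;  // an ArsenalError instance
err.is.NoSuchKey;              // true
err.is.InternalError;          // false, like every other name,
                               // via the Proxy's `?? false` default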
@ -46,13 +46,18 @@ export class ArsenalError extends Error {
/** Object used to determine the error type.
* Example: error.is.InternalError */
#is: Is;
/** A map of error metadata (can be extra fields
* that only show in debug mode) */
#metadata: Map<string, Object[]>;
private constructor(type: Name, code: number, description: string,
metadata?: Map<string, Object[]>) {
super(type);
this.#code = code;
this.#description = description;
this.#type = type;
this.#is = createIs(type);
this.#metadata = metadata ?? new Map<string, Object[]>();
// This restores the old behavior of errors, to make sure they're now
// backward-compatible. Fortunately it's handled by TS, but it cannot
@ -106,7 +111,22 @@ export class ArsenalError extends Error {
customizeDescription(description: string): ArsenalError {
const type = this.#type;
const code = this.#code;
const metadata = new Map(this.#metadata);
const err = new ArsenalError(type, code, description, metadata);
err.stack = this.stack;
return err;
}
/** Clone the error with a new metadata field */
addMetadataEntry(key: string, value: Object[]): ArsenalError {
const type = this.#type;
const code = this.#code;
const description = this.#description;
const metadata = new Map(this.#metadata);
metadata.set(key, value);
const err = new ArsenalError(type, code, description, metadata);
err.stack = this.stack;
return err;
}
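Both helpers clone rather than mutate, so a shared base error stays untouched; a hedged usage sketch (values illustrative):
const base = errors.InternalError;
const annotated = base
    .customizeDescription('failed to evaluate quota')
    .addMetadataEntry('bucket', [{ name: 'example-bucket' }]);
annotated.metadata.get('bucket'); // [{ name: 'example-bucket' }]
base.metadata.has('bucket');      // false: the clone owns its own map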
/** Used to determine the error type. Example: error.is.InternalError */
@ -131,9 +151,14 @@ export class ArsenalError extends Error {
return this.#type;
}
/** A map of error metadata */
get metadata() {
return this.#metadata;
}
/** Generate all possible errors. An instance is created by default. */
static errors() {
const errors = {};
Object.entries(rawErrors).forEach((value) => {
const name = value[0] as Name;
const error = value[1];
@ -141,7 +166,7 @@ export class ArsenalError extends Error {
const get = () => new ArsenalError(name, code, description);
Object.defineProperty(errors, name, { get });
});
return errors as Errors;
}
}

View File

@ -7,8 +7,8 @@
"test": "mocha --recursive --timeout 5500 tests/unit"
},
"dependencies": {
"mocha": "2.5.3",
"async": "^2.6.0",
"mocha": "5.2.0",
"async": "~2.6.1",
"node-forge": "^0.7.1"
}
}

View File

@ -20,7 +20,32 @@ export default class RedisClient {
return this;
}
/**
* scan a pattern and return matching keys
* @param pattern - string pattern to match with all existing keys
* @param [count=10] - scan count
* @param cb - callback (error, result)
*/
scan(pattern: string, count = 10, cb: Callback) {
const params = { match: pattern, count };
const keys: any[] = [];
const stream = this._client.scanStream(params);
stream.on('data', resultKeys => {
for (let i = 0; i < resultKeys.length; i++) {
keys.push(resultKeys[i]);
}
});
stream.on('end', () => {
cb(null, keys);
});
}
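A hedged usage sketch, with redis standing for a connected RedisClient instance; note the scan stream's 'error' event is not handled above, so a Redis failure surfaces as an unhandled stream error rather than through cb:
redis.scan('s3:*:requests', 100, (err, keys) => {
    // keys holds every key matched across all SCAN batches
    (keys ?? []).forEach((key: string) => console.log(key));
});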
/** increment value of a key by 1 and set a ttl
* @param key - key holding the value
* @param expiry - expiry in seconds
* @param cb - callback
*/
incrEx(key: string, expiry: number, cb: Callback) {
const exp = expiry.toString();
return this._client
@ -28,7 +53,22 @@ export default class RedisClient {
.exec(cb);
}
/**
* increment value of a key by a given amount
* @param key - key holding the value
* @param amount - amount to increase by
* @param cb - callback
*/
incrby(key: string, amount: number, cb: Callback) {
return this._client.incrby(key, amount, cb);
}
/** increment value of a key by a given amount and set a ttl
* @param key - key holding the value
* @param amount - amount to increase by
* @param expiry - expiry in seconds
* @param cb - callback
*/
incrbyEx(key: string, amount: number, expiry: number, cb: Callback) {
const am = amount.toString();
const exp = expiry.toString();
@ -37,13 +77,29 @@ export default class RedisClient {
.exec(cb);
}
/**
* decrement value of a key by a given amount
* @param key - key holding the value
* @param amount - amount to decrease by
* @param cb - callback
*/
decrby(key: string, amount: number, cb: Callback) {
return this._client.decrby(key, amount, cb);
}
/**
* execute a batch of commands
* @param cmds - list of commands
* @param cb - callback
*/
batch(cmds: string[][], cb: Callback) {
return this._client.pipeline(cmds).exec(cb);
}
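batch hands the list straight to ioredis's pipeline(), so each entry is a raw command expressed as an array of strings; an illustrative call:
redis.batch([
    ['incr', 'example-bucket:requests'],
    ['expire', 'example-bucket:requests', '900'],
], (err, results) => {
    // results is one [error, reply] pair per queued command
});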
/**
* Checks if a key exists
* @param key - name of key
* @param cb - callback
* If cb response returns 0, key does not exist.
* If cb response returns 1, key exists.
@ -52,10 +108,22 @@ export default class RedisClient {
return this._client.exists(key, cb);
}
/**
* get value stored at key
* @param key - key holding the value
* @param cb - callback
*/
get(key: string, cb: Callback) {
return this._client.get(key, cb);
}
/**
* Add a value and its score to a sorted set. If no sorted set exists, this
* will create a new one for the given key.
* @param key - name of key
* @param score - score used to order set
* @param value - value to store
* @param cb - callback
*/
zadd(key: string, score: number, value: string, cb: Callback) {
return this._client.zadd(key, score, value, cb);
@ -66,6 +134,8 @@ export default class RedisClient {
* Note: using this on a key that does not exist will return 0.
* Note: using this on an existing key that isn't a sorted set will
* return an error WRONGTYPE.
* @param key - name of key
* @param cb - callback
*/
zcard(key: string, cb: Callback) {
return this._client.zcard(key, cb);
@ -76,6 +146,9 @@ export default class RedisClient {
* Note: using this on a key that does not exist will return nil.
* Note: using this on a value that does not exist in a valid sorted set key
* will return nil.
* @param key - name of key
* @param value - value within sorted set
* @param cb - callback
*/
zscore(key: string, value: string, cb: Callback) {
return this._client.zscore(key, value, cb);
@ -83,8 +156,10 @@ export default class RedisClient {
/**
* Remove a value from a sorted set
* @param value - value within sorted set. Can specify multiple values within an array
* @param {function} cb - callback
* @param key - name of key
* @param value - value within sorted set. Can specify
* multiple values within an array
* @param cb - callback
* The cb response returns number of values removed
*/
zrem(key: string, value: string | string[], cb: Callback) {
@ -93,8 +168,10 @@ export default class RedisClient {
/**
* Get specified range of elements in a sorted set
* @param key - name of key
* @param start - start index (inclusive)
* @param end - end index (inclusive) (can use -1)
* @param cb - callback
*/
zrange(key: string, start: number, end: number, cb: Callback) {
return this._client.zrange(key, start, end, cb);
@ -102,10 +179,12 @@ export default class RedisClient {
/**
* Get range of elements in a sorted set based off score
* @param key - name of key
* @param min - min score value (inclusive)
* (can use "-inf")
* @param max - max score value (inclusive)
* (can use "+inf")
* @param cb - callback
*/
zrangebyscore(
key: string,
@ -116,6 +195,15 @@ export default class RedisClient {
return this._client.zrangebyscore(key, min, max, cb);
}
/**
* get TTL or expiration in seconds
* @param key - name of key
* @param cb - callback
*/
ttl(key: string, cb: Callback) {
return this._client.ttl(key, cb);
}
clear(cb: Callback) {
return this._client.flushdb(cb);
}
@ -123,4 +211,8 @@ export default class RedisClient {
disconnect() {
this._client.disconnect();
}
listClients(cb: Callback) {
return this._client.client('list', cb);
}
}

View File

@ -2,6 +2,8 @@ import async from 'async';
import RedisClient from './RedisClient';
import { Logger } from 'werelogs';
export type Callback = (error: Error | null, value?: any) => void;
export default class StatsClient {
_redis: RedisClient;
_interval: number;
@ -48,7 +50,7 @@ export default class StatsClient {
* @param d - Date instance
* @return key - key for redis
*/
buildKey(name: string, d: Date): string {
return `${name}:${this._normalizeTimestamp(d)}`;
}
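buildKey therefore produces interval-aligned keys; for example, assuming a 300-second interval (so minutes snap to multiples of 5):
statsClient.buildKey('s3:requests', new Date('2024-07-08T10:47:12Z'));
// -> 's3:requests:1720435500000' (normalized to 10:45:00.000Z)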
@ -91,11 +93,33 @@ export default class StatsClient {
amount = (typeof incr === 'number') ? incr : 1;
}
const key = this.buildKey(`${id}:requests`, new Date());
return this._redis.incrbyEx(key, amount, this._expiry, callback);
}
/**
* Increment the given key by the given value.
* @param key - The Redis key to increment
* @param incr - The value to increment by
* @param [cb] - callback
*/
incrementKey(key: string, incr: number, cb: Callback) {
const callback = cb || this._noop;
return this._redis.incrby(key, incr, callback);
}
/**
* Decrement the given key by the given value.
* @param key - The Redis key to decrement
* @param decr - The value to decrement by
* @param [cb] - callback
*/
decrementKey(key: string, decr: number, cb: Callback) {
const callback = cb || this._noop;
return this._redis.decrby(key, decr, callback);
}
/**
* report/record a request that ended up being a 500 on the server
* @param id - service identifier
@ -105,10 +129,53 @@ export default class StatsClient {
return undefined;
}
const callback = cb || this._noop;
const key = this.buildKey(`${id}:500s`, new Date());
return this._redis.incrEx(key, this._expiry, callback);
}
/**
* wrapper on `getStats` that handles a list of keys
* @param log - Werelogs request logger
* @param ids - service identifiers
* @param cb - callback to call with the err/result
*/
getAllStats(log: Logger, ids: string[], cb: Callback) {
if (!this._redis) {
return cb(null, {});
}
const statsRes = {
'requests': 0,
'500s': 0,
'sampleDuration': this._expiry,
};
let requests = 0;
let errors = 0;
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id: string, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
}
requests += res.requests;
errors += res['500s'];
return done();
});
}, error => {
if (error) {
log.error('error getting stats', {
error,
method: 'StatsClient.getAllStats',
});
return cb(null, statsRes);
}
statsRes.requests = requests;
statsRes['500s'] = errors;
return cb(null, statsRes);
});
}
/**
* get stats for the last x seconds, x being the sampling duration
* @param log - Werelogs request logger
@ -123,8 +190,8 @@ export default class StatsClient {
const reqsKeys: ['get', string][] = [];
const req500sKeys: ['get', string][] = [];
for (let i = 0; i < totalKeys; i++) {
reqsKeys.push(['get', this.buildKey(`${id}:requests`, d)]);
req500sKeys.push(['get', this.buildKey(`${id}:500s`, d)]);
this._setPrevInterval(d);
}
return async.parallel([

View File

@ -1,4 +1,8 @@
import StatsClient from './StatsClient';
import { Logger } from 'werelogs';
import async from 'async';
export type Callback = (error: Error | null, value?: any) => void;
/**
* @class StatsModel
@ -7,12 +11,145 @@ import StatsClient from './StatsClient';
* rather than by seconds
*/
export default class StatsModel extends StatsClient {
/**
* Utility method to convert 2d array rows to columns, and vice versa
* See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
* @param arrays - 2d array of integers
* @return converted array
*/
_zip(arrays: number[][]) {
if (arrays.length > 0 && arrays.every(a => Array.isArray(a))) {
return arrays[0].map((_, i) => arrays.map(a => a[i]));
}
return [];
}
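In other words, _zip transposes per-id rows into per-interval columns; an illustrative call on a StatsModel instance:
statsModel._zip([[1, 2, 3], [4, 5, 6]]); // -> [[1, 4], [2, 5], [3, 6]]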
/**
* normalize to the nearest interval
* @param d - Date instance
* @return timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d: Date) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
/**
* override the method to get the count as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return array of integers, ordered from most recent interval to
* oldest interval with length of (expiry / interval)
*/
// @ts-expect-error
_getCount(arr: [any, string | null][]): number[] {
const size = Math.floor(this._expiry / this._interval);
const array = arr.reduce((store, i) => {
let num = parseInt(i[1] ?? '', 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, [] as number[]);
if (array.length < size) {
array.push(...Array(size - array.length).fill(0));
}
return array;
}
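For example, assuming an expiry of 900s and an interval of 300s (so size is 3), a short batch reply is parsed and zero-padded:
// [error, value] pairs as returned by the Redis batch
statsModel._getCount([[null, '4'], [null, null]]); // -> [4, 0, 0]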
/**
* wrapper on `getStats` that handles a list of keys
* override the method to reduce the returned 2d array from `_getCount`
* @param log - Werelogs request logger
* @param ids - service identifiers
* @param cb - callback to call with the err/result
*/
getAllStats(log: Logger, ids: string[], cb: Callback) {
if (!this._redis) {
return cb(null, {});
}
const size = Math.floor(this._expiry / this._interval);
const statsRes = {
'requests': Array(size).fill(0),
'500s': Array(size).fill(0),
'sampleDuration': this._expiry,
};
const requests: any[] = [];
const errors: any[] = [];
if (ids.length === 0) {
return cb(null, statsRes);
}
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
}
requests.push(res.requests);
errors.push(res['500s']);
return done();
});
}, error => {
if (error) {
log.error('error getting stats', {
error,
method: 'StatsModel.getAllStats',
});
return cb(null, statsRes);
}
statsRes.requests = this._zip(requests).map(arr =>
arr.reduce((acc, i) => acc + i, 0));
statsRes['500s'] = this._zip(errors).map(arr =>
arr.reduce((acc, i) => acc + i, 0));
return cb(null, statsRes);
});
}
/**
* Handles getting a list of global keys.
* @param ids - Service identifiers
* @param log - Werelogs request logger
* @param cb - Callback
*/
getAllGlobalStats(ids: string[], log: Logger, cb: Callback) {
const reqsKeys = ids.map(key => (['get', key]));
return this._redis.batch(reqsKeys, (err, res) => {
const statsRes = { requests: 0 };
if (err) {
log.error('error getting metrics', {
error: err,
method: 'StatsClient.getAllGlobalStats',
});
return cb(null, statsRes);
}
statsRes.requests = res.reduce((sum, curr) => {
const [cmdErr, val] = curr;
if (cmdErr) {
// Log any individual request errors from the batch request.
log.error('error getting metrics', {
error: cmdErr,
method: 'StatsClient.getAllGlobalStats',
});
}
return sum + (Number.parseInt(val, 10) || 0);
}, 0);
return cb(null, statsRes);
});
}
/**
* normalize date timestamp to the nearest hour
* @param d - Date instance
* @return timestamp - normalized to the nearest hour
*/
normalizeTimestampByHour(d: Date) {
return d.setMinutes(0, 0, 0);
}
@ -21,40 +158,10 @@ export default class StatsModel extends StatsClient {
* @param d - Date instance
* @return timestamp - one hour prior to date passed
*/
_getDatePreviousHour(d: Date) {
return d.setHours(d.getHours() - 1);
}
/**
* get list of sorted set key timestamps
* @param epoch - epoch time

View File

@ -0,0 +1,281 @@
export type DeleteRetentionPolicy = {
enabled: boolean;
days: number;
};
/**
* Helper class to ease access to the Azure specific information for
* storage accounts mapped to buckets.
*/
export default class BucketAzureInfo {
_data: {
sku: string;
accessTier: string;
kind: string;
systemKeys: string[];
tenantKeys: string[];
subscriptionId: string;
resourceGroup: string;
deleteRetentionPolicy: DeleteRetentionPolicy;
managementPolicies: any[];
httpsOnly: boolean;
tags: any;
networkACL: any[];
cname: string;
azureFilesAADIntegration: boolean;
hnsEnabled: boolean;
logging: any;
hourMetrics: any;
minuteMetrics: any;
serviceVersion: string;
}
/**
* @constructor
* @param obj - Raw structure for the Azure info on storage account
* @param obj.sku - SKU name of this storage account
* @param obj.accessTier - Access Tier name of this storage account
* @param obj.kind - Kind name of this storage account
* @param obj.systemKeys - pair of shared keys for the system
* @param obj.tenantKeys - pair of shared keys for the tenant
* @param obj.subscriptionId - subscription ID the storage account
* belongs to
* @param obj.resourceGroup - Resource group name the storage
* account belongs to
* @param obj.deleteRetentionPolicy - Delete retention policy
* @param obj.deleteRetentionPolicy.enabled -
* @param obj.deleteRetentionPolicy.days -
* @param obj.managementPolicies - Management policies for this
* storage account
* @param obj.httpsOnly - Serve the content of this storage
* account through HTTPS only
* @param obj.tags - Set of tags applied on this storage account
* @param obj.networkACL - Network ACL of this storage account
* @param obj.cname - CNAME of this storage account
* @param obj.azureFilesAADIntegration - whether or not Azure
* Files AAD Integration is enabled for this storage account
* @param obj.hnsEnabled - whether or not a hierarchical namespace
* is enabled for this storage account
* @param obj.logging - service properties: logging
* @param obj.hourMetrics - service properties: hourMetrics
* @param obj.minuteMetrics - service properties: minuteMetrics
* @param obj.serviceVersion - service properties: serviceVersion
*/
constructor(obj: {
sku: string;
accessTier: string;
kind: string;
systemKeys: string[];
tenantKeys: string[];
subscriptionId: string;
resourceGroup: string;
deleteRetentionPolicy: DeleteRetentionPolicy;
managementPolicies: any[];
httpsOnly: boolean;
tags: any;
networkACL: any[];
cname: string;
azureFilesAADIntegration: boolean;
hnsEnabled: boolean;
logging: any;
hourMetrics: any;
minuteMetrics: any;
serviceVersion: string;
}) {
this._data = {
sku: obj.sku,
accessTier: obj.accessTier,
kind: obj.kind,
systemKeys: obj.systemKeys,
tenantKeys: obj.tenantKeys,
subscriptionId: obj.subscriptionId,
resourceGroup: obj.resourceGroup,
deleteRetentionPolicy: obj.deleteRetentionPolicy,
managementPolicies: obj.managementPolicies,
httpsOnly: obj.httpsOnly,
tags: obj.tags,
networkACL: obj.networkACL,
cname: obj.cname,
azureFilesAADIntegration: obj.azureFilesAADIntegration,
hnsEnabled: obj.hnsEnabled,
logging: obj.logging,
hourMetrics: obj.hourMetrics,
minuteMetrics: obj.minuteMetrics,
serviceVersion: obj.serviceVersion,
};
}
getSku() {
return this._data.sku;
}
setSku(sku: string) {
this._data.sku = sku;
return this;
}
getAccessTier() {
return this._data.accessTier;
}
setAccessTier(accessTier: string) {
this._data.accessTier = accessTier;
return this;
}
getKind() {
return this._data.kind;
}
setKind(kind: string) {
this._data.kind = kind;
return this;
}
getSystemKeys() {
return this._data.systemKeys;
}
setSystemKeys(systemKeys: string[]) {
this._data.systemKeys = systemKeys;
return this;
}
getTenantKeys() {
return this._data.tenantKeys;
}
setTenantKeys(tenantKeys: string[]) {
this._data.tenantKeys = tenantKeys;
return this;
}
getSubscriptionId() {
return this._data.subscriptionId;
}
setSubscriptionId(subscriptionId: string) {
this._data.subscriptionId = subscriptionId;
return this;
}
getResourceGroup() {
return this._data.resourceGroup;
}
setResourceGroup(resourceGroup: string) {
this._data.resourceGroup = resourceGroup;
return this;
}
getDeleteRetentionPolicy() {
return this._data.deleteRetentionPolicy;
}
setDeleteRetentionPolicy(deleteRetentionPolicy: DeleteRetentionPolicy) {
this._data.deleteRetentionPolicy = deleteRetentionPolicy;
return this;
}
getManagementPolicies() {
return this._data.managementPolicies;
}
setManagementPolicies(managementPolicies: any[]) {
this._data.managementPolicies = managementPolicies;
return this;
}
getHttpsOnly() {
return this._data.httpsOnly;
}
setHttpsOnly(httpsOnly: boolean) {
this._data.httpsOnly = httpsOnly;
return this;
}
getTags() {
return this._data.tags;
}
setTags(tags: any) {
this._data.tags = tags;
return this;
}
getNetworkACL() {
return this._data.networkACL;
}
setNetworkACL(networkACL: any[]) {
this._data.networkACL = networkACL;
return this;
}
getCname() {
return this._data.cname;
}
setCname(cname: string) {
this._data.cname = cname;
return this;
}
getAzureFilesAADIntegration() {
return this._data.azureFilesAADIntegration;
}
setAzureFilesAADIntegration(azureFilesAADIntegration: boolean) {
this._data.azureFilesAADIntegration = azureFilesAADIntegration;
return this;
}
getHnsEnabled() {
return this._data.hnsEnabled;
}
setHnsEnabled(hnsEnabled: boolean) {
this._data.hnsEnabled = hnsEnabled;
return this;
}
getLogging() {
return this._data.logging;
}
setLogging(logging: any) {
this._data.logging = logging;
return this;
}
getHourMetrics() {
return this._data.hourMetrics;
}
setHourMetrics(hourMetrics: any) {
this._data.hourMetrics = hourMetrics;
return this;
}
getMinuteMetrics() {
return this._data.minuteMetrics;
}
setMinuteMetrics(minuteMetrics: any) {
this._data.minuteMetrics = minuteMetrics;
return this;
}
getServiceVersion() {
return this._data.serviceVersion;
}
setServiceVersion(serviceVersion: any) {
this._data.serviceVersion = serviceVersion;
return this;
}
getValue() {
return this._data;
}
}

View File

@ -8,10 +8,12 @@ import ObjectLockConfiguration from './ObjectLockConfiguration';
import BucketPolicy from './BucketPolicy';
import NotificationConfiguration from './NotificationConfiguration';
import { ACL as OACL } from './ObjectMD';
import { areTagsValid, BucketTag } from '../s3middleware/tagging';
// WHEN UPDATING THIS NUMBER, UPDATE BucketInfoModelVersion.md CHANGELOG
// BucketInfoModelVersion.md can be found in documentation/ at the root
// of this repository
const modelVersion = 16;
export type CORS = {
id: string;
@ -35,6 +37,41 @@ export type VersioningConfiguration = {
MfaDelete: any;
};
export type VeeamSOSApi = {
SystemInfo?: {
ProtocolVersion: string,
ModelName: string,
ProtocolCapabilities: {
CapacityInfo: boolean,
UploadSessions: boolean,
IAMSTS?: boolean,
},
APIEndpoints?: {
IAMEndpoint: string,
STSEndpoint: string,
},
SystemRecommendations?: {
S3ConcurrentTaskLimit: number,
S3MultiObjectDelete: number,
StorageCurrentTasksLimit: number,
KbBlockSize: number,
}
LastModified?: string,
},
CapacityInfo?: {
Capacity: number,
Available: number,
Used: number,
LastModified?: string,
},
};
// Capabilities contains all specifics from external products supported by
// our S3 implementation, at bucket level
export type Capabilities = {
VeeamSOSApi?: VeeamSOSApi,
};
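A minimal Capabilities value using only the required fields of VeeamSOSApi (all values illustrative):
const capabilities: Capabilities = {
    VeeamSOSApi: {
        SystemInfo: {
            ProtocolVersion: '1.0',
            ModelName: 'Scality CloudServer',
            ProtocolCapabilities: {
                CapacityInfo: true,
                UploadSessions: false,
            },
        },
    },
};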
export type ACL = OACL & { WRITE: string[] }
export default class BucketInfo {
@ -58,56 +95,70 @@ export default class BucketInfo {
_objectLockEnabled?: boolean;
_objectLockConfiguration?: any;
_notificationConfiguration?: any;
_tags?: Array<BucketTag>;
_readLocationConstraint: string | null;
_isNFS: boolean | null;
_azureInfo: any | null;
_ingestion: { status: 'enabled' | 'disabled' } | null;
_capabilities?: Capabilities;
_quotaMax: number | 0;
/**
* Represents all bucket information.
* @constructor
* @param name - bucket name
* @param owner - bucket owner's name
* @param ownerDisplayName - owner's display name
* @param creationDate - creation date of bucket
* @param mdBucketModelVersion - bucket model version
* @param [acl] - bucket ACLs (no need to copy
* ACL object since referenced object will not be used outside of
* BucketInfo instance)
* @param transient - flag indicating whether bucket is transient
* @param deleted - flag indicating whether attempt to delete
* @param serverSideEncryption - sse information for this bucket
* @param serverSideEncryption.cryptoScheme -
* cryptoScheme used
* @param serverSideEncryption.algorithm -
* algorithm to use
* @param serverSideEncryption.masterKeyId -
* key to get master key
* @param serverSideEncryption.configuredMasterKeyId -
* custom KMS key id specified by user
* @param serverSideEncryption.mandatory -
* true for mandatory encryption
* bucket has been made
* @param versioningConfiguration - versioning configuration
* @param versioningConfiguration.Status - versioning status
* @param versioningConfiguration.MfaDelete - versioning mfa delete
* @param locationConstraint - locationConstraint for bucket that
* also includes the ingestion flag
* @param [websiteConfiguration] - website
* configuration
* @param [cors] - collection of CORS rules to apply
* @param [cors[].id] - optional ID to identify rule
* @param cors[].allowedMethods - methods allowed for CORS request
* @param cors[].allowedOrigins - origins allowed for CORS request
* @param [cors[].allowedHeaders] - headers allowed in an OPTIONS
* request via the Access-Control-Request-Headers header
* @param [cors[].maxAgeSeconds] - seconds browsers should cache
* OPTIONS response
* @param [cors[].exposeHeaders] - headers expose to applications
* @param [replicationConfiguration] - replication configuration
* @param [lifecycleConfiguration] - lifecycle configuration
* @param [bucketPolicy] - bucket policy
* @param [uid] - unique identifier for the bucket, necessary
* @param readLocationConstraint - readLocationConstraint for bucket
* addition for use with lifecycle operations
* @param [isNFS] - whether the bucket is on NFS
* @param [ingestionConfig] - object for ingestion status: en/dis
* @param [azureInfo] - Azure storage account specific info
* @param [objectLockEnabled] - true when object lock enabled
* @param [objectLockConfiguration] - object lock configuration
* @param [notificationConfiguration] - bucket notification configuration
* @param [tags] - bucket tag set
* @param [capabilities] - capabilities for the bucket
* @param quotaMax - bucket quota
*/
constructor(
name: string,
@ -127,10 +178,16 @@ export default class BucketInfo {
lifecycleConfiguration?: any,
bucketPolicy?: any,
uid?: string,
readLocationConstraint?: string,
isNFS?: boolean,
ingestionConfig?: { status: 'enabled' | 'disabled' },
azureInfo?: any,
objectLockEnabled?: boolean,
objectLockConfiguration?: any,
notificationConfiguration?: any,
tags?: { key: string; value: string }[],
tags?: Array<BucketTag> | [],
capabilities?: Capabilities,
quotaMax?: number | 0,
) {
assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof owner, 'string');
@ -172,6 +229,15 @@ export default class BucketInfo {
if (locationConstraint) {
assert.strictEqual(typeof locationConstraint, 'string');
}
if (ingestionConfig) {
assert.strictEqual(typeof ingestionConfig, 'object');
}
if (azureInfo) {
assert.strictEqual(typeof azureInfo, 'object');
}
if (readLocationConstraint) {
assert.strictEqual(typeof readLocationConstraint, 'string');
}
if (websiteConfiguration) {
assert(websiteConfiguration instanceof WebsiteConfiguration);
const indexDocument = websiteConfiguration.getIndexDocument();
@ -217,8 +283,14 @@ export default class BucketInfo {
READ: [],
READ_ACP: [],
};
if (tags) {
assert(Array.isArray(tags));
if (tags === undefined) {
tags = [] as BucketTag[];
}
assert.strictEqual(areTagsValid(tags), true);
if (quotaMax) {
assert.strictEqual(typeof quotaMax, 'number');
assert(quotaMax >= 0, 'Quota cannot be negative');
}
// IF UPDATING PROPERTIES, INCREMENT MODELVERSION NUMBER ABOVE
@ -233,16 +305,22 @@ export default class BucketInfo {
this._serverSideEncryption = serverSideEncryption || null;
this._versioningConfiguration = versioningConfiguration || null;
this._locationConstraint = locationConstraint || null;
this._readLocationConstraint = readLocationConstraint || null;
this._websiteConfiguration = websiteConfiguration || null;
this._replicationConfiguration = replicationConfiguration || null;
this._cors = cors || null;
this._lifecycleConfiguration = lifecycleConfiguration || null;
this._bucketPolicy = bucketPolicy || null;
this._uid = uid || uuid();
this._isNFS = isNFS || null;
this._ingestion = ingestionConfig || null;
this._azureInfo = azureInfo || null;
this._objectLockEnabled = objectLockEnabled || false;
this._objectLockConfiguration = objectLockConfiguration || null;
this._notificationConfiguration = notificationConfiguration || null;
this._tags = tags || null;
this._tags = tags;
this._capabilities = capabilities || undefined;
this._quotaMax = quotaMax || 0;
return this;
}
@ -263,16 +341,22 @@ export default class BucketInfo {
serverSideEncryption: this._serverSideEncryption,
versioningConfiguration: this._versioningConfiguration,
locationConstraint: this._locationConstraint,
readLocationConstraint: this._readLocationConstraint,
websiteConfiguration: undefined,
cors: this._cors,
replicationConfiguration: this._replicationConfiguration,
lifecycleConfiguration: this._lifecycleConfiguration,
bucketPolicy: this._bucketPolicy,
uid: this._uid,
isNFS: this._isNFS,
ingestion: this._ingestion,
azureInfo: this._azureInfo,
objectLockEnabled: this._objectLockEnabled,
objectLockConfiguration: this._objectLockConfiguration,
notificationConfiguration: this._notificationConfiguration,
tags: this._tags,
capabilities: this._capabilities,
quotaMax: this._quotaMax,
};
const final = this._websiteConfiguration
? {
@ -296,8 +380,10 @@ export default class BucketInfo {
obj.transient, obj.deleted, obj.serverSideEncryption,
obj.versioningConfiguration, obj.locationConstraint, websiteConfig,
obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration,
obj.bucketPolicy, obj.uid, obj.objectLockEnabled,
obj.objectLockConfiguration, obj.notificationConfiguration, obj.tags);
obj.bucketPolicy, obj.uid, obj.readLocationConstraint, obj.isNFS,
obj.ingestion, obj.azureInfo, obj.objectLockEnabled,
obj.objectLockConfiguration, obj.notificationConfiguration, obj.tags,
obj.capabilities, obj.quotaMax);
}
/**
@ -321,8 +407,11 @@ export default class BucketInfo {
data._versioningConfiguration, data._locationConstraint,
data._websiteConfiguration, data._cors,
data._replicationConfiguration, data._lifecycleConfiguration,
data._bucketPolicy, data._uid, data._objectLockEnabled,
data._objectLockConfiguration, data._notificationConfiguration, data._tags);
data._bucketPolicy, data._uid, data._readLocationConstraint,
data._isNFS, data._ingestion, data._azureInfo,
data._objectLockEnabled, data._objectLockConfiguration,
data._notificationConfiguration, data._tags, data._capabilities,
data._quotaMax);
}
/**
@ -619,6 +708,17 @@ export default class BucketInfo {
return this._locationConstraint;
}
/**
* Get read location constraint.
* @return - bucket read location constraint
*/
getReadLocationConstraint() {
if (this._readLocationConstraint) {
return this._readLocationConstraint;
}
return this._locationConstraint;
}
/**
* Set Bucket model version
*
@ -707,6 +807,85 @@ export default class BucketInfo {
this._uid = uid;
return this;
}
/**
* Check if the bucket is an NFS bucket.
* @return - Whether the bucket is NFS or not
*/
isNFS() {
return this._isNFS;
}
/**
* Set whether the bucket is an NFS bucket.
* @param isNFS - Whether the bucket is NFS or not
* @return - bucket info instance
*/
setIsNFS(isNFS: boolean) {
this._isNFS = isNFS;
return this;
}
/**
* enable ingestion, set 'this._ingestion' to { status: 'enabled' }
* @return - bucket info instance
*/
enableIngestion() {
this._ingestion = { status: 'enabled' };
return this;
}
/**
* disable ingestion, set 'this._ingestion' to { status: 'disabled' }
* @return - bucket info instance
*/
disableIngestion() {
this._ingestion = { status: 'disabled' };
return this;
}
/**
* Get ingestion configuration
* @return - bucket ingestion configuration: Enabled or Disabled
*/
getIngestion() {
return this._ingestion;
}
/**
* Check if bucket is an ingestion bucket
* @return - 'true' if bucket is ingestion bucket, 'false' otherwise
*/
isIngestionBucket() {
const ingestionConfig = this.getIngestion();
if (ingestionConfig) {
return true;
}
return false;
}
/**
* Check if ingestion is enabled
* @return - 'true' if ingestion is enabled, otherwise 'false'
*/
isIngestionEnabled() {
const ingestionConfig = this.getIngestion();
return ingestionConfig ? ingestionConfig.status === 'enabled' : false;
}
/**
* Return the Azure specific storage account information for this bucket
* @return - a structure suitable for {@link BucketAzureInfo}
* constructor
*/
getAzureInfo() {
return this._azureInfo;
}
/**
* Set the Azure specific storage account information for this bucket
* @param azureInfo - a structure suitable for
* {@link BucketAzureInfo} construction
* @return - bucket info instance
*/
setAzureInfo(azureInfo: any) {
this._azureInfo = azureInfo;
return this;
}
/**
* Check if object lock is enabled.
* @return - depending on whether object lock is enabled
@ -726,7 +905,7 @@ export default class BucketInfo {
/**
* Get the value of bucket tags
* @return - Array of bucket tags as {"key" : "key", "value": "value"}
* @return - Array of bucket tags
*/
getTags() {
return this._tags;
@ -734,13 +913,58 @@ export default class BucketInfo {
/**
* Set bucket tags
* @param tags - collection of tags
* @param tags[].key - key of the tag
* @param tags[].value - value of the tag
* @return - bucket info instance
*/
setTags(tags: { key: string; value: string }[]) {
setTags(tags: Array<BucketTag>) {
this._tags = tags;
return this;
}
/**
* Get the value of bucket capabilities
* @return - capabilities of the bucket
*/
getCapabilities() {
return this._capabilities;
}
/**
* Get a specific bucket capability
*
* @param capability? - if provided, will return a specific capability
* @return - capability of the bucket
*/
getCapability(capability: string): VeeamSOSApi | undefined {
if (capability && this._capabilities && this._capabilities[capability]) {
return this._capabilities[capability];
}
return undefined;
}
/**
* Set bucket capabilities
* @return - bucket info instance
*/
setCapabilities(capabilities: Capabilities) {
this._capabilities = capabilities;
return this;
}
/**
* Get the bucket quota information
* @return quotaMax
*/
getQuota() {
return this._quotaMax;
}
/**
* Set bucket quota
* @param quota - quota to be set
* @return - bucket info instance
*/
setQuota(quota: number) {
this._quotaMax = quota || 0;
return this;
}
}

View File

@ -7,6 +7,8 @@ import escapeForXml from '../s3middleware/escapeForXml';
import type { XMLRule } from './ReplicationConfiguration';
import { Status } from './LifecycleRule';
const MAX_DAYS = 2147483647; // Max 32-bit signed binary integer.
/**
* Format of xml request:
@ -87,6 +89,7 @@ export default class LifecycleConfiguration {
_parsedXML: any;
_ruleIDs: string[];
_tagKeys: string[];
_storageClasses: string[];
_config: {
error?: ArsenalError;
rules?: any[];
@ -95,10 +98,13 @@ export default class LifecycleConfiguration {
/**
* Create a Lifecycle Configuration instance
* @param xml - the parsed xml
* @param config - the CloudServer config
* @return - LifecycleConfiguration instance
*/
constructor(xml: any, config: { replicationEndpoints: { site: string }[] }) {
this._parsedXML = xml;
this._storageClasses =
config.replicationEndpoints.map(endpoint => endpoint.site);
this._ruleIDs = [];
this._tagKeys = [];
this._config = {};
@ -219,11 +225,6 @@ export default class LifecycleConfiguration {
* }
*/
_parseRule(rule: XMLRule) {
// Either Prefix or Filter must be included, but can be empty string
if ((!rule.Filter && rule.Filter !== '') &&
(!rule.Prefix && rule.Prefix !== '')) {
@ -492,6 +493,172 @@ export default class LifecycleConfiguration {
return { ...base, ruleStatus: status }
}
/**
* Finds the prefix and/or tags of the given rule and gets the error message
* @param rule - The rule to find the prefix in
* @return - The prefix of filter information
*/
_getRuleFilterDesc(rule: { Prefix?: string[]; Filter?: any[] }) {
if (rule.Prefix) {
return `prefix '${rule.Prefix[0]}'`;
}
// There must be a filter if no top-level prefix is provided. First
// check if there are multiple filters (i.e. `Filter.And`).
if (rule.Filter?.[0] === undefined || rule.Filter[0].And === undefined) {
const { Prefix, Tag } = rule.Filter?.[0] || {};
if (Prefix) {
return `filter '(prefix=${Prefix[0]})'`;
}
if (Tag) {
const { Key, Value } = Tag[0];
return `filter '(tag: key=${Key[0]}, value=${Value[0]})'`;
}
return 'filter (all)';
}
const filters: string[] = [];
const { Prefix, Tag } = rule.Filter[0].And[0];
if (Prefix) {
filters.push(`prefix=${Prefix[0]}`);
}
Tag.forEach((tag: { Key: string[]; Value: string[] }) => {
const { Key, Value } = tag;
filters.push(`tag: key=${Key[0]}, value=${Value[0]}`);
});
const joinedFilters = filters.join(' and ');
return `filter '(${joinedFilters})'`;
}
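For instance, an And-filter rule comes back as a single descriptive string (illustrative parsed-XML shape, with arrays because the XML parser wraps every value):
lifecycleConfig._getRuleFilterDesc({
    Filter: [{ And: [{
        Prefix: ['logs/'],
        Tag: [{ Key: ['env'], Value: ['prod'] }],
    }] }],
});
// -> "filter '(prefix=logs/ and tag: key=env, value=prod)'"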
/**
* Checks the validity of the given field
* @param params - Given function parameters
* @param params.days - The value of the field to check
* @param params.field - The field name with the value
* @param params.ancestor - The immediate ancestor field
* @return Returns an error object or `null`
*/
_checkDays(params: { days: number; field: string; ancestor: string }) {
const { days, field, ancestor } = params;
if (days < 0) {
const msg = `'${field}' in ${ancestor} action must be nonnegative`;
return errors.InvalidArgument.customizeDescription(msg);
}
if (days > MAX_DAYS) {
return errors.MalformedXML.customizeDescription(
`'${field}' in ${ancestor} action must not exceed ${MAX_DAYS}`);
}
return null;
}
/**
* Checks the validity of the given storage class
* @param params - Given function parameters
* @param params.usedStorageClasses - Storage classes used in other
* rules
* @param params.storageClass - The storage class of the current
* rule
* @param params.ancestor - The immediate ancestor field
* @param params.prefix - The prefix of the rule
* @return Returns an error object or `null`
*/
_checkStorageClasses(params: {
usedStorageClasses: string[];
storageClass: string;
ancestor: string;
rule: { Prefix?: string[]; Filter?: any };
}) {
const { usedStorageClasses, storageClass, ancestor, rule } = params;
if (!this._storageClasses.includes(storageClass)) {
// This differs from the AWS message. This will help the user since
// the StorageClass does not conform to AWS specs.
const list = `'${this._storageClasses.join("', '")}'`;
const msg = `'StorageClass' must be one of ${list}`;
return errors.MalformedXML.customizeDescription(msg);
}
if (usedStorageClasses.includes(storageClass)) {
const msg = `'StorageClass' must be different for '${ancestor}' ` +
`actions in same 'Rule' with ${this._getRuleFilterDesc(rule)}`;
return errors.InvalidRequest.customizeDescription(msg);
}
return null;
}
/**
* Ensure that transition rules are at least a day apart from each other.
* @param params - Given function parameters
* @param [params.days] - The days of the current transition
* @param [params.date] - The date of the current transition
* @param params.storageClass - The storage class of the current
* rule
* @param params.rule - The current rule
*/
_checkTimeGap(params: {
days?: number;
date?: string;
storageClass: string;
rule: { Transition: any[]; Prefix?: string[]; Filter?: any };
}) {
const { days, date, storageClass, rule } = params;
const invalidTransition = rule.Transition.find(transition => {
if (storageClass === transition.StorageClass[0]) {
return false;
}
if (days !== undefined) {
return Number.parseInt(transition.Days[0], 10) === days;
}
if (date !== undefined) {
const timestamp = new Date(date).getTime();
const compareTimestamp = new Date(transition.Date[0]).getTime();
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
return Math.abs(timestamp - compareTimestamp) < oneDay;
}
return false;
});
if (invalidTransition) {
const timeType = days !== undefined ? 'Days' : 'Date';
const filterMsg = this._getRuleFilterDesc(rule);
const compareStorageClass = invalidTransition.StorageClass[0];
const msg = `'${timeType}' in the 'Transition' action for ` +
`StorageClass '${storageClass}' for ${filterMsg} must be at ` +
`least one day apart from ${filterMsg} in the 'Transition' ` +
`action for StorageClass '${compareStorageClass}'`;
return errors.InvalidArgument.customizeDescription(msg);
}
return null;
}
/**
* Checks transition time type (i.e. 'Date' or 'Days') only occurs once
* across transitions and across transitions and expiration policies
* @param params - Given function parameters
* @param params.usedTimeType - The time type that has been used by
* another rule
* @param params.currentTimeType - the time type used by the
* current rule
* @param params.rule - The current rule
* @return Returns an error object or `null`
*/
_checkTimeType(params: {
usedTimeType: string | null;
currentTimeType: string;
rule: { Prefix?: string[]; Filter?: any; Expiration?: any[] };
}) {
const { usedTimeType, currentTimeType, rule } = params;
if (usedTimeType && usedTimeType !== currentTimeType) {
const msg = "Found mixed 'Date' and 'Days' based Transition " +
'actions in lifecycle rule for ' +
`${this._getRuleFilterDesc(rule)}`;
return errors.InvalidRequest.customizeDescription(msg);
}
// Transition time type cannot differ from the expiration, if provided.
if (rule.Expiration &&
rule.Expiration[0][currentTimeType] === undefined) {
const msg = "Found mixed 'Date' and 'Days' based Expiration and " +
'Transition actions in lifecycle rule for ' +
`${this._getRuleFilterDesc(rule)}`;
return errors.InvalidRequest.customizeDescription(msg);
}
return null;
}
/**
* Checks the validity of the given date
@ -533,6 +700,159 @@ export default class LifecycleConfiguration {
}
return null;
}
/**
* Parses the NonCurrentVersionTransition value
* @param rule - Rule object from Rule array from this._parsedXml
* @return - Contains error if parsing failed, otherwise contains
* the parsed nonCurrentVersionTransition array
*
* Format of result:
* result = {
* error: <error>,
* nonCurrentVersionTransition: [
* {
* noncurrentDays: <non-current-days>,
* storageClass: <storage-class>,
* },
* ...
* ]
* }
*/
_parseNoncurrentVersionTransition(rule: {
NoncurrentVersionTransition: any[];
Prefix?: string[];
Filter?: any;
}) {
const nonCurrentVersionTransition: {
noncurrentDays: number;
storageClass: string;
}[] = [];
const usedStorageClasses: string[] = [];
for (let i = 0; i < rule.NoncurrentVersionTransition.length; i++) {
const t = rule.NoncurrentVersionTransition[i]; // NoncurrentVersionTransition object
const noncurrentDays: number | undefined =
t.NoncurrentDays && Number.parseInt(t.NoncurrentDays[0], 10);
const storageClass: string | undefined = t.StorageClass && t.StorageClass[0];
if (noncurrentDays === undefined || storageClass === undefined) {
return { error: errors.MalformedXML };
}
let error = this._checkDays({
days: noncurrentDays,
field: 'NoncurrentDays',
ancestor: 'NoncurrentVersionTransition',
});
if (error) {
return { error };
}
error = this._checkStorageClasses({
storageClass,
usedStorageClasses,
ancestor: 'NoncurrentVersionTransition',
rule,
});
if (error) {
return { error };
}
nonCurrentVersionTransition.push({ noncurrentDays, storageClass });
usedStorageClasses.push(storageClass);
}
return { nonCurrentVersionTransition };
}
/**
* Parses the Transition value
* @param rule - Rule object from Rule array from this._parsedXml
* @return - Contains error if parsing failed, otherwise contains
* the parsed transition array
*
* Format of result:
* result = {
* error: <error>,
* transition: [
* {
* days: <days>,
* date: <date>,
* storageClass: <storage-class>,
* },
* ...
* ]
* }
*/
_parseTransition(rule: {
Transition: any[];
Prefix?: string[];
Filter?: any;
}) {
const transition:
({ days: number; storageClass: string }
| { date: string; storageClass: string })[] = [];
const usedStorageClasses: string[] = [];
let usedTimeType: string | null = null;
for (let i = 0; i < rule.Transition.length; i++) {
const t = rule.Transition[i]; // Transition object
const days = t.Days && Number.parseInt(t.Days[0], 10);
const date = t.Date && t.Date[0];
const storageClass = t.StorageClass && t.StorageClass[0];
if ((days === undefined && date === undefined) ||
(days !== undefined && date !== undefined) ||
(storageClass === undefined)) {
return { error: errors.MalformedXML };
}
let error = this._checkStorageClasses({
storageClass,
usedStorageClasses,
ancestor: 'Transition',
rule,
});
if (error) {
return { error };
}
usedStorageClasses.push(storageClass);
if (days !== undefined) {
error = this._checkTimeType({
usedTimeType,
currentTimeType: 'Days',
rule,
});
if (error) {
return { error };
}
usedTimeType = 'Days';
error = this._checkDays({
days,
field: 'Days',
ancestor: 'Transition',
});
if (error) {
return { error };
}
transition.push({ days, storageClass });
}
if (date !== undefined) {
error = this._checkTimeType({
usedTimeType,
currentTimeType: 'Date',
rule,
});
if (error) {
return { error };
}
usedTimeType = 'Date';
error = this._checkDate(date);
if (error) {
return { error };
}
transition.push({ date, storageClass });
}
error = this._checkTimeGap({ days, date, storageClass, rule });
if (error) {
return { error };
}
}
return { transition };
}
/**
* Check that action component of rule is valid
* @param rule - a rule object from Rule array from this._parsedXml
@ -569,8 +889,13 @@ export default class LifecycleConfiguration {
propName: 'actions',
actions: [],
};
const validActions = [
'AbortIncompleteMultipartUpload',
'Expiration',
'NoncurrentVersionExpiration',
'NoncurrentVersionTransition',
'Transition',
];
validActions.forEach(a => {
if (rule[a]) {
actionsObj.actions.push({ actionName: `${a}` });
@ -587,7 +912,14 @@ export default class LifecycleConfiguration {
if (action.error) {
actionsObj.error = action.error;
} else {
const actionTimes = [
'days',
'date',
'deleteMarker',
'transition',
'nonCurrentVersionTransition',
'newerNoncurrentVersions'
];
actionTimes.forEach(t => {
if (action[t]) {
// eslint-disable-next-line no-param-reassign
@ -821,6 +1153,26 @@ export default class LifecycleConfiguration {
if (a.deleteMarker) {
assert.strictEqual(typeof a.deleteMarker, 'string');
}
if (a.nonCurrentVersionTransition) {
assert.strictEqual(
typeof a.nonCurrentVersionTransition, 'object');
a.nonCurrentVersionTransition.forEach(t => {
assert.strictEqual(typeof t.noncurrentDays, 'number');
assert.strictEqual(typeof t.storageClass, 'string');
});
}
if (a.transition) {
assert.strictEqual(typeof a.transition, 'object');
a.transition.forEach(t => {
if (t.days || t.days === 0) {
assert.strictEqual(typeof t.days, 'number');
}
if (t.date !== undefined) {
assert.strictEqual(typeof t.date, 'string');
}
assert.strictEqual(typeof t.storageClass, 'string');
});
}
if (a.newerNoncurrentVersions) {
assert.strictEqual(typeof a.newerNoncurrentVersions, 'number');
@ -874,7 +1226,15 @@ export default class LifecycleConfiguration {
}
const Actions = actions.map(action => {
const {
actionName,
days,
date,
deleteMarker,
nonCurrentVersionTransition,
transition,
newerNoncurrentVersions,
} = action;
let Action: any;
if (actionName === 'AbortIncompleteMultipartUpload') {
Action = `<${actionName}><DaysAfterInitiation>${days}` +
@ -893,6 +1253,40 @@ export default class LifecycleConfiguration {
Action = `<${actionName}>${Days}${Date}${DelMarker}` +
`</${actionName}>`;
}
if (actionName === 'NoncurrentVersionTransition') {
const xml: string[] = [];
nonCurrentVersionTransition!.forEach(transition => {
const { noncurrentDays, storageClass } = transition;
xml.push(
`<${actionName}>`,
`<NoncurrentDays>${noncurrentDays}` +
'</NoncurrentDays>',
`<StorageClass>${storageClass}</StorageClass>`,
`</${actionName}>`,
);
});
Action = xml.join('');
}
if (actionName === 'Transition') {
const xml: string[] = [];
transition!.forEach(transition => {
const { days, date, storageClass } = transition;
let element: string = '';
if (days !== undefined) {
element = `<Days>${days}</Days>`;
}
if (date !== undefined) {
element = `<Date>${date}</Date>`;
}
xml.push(
`<${actionName}>`,
element,
`<StorageClass>${storageClass}</StorageClass>`,
`</${actionName}>`,
);
});
Action = xml.join('');
}
return Action;
}).join('');
return `<Rule>${ID}${Status}${Filter}${Actions}</Rule>`;
@ -975,6 +1369,15 @@ export type Rule = {
date?: number;
deleteMarker?: boolean;
newerNoncurrentVersions?: number;
nonCurrentVersionTransition?: {
noncurrentDays: number;
storageClass: string;
}[];
transition?: {
days?: number;
date?: string;
storageClass: string;
}[];
}[];
filter?: {
rulePrefix?: string;

View File

@ -28,6 +28,7 @@ export default class LifecycleRule {
ncvExpiration?: NoncurrentExpiration;
abortMPU?: { DaysAfterInitiation: number };
transitions?: any[];
ncvTransitions?: any[];
prefix?: string;
constructor(id: string, status: Status) {
@ -45,6 +46,7 @@ export default class LifecycleRule {
NoncurrentVersionExpiration?: NoncurrentExpiration;
AbortIncompleteMultipartUpload?: { DaysAfterInitiation: number };
Transitions?: any[];
NoncurrentVersionTransitions?: any[];
Filter?: Filter;
Prefix?: '';
} = { ID: this.id, Status: this.status };
@ -61,6 +63,9 @@ export default class LifecycleRule {
if (this.transitions) {
rule.Transitions = this.transitions;
}
if (this.ncvTransitions) {
rule.NoncurrentVersionTransitions = this.ncvTransitions;
}
const filter = this.buildFilter();
@ -173,4 +178,13 @@ export default class LifecycleRule {
this.transitions = transitions;
return this;
}
/**
* NonCurrentVersionTransitions
* @param ncvTransitions - NonCurrentVersionTransitions
*/
addNCVTransitions(ncvTransitions) {
this.ncvTransitions = ncvTransitions;
return this;
}
}
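A hedged usage sketch of the builder, assuming the serialization method shown above (the one assembling { ID, Status, ... }) is named build(), as elsewhere in this module; values are illustrative:
const rule = new LifecycleRule('archive-noncurrent', 'Enabled')
    .addNCVTransitions([{ NoncurrentDays: 30, StorageClass: 'cold-location' }])
    .build();
// rule.NoncurrentVersionTransitions -> [{ NoncurrentDays: 30, ... }]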

View File

@ -1,3 +1,4 @@
import * as crypto from 'crypto';
import * as constants from '../constants';
import * as VersionIDUtils from '../versioning/VersionID';
import { VersioningConstants } from '../versioning/constants';
@ -5,6 +6,8 @@ import ObjectMDLocation, {
ObjectMDLocationData,
Location,
} from './ObjectMDLocation';
import ObjectMDAmzRestore from './ObjectMDAmzRestore';
import ObjectMDArchive from './ObjectMDArchive';
export type ACL = {
Canned: string;
@ -29,6 +32,7 @@ export type ReplicationInfo = {
role: string;
storageType: string;
dataStoreVersionId: string;
isNFS: boolean | null;
};
export type ObjectMDData = {
@ -36,24 +40,26 @@ export type ObjectMDData = {
'owner-id': string;
'cache-control': string;
'content-disposition': string;
'content-language': string;
'content-encoding': string;
'creation-time'?: string;
'last-modified'?: string;
expires: string;
'content-length': number;
'content-type': string;
'content-md5': string;
// simple/no version. will expand once object versioning is
// introduced
'x-amz-version-id': 'null' | string;
'x-amz-server-version-id': string;
// TODO: Handle this as a utility function for all object puts
// similar to normalizing request but after checkAuth so
// string to sign is not impacted. This is GH Issue#89.
'x-amz-restore'?: ObjectMDAmzRestore;
archive?: ObjectMDArchive;
'x-amz-storage-class': string;
'x-amz-server-side-encryption': string;
'x-amz-server-side-encryption-aws-kms-key-id': string;
'x-amz-server-side-encryption-customer-algorithm': string;
'x-amz-website-redirect-location': string;
'x-amz-scal-transition-in-progress'?: boolean;
'x-amz-scal-transition-time'?: string;
azureInfo?: any;
acl: ACL;
key: string;
location: null | Location[];
@ -73,6 +79,17 @@ export type ObjectMDData = {
replicationInfo: ReplicationInfo;
dataStoreName: string;
originOp: string;
microVersionId?: string;
// Deletion flag
// Used to keep the object metadata in the oplog event:
// on deletion, the flag is first set before the object
// itself is deleted
deleted: boolean;
// PHD flag indicates whether the object is a temporary placeholder.
// This is the case when the latest version of an object gets deleted:
// the master is set as a placeholder and gets updated with the new
// latest version data after a certain amount of time.
isPHD: boolean;
};
/**
@ -101,9 +118,17 @@ export default class ObjectMD {
} else {
this._updateFromParsedJSON(objMd);
}
if (!this._data['creation-time']) {
const lastModified = this.getLastModified();
if (lastModified) {
this.setCreationTime(lastModified);
}
}
} else {
// set newly-created object md modified time to current time
this._data['last-modified'] = new Date().toJSON();
const dt = new Date().toJSON();
this.setLastModified(dt);
this.setCreationTime(dt);
}
// set latest md model version now that we ensured
// backward-compat conversion
@ -158,6 +183,8 @@ export default class ObjectMD {
'content-length': 0,
'content-type': '',
'content-md5': '',
'content-language': '',
'creation-time': undefined,
// simple/no version. will expand once object versioning is
// introduced
'x-amz-version-id': 'null',
@ -170,6 +197,7 @@ export default class ObjectMD {
'x-amz-server-side-encryption-aws-kms-key-id': '',
'x-amz-server-side-encryption-customer-algorithm': '',
'x-amz-website-redirect-location': '',
'x-amz-scal-transition-in-progress': false,
acl: {
Canned: 'private',
FULL_CONTROL: [],
@ -179,6 +207,7 @@ export default class ObjectMD {
},
key: '',
location: null,
azureInfo: undefined,
// versionId, isNull, nullVersionId and isDeleteMarker
// should be undefined when not set explicitly
isNull: undefined,
@ -198,9 +227,12 @@ export default class ObjectMD {
role: '',
storageType: '',
dataStoreVersionId: '',
isNFS: null,
},
dataStoreName: '',
originOp: '',
deleted: false,
isPHD: false,
};
}
@ -430,6 +462,50 @@ export default class ObjectMD {
return this._data['content-md5'];
}
/**
* Set content-language
*
* @param contentLanguage - content-language
* @return itself
*/
setContentLanguage(contentLanguage: string) {
this._data['content-language'] = contentLanguage;
return this;
}
/**
* Returns content-language
*
* @return content-language
*/
getContentLanguage() {
return this._data['content-language'];
}
/**
* Set Creation Date
*
* @param creationTime - Creation Date
* @return itself
*/
setCreationTime(creationTime: string) {
this._data['creation-time'] = creationTime;
return this;
}
/**
* Returns Creation Date
*
* @return Creation Date
*/
getCreationTime() {
// If creation-time is not set, fall back to last-modified
if (!this._data['creation-time']) {
return this.getLastModified();
}
return this._data['creation-time'];
}
/**
* Set version id
*
@ -570,6 +646,48 @@ export default class ObjectMD {
return this._data['x-amz-website-redirect-location'];
}
/**
* Set metadata transition in progress value
*
* @param inProgress - True if transition is in progress, false otherwise
* @param transitionTime - Date when the transition started
* @return itself
*/
setTransitionInProgress(inProgress: false): this
setTransitionInProgress(inProgress: true, transitionTime: Date|string|number): this
setTransitionInProgress(inProgress: boolean, transitionTime?: Date|string|number) {
this._data['x-amz-scal-transition-in-progress'] = inProgress;
if (!inProgress || !transitionTime) {
delete this._data['x-amz-scal-transition-time'];
} else {
if (typeof transitionTime === 'number') {
transitionTime = new Date(transitionTime);
}
if (transitionTime instanceof Date) {
transitionTime = transitionTime.toISOString();
}
this._data['x-amz-scal-transition-time'] = transitionTime;
}
return this;
}
/**
* Get metadata transition in progress value
*
* @return True if transition is in progress, false otherwise
*/
getTransitionInProgress() {
return this._data['x-amz-scal-transition-in-progress'];
}
/**
* Gets the transition time of the object.
* @returns The transition time of the object.
*/
getTransitionTime() {
return this._data['x-amz-scal-transition-time'];
}
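A quick sketch of the overloads above: epoch milliseconds and Date values are normalized to an ISO string before being stored, plain strings are kept as-is, and clearing the flag also drops the stored time (md is an instance of the ObjectMD class defined in this file):

const md = new ObjectMD();
md.setTransitionInProgress(true, Date.now()); // number -> Date -> ISO string
md.getTransitionTime();                       // e.g. '2024-07-10T16:45:22.000Z'
md.setTransitionInProgress(false);            // also deletes x-amz-scal-transition-time
md.getTransitionTime();                       // undefined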
/**
* Set access control list
*
@ -675,6 +793,29 @@ export default class ObjectMD {
return reducedLocations;
}
/**
* Set the Azure specific information
* @param azureInfo - a plain JS structure representing the
* Azure specific information for a Blob or a Container (see the
* constructor of {@link ObjectMDAzureInfo} for a description of the
* fields of this structure)
* @return itself
*/
setAzureInfo(azureInfo: any) {
this._data.azureInfo = azureInfo;
return this;
}
/**
* Get the Azure specific information
* @return a plain JS structure representing the Azure specific
* information for a Blob or a Container, suitable for the constructor
* of {@link ObjectMDAzureInfo}.
*/
getAzureInfo() {
return this._data.azureInfo;
}
/**
* Set metadata isNull value
*
@ -781,6 +922,19 @@ export default class ObjectMD {
return this._data.isDeleteMarker || false;
}
/**
* Get if the object is a multipart upload (MPU)
*
* The function checks the "content-md5" field: if it contains a
* dash ('-'), the object is an MPU, as the content-md5 string ends
* with "-[nbparts]" for MPUs.
*
* @return Whether object is a multipart upload
*/
isMultipartUpload() {
return this.getContentMd5().includes('-');
}
/**
* Set metadata versionId value
*
@ -860,6 +1014,20 @@ export default class ObjectMD {
return this._data.tags;
}
getUserMetadata() {
const metaHeaders = {};
const data = this.getValue();
Object.keys(data).forEach(key => {
if (key.startsWith('x-amz-meta-')) {
metaHeaders[key] = data[key];
}
});
if (Object.keys(metaHeaders).length > 0) {
return JSON.stringify(metaHeaders);
}
return undefined;
}
/**
* Set replication information
*
@ -875,6 +1043,7 @@ export default class ObjectMD {
role: string;
storageType?: string;
dataStoreVersionId?: string;
isNFS?: boolean;
}) {
const {
status,
@ -885,6 +1054,7 @@ export default class ObjectMD {
role,
storageType,
dataStoreVersionId,
isNFS,
} = replicationInfo;
this._data.replicationInfo = {
status,
@ -895,6 +1065,7 @@ export default class ObjectMD {
role,
storageType: storageType || '',
dataStoreVersionId: dataStoreVersionId || '',
isNFS: isNFS || null,
};
return this;
}
@ -913,6 +1084,24 @@ export default class ObjectMD {
return this;
}
/**
* Set whether the replication is occurring from an NFS bucket.
* @param isNFS - Whether replication from an NFS bucket
* @return itself
*/
setReplicationIsNFS(isNFS: boolean) {
this._data.replicationInfo.isNFS = isNFS;
return this;
}
/**
* Get whether the replication is occurring from an NFS bucket.
* @return Whether replication from an NFS bucket
*/
getReplicationIsNFS() {
return this._data.replicationInfo.isNFS;
}
setReplicationSiteStatus(site: string, status: string) {
const backend = this._data.replicationInfo.backends.find(
(o) => o.site === site
@ -963,6 +1152,11 @@ export default class ObjectMD {
return this;
}
setReplicationStorageType(storageType: string) {
this._data.replicationInfo.storageType = storageType;
return this;
}
setReplicationStorageClass(storageClass: string) {
this._data.replicationInfo.storageClass = storageClass;
return this;
@ -1044,6 +1238,9 @@ export default class ObjectMD {
Object.keys(metaHeaders).forEach((key) => {
if (key.startsWith('x-amz-meta-')) {
this._data[key] = metaHeaders[key];
} else if (key.startsWith('x-ms-meta-')) {
const _key = key.replace('x-ms-meta-', 'x-amz-meta-');
this._data[_key] = metaHeaders[key];
}
});
// If a multipart object and the acl is already parsed, we update it
@ -1053,6 +1250,20 @@ export default class ObjectMD {
return this;
}
/**
* Clear all existing meta headers (used for Azure)
*
* @return itself
*/
clearMetadataValues() {
Object.keys(this._data).forEach(key => {
if (key.startsWith('x-amz-meta')) {
delete this._data[key];
}
});
return this;
}
/**
* overrideMetadataValues (used for complete MPU and object copy)
*
@ -1064,6 +1275,38 @@ export default class ObjectMD {
return this;
}
/**
* Create or update the microVersionId field
*
* This field can be used to force an update in MongoDB. This can
* be needed in the following cases:
*
* - in case no other metadata field changes
*
* - to detect a change when fields change but object version does
* not change e.g. when ingesting a putObjectTagging coming from
* S3C to Zenko
*
* - to manage conflicts during concurrent updates, using
* conditions on the microVersionId field.
*
* It's a field of 16 hexadecimal characters randomly generated
*
* @return itself
*/
updateMicroVersionId() {
this._data.microVersionId = crypto.randomBytes(8).toString('hex');
}
/**
* Get the microVersionId field, or null if not set
*
* @return the microVersionId field if it exists, or null if it does not
*/
getMicroVersionId() {
return this._data.microVersionId || null;
}
/**
* Set object legal hold status
* @param legalHold - true if legal hold is 'ON' false if 'OFF'
@ -1144,4 +1387,98 @@ export default class ObjectMD {
getValue() {
return this._data;
}
/**
* Get x-amz-restore
*
* @returns x-amz-restore
*/
getAmzRestore() {
return this._data['x-amz-restore'];
}
/**
* Set x-amz-restore
*
* @param value x-amz-restore object
* @returns itself
* @throws case of invalid parameter
*/
setAmzRestore(value?: ObjectMDAmzRestore) {
if (value) {
// Accept either an ObjectMDAmzRestore instance or a plain object
if (!(value instanceof ObjectMDAmzRestore) && !ObjectMDAmzRestore.isValid(value)) {
throw new Error('x-amz-restore must be of type ObjectMDAmzRestore.');
}
this._data['x-amz-restore'] = value;
} else {
delete this._data['x-amz-restore'];
}
return this;
}
/**
* Get archive
*
* @returns archive
*/
getArchive() {
return this._data.archive;
}
/**
* Set archive
*
* @param value archive object
* @returns itself
* @throws case of invalid parameter
*/
setArchive(value: ObjectMDArchive) {
if (value) {
// Accept either an ObjectMDArchive instance or a plain object
if (!(value instanceof ObjectMDArchive) && !ObjectMDArchive.isValid(value)) {
throw new Error('archive must be of type ObjectMDArchive.');
}
this._data.archive = value;
} else {
delete this._data.archive;
}
return this;
}
/**
* Set deleted flag
* @param {Boolean} value - deleted flag value
* @return {ObjectMD}
*/
setDeleted(value) {
this._data.deleted = value;
return this;
}
/**
* Get deleted flag
* @return {Boolean}
*/
getDeleted() {
return this._data.deleted;
}
/**
* Set isPHD flag
* @param {Boolean} value isPHD value
* @return {ObjectMD}
*/
setIsPHD(value) {
this._data.isPHD = value;
return this;
}
/**
* Get isPHD flag
* @return {Boolean}
*/
getIsPHD() {
return this._data.isPHD;
}
}
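A short usage sketch of the creation-time semantics added above, assuming the constructor is given previously serialized metadata as in the parsing branch shown earlier (the date is illustrative):

import ObjectMD from './ObjectMD';

const fresh = new ObjectMD();
// new objects get creation-time set alongside last-modified
console.log(fresh.getCreationTime() === fresh.getLastModified()); // true

const legacy = new ObjectMD({ 'last-modified': '2020-01-01T00:00:00.000Z' });
// older metadata without the field falls back to last-modified
console.log(legacy.getCreationTime()); // '2020-01-01T00:00:00.000Z'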

View File

@ -0,0 +1,94 @@
/*
* Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020
*/
/**
* class representing the x-amz-restore of object metadata.
*
* @class
*/
export default class ObjectMDAmzRestore {
'expiry-date': Date | string;
'ongoing-request': boolean;
/**
*
* @constructor
* @param ongoingRequest ongoing-request
* @param [expiryDate] expiry-date
* @throws case of invalid parameter
*/
constructor(ongoingRequest: boolean, expiryDate?: Date | string) {
this.setOngoingRequest(ongoingRequest);
this.setExpiryDate(expiryDate);
}
/**
*
* @param data x-amz-restore data to validate
* @returns true if the provided object is valid
*/
static isValid(data: { 'ongoing-request': boolean; 'expiry-date': Date | string }) {
try {
// eslint-disable-next-line no-new
new ObjectMDAmzRestore(data['ongoing-request'], data['expiry-date']);
return true;
} catch (err) {
return false;
}
}
/**
*
* @returns ongoing-request
*/
getOngoingRequest() {
return this['ongoing-request'];
}
/**
*
* @param value ongoing-request
* @throws case of invalid parameter
*/
setOngoingRequest(value?: boolean) {
if (value === undefined) {
throw new Error('ongoing-request is required.');
} else if (typeof value !== 'boolean') {
throw new Error('ongoing-request must be of type boolean.');
}
this['ongoing-request'] = value;
}
/**
*
* @returns expiry-date
*/
getExpiryDate() {
return this['expiry-date'];
}
/**
*
* @param value expiry-date
* @throws case of invalid parameter
*/
setExpiryDate(value?: Date | string) {
if (value) {
const checkWith = (new Date(value)).getTime();
if (Number.isNaN(Number(checkWith))) {
throw new Error('expiry-date must be a valid Date.');
}
this['expiry-date'] = value;
}
}
/**
*
* @returns itself
*/
getValue() {
return this;
}
}
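A brief validation sketch (dates illustrative): the ongoing-request flag is mandatory and must be a boolean, while expiry-date is optional but must parse as a date.

import ObjectMDAmzRestore from './ObjectMDAmzRestore';

const done = new ObjectMDAmzRestore(false, '2024-08-15T00:00:00.000Z');
console.log(ObjectMDAmzRestore.isValid(done.getValue())); // true
console.log(ObjectMDAmzRestore.isValid({ 'ongoing-request': 'yes' } as any)); // false: not a boolean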

View File

@ -0,0 +1,184 @@
/**
* class representing the archive of object metadata.
*
* @class
*/
export default class ObjectMDArchive {
archiveInfo: any;
// @ts-ignore
restoreRequestedAt: Date | string;
// @ts-ignore
restoreRequestedDays: number;
// @ts-ignore
restoreCompletedAt: Date | string;
// @ts-ignore
restoreWillExpireAt: Date | string;
/**
*
* @constructor
* @param archiveInfo contains the archive info set by the TLP and returned by the TLP jobs
* @param [restoreRequestedAt] set at the time restore request is made by the client
* @param [restoreRequestedDays] set at the time restore request is made by the client
* @param [restoreCompletedAt] set at the time of successful restore
* @param [restoreWillExpireAt] computed and stored at the time of restore
* @throws case of invalid parameter
*/
constructor(
archiveInfo: any,
restoreRequestedAt?: Date | string,
restoreRequestedDays?: number,
restoreCompletedAt?: Date | string,
restoreWillExpireAt?: Date | string,
) {
this.setArchiveInfo(archiveInfo);
this.setRestoreRequestedAt(restoreRequestedAt!);
this.setRestoreRequestedDays(restoreRequestedDays!);
this.setRestoreCompletedAt(restoreCompletedAt!);
this.setRestoreWillExpireAt(restoreWillExpireAt!);
}
/**
*
* @param data archive data to validate
* @returns true if the provided object is valid
*/
static isValid(data: {
archiveInfo: any;
restoreRequestedAt?: Date;
restoreRequestedDays?: number;
restoreCompletedAt?: Date;
restoreWillExpireAt?: Date;
}) {
try {
// eslint-disable-next-line no-new
new ObjectMDArchive(
data.archiveInfo,
data.restoreRequestedAt,
data.restoreRequestedDays,
data.restoreCompletedAt,
data.restoreWillExpireAt,
);
return true;
} catch (err) {
return false;
}
}
/**
*
* @returns archiveInfo
*/
getArchiveInfo() {
return this.archiveInfo;
}
/**
* @param value archiveInfo
* @throws case of invalid parameter
*/
setArchiveInfo(value: any) {
if (!value) {
throw new Error('archiveInfo is required.');
} else if (typeof value !== 'object') {
throw new Error('archiveInfo must be of type object.');
}
this.archiveInfo = value;
}
/**
*
* @returns restoreRequestedAt
*/
getRestoreRequestedAt() {
return this.restoreRequestedAt;
}
/**
* @param value restoreRequestedAt
* @throws case of invalid parameter
*/
setRestoreRequestedAt(value: Date | string) {
if (value) {
const checkWith = (new Date(value)).getTime();
if (Number.isNaN(Number(checkWith))) {
throw new Error('restoreRequestedAt must be a valid Date.');
}
this.restoreRequestedAt = value;
}
}
/**
*
* @returns restoreRequestedDays
*/
getRestoreRequestedDays() {
return this.restoreRequestedDays;
}
/**
* @param value restoreRequestedDays
* @throws case of invalid parameter
*/
setRestoreRequestedDays(value: number) {
if (value) {
if (isNaN(value)) {
throw new Error('restoreRequestedDays must be a number.');
}
this.restoreRequestedDays = value;
}
}
/**
*
* @returns restoreCompletedAt
*/
getRestoreCompletedAt() {
return this.restoreCompletedAt;
}
/**
* @param value restoreCompletedAt
* @throws case of invalid parameter
*/
setRestoreCompletedAt(value: Date | string) {
if (value) {
if (!this.restoreRequestedAt || !this.restoreRequestedDays) {
throw new Error('restoreCompletedAt must be set after restoreRequestedAt and restoreRequestedDays.');
}
const checkWith = (new Date(value)).getTime();
if (Number.isNaN(Number(checkWith))) {
throw new Error('restoreCompletedAt must be a valid Date.');
}
this.restoreCompletedAt = value;
}
}
/**
*
* @returns restoreWillExpireAt
*/
getRestoreWillExpireAt() {
return this.restoreWillExpireAt;
}
/**
* @param value restoreWillExpireAt
* @throws case of invalid parameter
*/
setRestoreWillExpireAt(value: Date | string) {
if (value) {
if (!this.restoreRequestedAt || !this.restoreRequestedDays) {
throw new Error('restoreWillExpireAt must be set after restoreRequestedAt and restoreRequestedDays.');
}
const checkWith = (new Date(value)).getTime();
if (Number.isNaN(Number(checkWith))) {
throw new Error('restoreWillExpireAt must be a valid Date.');
}
this.restoreWillExpireAt = value;
}
}
/**
*
* @returns itself
*/
getValue() {
return this;
}
}
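A usage sketch of the ordering the setters enforce (all values illustrative): restore completion and expiry can only be recorded once a restore request has been registered.

import ObjectMDArchive from './ObjectMDArchive';

const archive = new ObjectMDArchive({ archiveId: '12345' }); // archiveInfo is opaque here
archive.setRestoreRequestedAt('2024-07-01T00:00:00.000Z');
archive.setRestoreRequestedDays(5);
archive.setRestoreCompletedAt('2024-07-02T00:00:00.000Z'); // would throw if called first
console.log(ObjectMDArchive.isValid(archive as any)); // true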

View File

@ -0,0 +1,188 @@
/**
* Helper class to ease access to the Azure specific information for
* Blob and Container objects.
*/
export default class ObjectMDAzureInfo {
_data: {
containerPublicAccess: string;
containerStoredAccessPolicies: any[];
containerImmutabilityPolicy: any;
containerLegalHoldStatus: boolean;
containerDeletionInProgress: boolean;
blobType: string;
blobContentMD5: string;
blobIssuedETag: string;
blobCopyInfo: any;
blobSequenceNumber: number;
blobAccessTierChangeTime: Date;
blobUncommitted: boolean;
};
/**
* @constructor
* @param obj - Raw structure for the Azure info on Blob/Container
* @param obj.containerPublicAccess - Public access authorization
* type
* @param obj.containerStoredAccessPolicies - Access policies
* for Shared Access Signature bearer
* @param obj.containerImmutabilityPolicy - data immutability
* policy for this container
* @param obj.containerLegalHoldStatus - legal hold status for
* this container
* @param obj.containerDeletionInProgress - deletion in progress
* indicator for this container
* @param obj.blobType - defines the type of blob for this object
* @param obj.blobContentMD5 - whole object MD5 sum set by the
* client through the Azure API
* @param obj.blobIssuedETag - backup of the issued ETag on MD-only
* operations like Set Blob Properties and Set Blob Metadata
* @param obj.blobCopyInfo - information pertaining to past and
* pending copy operation targeting this object
* @param obj.blobSequenceNumber - sequence number for a PageBlob
* @param obj.blobAccessTierChangeTime - date of change of tier
* @param obj.blobUncommitted - A block has been put for a
* nonexistent blob which is about to be created
*/
constructor(obj: {
containerPublicAccess: string;
containerStoredAccessPolicies: any[];
containerImmutabilityPolicy: any;
containerLegalHoldStatus: boolean;
containerDeletionInProgress: boolean;
blobType: string;
blobContentMD5: string;
blobIssuedETag: string;
blobCopyInfo: any;
blobSequenceNumber: number;
blobAccessTierChangeTime: Date;
blobUncommitted: boolean;
}) {
this._data = {
containerPublicAccess: obj.containerPublicAccess,
containerStoredAccessPolicies: obj.containerStoredAccessPolicies,
containerImmutabilityPolicy: obj.containerImmutabilityPolicy,
containerLegalHoldStatus: obj.containerLegalHoldStatus,
containerDeletionInProgress: obj.containerDeletionInProgress,
blobType: obj.blobType,
blobContentMD5: obj.blobContentMD5,
blobIssuedETag: obj.blobIssuedETag,
blobCopyInfo: obj.blobCopyInfo,
blobSequenceNumber: obj.blobSequenceNumber,
blobAccessTierChangeTime: obj.blobAccessTierChangeTime,
blobUncommitted: obj.blobUncommitted,
};
}
getContainerPublicAccess() {
return this._data.containerPublicAccess;
}
setContainerPublicAccess(containerPublicAccess: string) {
this._data.containerPublicAccess = containerPublicAccess;
return this;
}
getContainerStoredAccessPolicies() {
return this._data.containerStoredAccessPolicies;
}
setContainerStoredAccessPolicies(containerStoredAccessPolicies: any[]) {
this._data.containerStoredAccessPolicies =
containerStoredAccessPolicies;
return this;
}
getContainerImmutabilityPolicy() {
return this._data.containerImmutabilityPolicy;
}
setContainerImmutabilityPolicy(containerImmutabilityPolicy: any) {
this._data.containerImmutabilityPolicy = containerImmutabilityPolicy;
return this;
}
getContainerLegalHoldStatus() {
return this._data.containerLegalHoldStatus;
}
setContainerLegalHoldStatus(containerLegalHoldStatus: boolean) {
this._data.containerLegalHoldStatus = containerLegalHoldStatus;
return this;
}
getContainerDeletionInProgress() {
return this._data.containerDeletionInProgress;
}
setContainerDeletionInProgress(containerDeletionInProgress: boolean) {
this._data.containerDeletionInProgress = containerDeletionInProgress;
return this;
}
getBlobType() {
return this._data.blobType;
}
setBlobType(blobType: string) {
this._data.blobType = blobType;
return this;
}
getBlobContentMD5() {
return this._data.blobContentMD5;
}
setBlobContentMD5(blobContentMD5: string) {
this._data.blobContentMD5 = blobContentMD5;
return this;
}
getBlobIssuedETag() {
return this._data.blobIssuedETag;
}
setBlobIssuedETag(blobIssuedETag: string) {
this._data.blobIssuedETag = blobIssuedETag;
return this;
}
getBlobCopyInfo() {
return this._data.blobCopyInfo;
}
setBlobCopyInfo(blobCopyInfo: any) {
this._data.blobCopyInfo = blobCopyInfo;
return this;
}
getBlobSequenceNumber() {
return this._data.blobSequenceNumber;
}
setBlobSequenceNumber(blobSequenceNumber: number) {
this._data.blobSequenceNumber = blobSequenceNumber;
return this;
}
getBlobAccessTierChangeTime() {
return this._data.blobAccessTierChangeTime;
}
setBlobAccessTierChangeTime(blobAccessTierChangeTime: Date) {
this._data.blobAccessTierChangeTime = blobAccessTierChangeTime;
return this;
}
getBlobUncommitted() {
return this._data.blobUncommitted;
}
setBlobUncommitted(blobUncommitted: boolean) {
this._data.blobUncommitted = blobUncommitted;
return this;
}
getValue() {
return this._data;
}
}

View File

@ -5,6 +5,7 @@ export type Location = BaseLocation & {
size: number;
dataStoreETag: string;
dataStoreVersionId: string;
blockId?: string;
};
export type ObjectMDLocationData = {
key: string;
@ -12,6 +13,8 @@ export type ObjectMDLocationData = {
size: number;
dataStoreName: string;
dataStoreETag: string;
dataStoreVersionId: string;
blockId?: string;
cryptoScheme?: number;
cipheredDataKey?: string;
};
@ -31,10 +34,14 @@ export default class ObjectMDLocation {
* @param locationObj.dataStoreName - type of data store
* @param locationObj.dataStoreETag - internal ETag of
* data part
* @param [locationObj.dataStoreVersionId] - versionId,
* needed for cloud backends
* @param [locationObj.cryptoScheme] - if location data is
* encrypted: the encryption scheme version
* @param [locationObj.cipheredDataKey] - if location data
* is encrypted: the base64-encoded ciphered data key
* @param [locationObj.blockId] - blockId of the part,
* set by the Azure Blob Service REST API frontend
*/
constructor(locationObj: Location | (Location & Ciphered)) {
this._data = {
@ -43,6 +50,8 @@ export default class ObjectMDLocation {
size: locationObj.size,
dataStoreName: locationObj.dataStoreName,
dataStoreETag: locationObj.dataStoreETag,
dataStoreVersionId: locationObj.dataStoreVersionId,
blockId: locationObj.blockId,
};
if ('cryptoScheme' in locationObj) {
this._data.cryptoScheme = locationObj.cryptoScheme;
@ -64,6 +73,7 @@ export default class ObjectMDLocation {
* @param location - single data location info
* @param location.key - data backend key
* @param location.dataStoreName - type of data store
* @param [location.dataStoreVersionId] - data backend version ID
* @param [location.cryptoScheme] - if location data is
* encrypted: the encryption scheme version
* @param [location.cipheredDataKey] - if location data
@ -71,15 +81,19 @@ export default class ObjectMDLocation {
* @return return this
*/
setDataLocation(location: BaseLocation | (BaseLocation & Ciphered)) {
['key', 'dataStoreName', 'cryptoScheme', 'cipheredDataKey'].forEach(
(attrName) => {
[
'key',
'dataStoreName',
'dataStoreVersionId',
'cryptoScheme',
'cipheredDataKey',
].forEach(attrName => {
if (location[attrName] !== undefined) {
this._data[attrName] = location[attrName];
} else {
delete this._data[attrName];
}
}
);
});
return this;
}
@ -87,6 +101,10 @@ export default class ObjectMDLocation {
return this._data.dataStoreETag;
}
getDataStoreVersionId() {
return this._data.dataStoreVersionId;
}
getPartNumber() {
return Number.parseInt(this._data.dataStoreETag.split(':')[0], 10);
}
@ -121,6 +139,15 @@ export default class ObjectMDLocation {
return this._data.cipheredDataKey;
}
getBlockId() {
return this._data.blockId;
}
setBlockId(blockId: string) {
this._data.blockId = blockId;
return this;
}
getValue() {
return this._data;
}
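A sketch of the new fields (all location values hypothetical): dataStoreVersionId is now carried through construction, but, like the other optional attributes, setDataLocation() drops it unless it is passed again; blockId gets its own setter for the Azure frontend.

import ObjectMDLocation from './ObjectMDLocation';

const loc = new ObjectMDLocation({
    key: 'd1/0001',
    start: 0,
    size: 1024,
    dataStoreName: 'aws-backend',
    dataStoreETag: '1:d41d8cd98f00b204e9800998ecf8427e',
    dataStoreVersionId: 'v10001',
});
loc.getDataStoreVersionId(); // 'v10001'
loc.setDataLocation({ key: 'd1/0002', dataStoreName: 'aws-backend' });
loc.getDataStoreVersionId(); // undefined, since it was not re-provided
loc.setBlockId('QmxvY2swMDAwMQ==');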

View File

@ -64,7 +64,8 @@ export default class ReplicationConfiguration {
_destination: string | null;
_rules: Rule[] | null;
_prevStorageClass: null;
_hasScalityDestination: boolean;
_hasScalityDestination: boolean | null;
_preferredReadLocation: string | null;
/**
* Create a ReplicationConfiguration instance
@ -86,7 +87,8 @@ export default class ReplicationConfiguration {
this._destination = null;
this._rules = null;
this._prevStorageClass = null;
this._hasScalityDestination = false;
this._hasScalityDestination = null;
this._preferredReadLocation = null;
}
/**
@ -113,6 +115,18 @@ export default class ReplicationConfiguration {
return this._rules;
}
/**
* The preferred read location
* @return {string|null} - The preferred read location if defined,
* otherwise null
*
* FIXME ideally we should be able to specify one preferred read
* location for each rule
*/
getPreferredReadLocation() {
return this._preferredReadLocation;
}
/**
* Get the replication configuration
* @return - The replication configuration
@ -122,6 +136,7 @@ export default class ReplicationConfiguration {
role: this.getRole(),
destination: this.getDestination(),
rules: this.getRules(),
preferredReadLocation: this.getPreferredReadLocation(),
};
}
@ -328,7 +343,15 @@ export default class ReplicationConfiguration {
return undefined;
}
const storageClasses = destination.StorageClass[0].split(',');
const isValidStorageClass = storageClasses.every((storageClass) => {
const prefReadIndex = storageClasses.findIndex(storageClass =>
storageClass.endsWith(':preferred_read'));
if (prefReadIndex !== -1) {
const prefRead = storageClasses[prefReadIndex].split(':')[0];
// remove :preferred_read tag from storage class name
storageClasses[prefReadIndex] = prefRead;
this._preferredReadLocation = prefRead;
}
const isValidStorageClass = storageClasses.every(storageClass => {
if (validStorageClasses.includes(storageClass)) {
this._hasScalityDestination =
defaultEndpoint.type === undefined;
@ -338,6 +361,11 @@ export default class ReplicationConfiguration {
(endpoint: any) => endpoint.site === storageClass
);
if (endpoint) {
// We do not support replication to a cold location;
// only transitions to a cold location are supported.
if (endpoint.site && this._config.locationConstraints[endpoint.site]?.isCold) {
return false;
}
// If this._hasScalityDestination was not set to true in any
// previous iteration or by a prior rule's storage class, then
// check if the current endpoint is a Scality destination.
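A standalone sketch of the ':preferred_read' parsing added above, with a hypothetical two-site StorageClass value:

const storageClasses = 'us-east-1,us-west-2:preferred_read'.split(',');
const prefReadIndex = storageClasses.findIndex(sc =>
    sc.endsWith(':preferred_read'));
if (prefReadIndex !== -1) {
    const prefRead = storageClasses[prefReadIndex].split(':')[0];
    storageClasses[prefReadIndex] = prefRead; // strip the tag
    // prefRead === 'us-west-2'; storageClasses is now ['us-east-1', 'us-west-2']
}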

View File

@ -1,11 +1,16 @@
export { default as ARN } from './ARN';
export { default as BackendInfo } from './BackendInfo';
export { default as BucketAzureInfo } from './BucketAzureInfo';
export { default as BucketInfo } from './BucketInfo';
export { default as ObjectMD } from './ObjectMD';
export { default as ObjectMDLocation } from './ObjectMDLocation';
export * as WebsiteConfiguration from './WebsiteConfiguration';
export { default as ReplicationConfiguration } from './ReplicationConfiguration';
export { default as BucketPolicy } from './BucketPolicy';
export { default as LifecycleConfiguration } from './LifecycleConfiguration';
export { default as LifecycleRule } from './LifecycleRule';
export { default as BucketPolicy } from './BucketPolicy';
export { default as ObjectLockConfiguration } from './ObjectLockConfiguration';
export { default as NotificationConfiguration } from './NotificationConfiguration';
export { default as ObjectLockConfiguration } from './ObjectLockConfiguration';
export { default as ObjectMD } from './ObjectMD';
export { default as ObjectMDAmzRestore } from './ObjectMDAmzRestore';
export { default as ObjectMDArchive } from './ObjectMDArchive';
export { default as ObjectMDAzureInfo } from './ObjectMDAzureInfo';
export { default as ObjectMDLocation } from './ObjectMDLocation';
export { default as ReplicationConfiguration } from './ReplicationConfiguration';
export * as WebsiteConfiguration from './WebsiteConfiguration';

View File

@ -1,5 +1,6 @@
import * as http from 'http';
import * as https from 'https';
import { https as HttpsAgent } from 'httpagent';
import * as tls from 'tls';
import * as net from 'net';
import assert from 'assert';
@ -371,6 +372,8 @@ export default class Server {
error: err.stack || err,
address: sock.address(),
});
// the socket is not always destroyed automatically, so destroy it explicitly
sock.destroy();
}
/**
@ -407,7 +410,11 @@ export default class Server {
method: 'arsenal.network.Server.start',
port: this._port,
});
this._https.agent = new https.Agent(this._https);
this._https.agent = new HttpsAgent.Agent(this._https, {
// Do not enforce the maximum number of sockets for the
// main server, as it might be able to serve more clients.
maxSockets: false,
});
this._server = https.createServer(this._https,
(req, res) => this._onRequest(req, res));
} else {
@ -428,7 +435,6 @@ export default class Server {
this._server.on('connection', sock => {
// Setting no delay of the socket to the value configured
// TODO fix this
// @ts-expect-errors
sock.setNoDelay(this.isNoDelay());
sock.on('error', err => this._logger.info(
'socket error - request rejected', { error: err }));

View File

@ -3,10 +3,12 @@ import * as utils from './http/utils';
import RESTServer from './rest/RESTServer';
import RESTClient from './rest/RESTClient';
import * as ProbeServer from './probe/ProbeServer';
import HealthProbeServer from './probe/HealthProbeServer';
import * as Utils from './probe/Utils';
export const http = { server, utils };
export const rest = { RESTServer, RESTClient };
export const probe = { ProbeServer };
export const probe = { ProbeServer, HealthProbeServer, Utils };
export { default as RoundRobin } from './RoundRobin';
export { default as kmip } from './kmip';

View File

@ -0,0 +1,94 @@
import * as http from 'http';
import httpServer from '../http/server';
import * as werelogs from 'werelogs';
import errors from '../../errors';
import ZenkoMetrics from '../../metrics/ZenkoMetrics';
import { sendSuccess, sendError } from './Utils';
function checkStub(_log: any) { // eslint-disable-line
return true;
}
export default class HealthProbeServer extends httpServer {
logging: werelogs.Logger;
_reqHandlers: { [key: string]: any };
_livenessCheck: (log: any) => boolean;
_readinessCheck: (log: any) => boolean;
constructor(params: {
port: number;
bindAddress: string;
livenessCheck?: (log: any) => boolean;
readinessCheck?: (log: any) => boolean;
}) {
const logging = new werelogs.Logger('HealthProbeServer');
super(params.port, logging);
this.logging = logging;
this.setBindAddress(params.bindAddress || 'localhost');
// hooking our request processing function by calling the
// parent's method for that
this.onRequest(this._onRequest);
this._reqHandlers = {
'/_/health/liveness': this._onLiveness.bind(this),
'/_/health/readiness': this._onReadiness.bind(this),
'/_/monitoring/metrics': this._onMetrics.bind(this),
};
this._livenessCheck = params.livenessCheck || checkStub;
this._readinessCheck = params.readinessCheck || checkStub;
}
onLiveCheck(f: (log: any) => boolean) {
this._livenessCheck = f;
}
onReadyCheck(f: (log: any) => boolean) {
this._readinessCheck = f;
}
_onRequest(req: http.IncomingMessage, res: http.ServerResponse) {
const log = this.logging.newRequestLogger();
log.debug('request received', { method: req.method, url: req.url });
if (req.method !== 'GET') {
sendError(res, log, errors.MethodNotAllowed);
} else if (req.url && req.url in this._reqHandlers) {
this._reqHandlers[req.url](req, res, log);
} else {
sendError(res, log, errors.InvalidURI);
}
}
_onLiveness(
_req: http.IncomingMessage,
res: http.ServerResponse,
log: werelogs.RequestLogger,
) {
if (this._livenessCheck(log)) {
sendSuccess(res, log);
} else {
sendError(res, log, errors.ServiceUnavailable);
}
}
_onReadiness(
_req: http.IncomingMessage,
res: http.ServerResponse,
log: werelogs.RequestLogger,
) {
if (this._readinessCheck(log)) {
sendSuccess(res, log);
} else {
sendError(res, log, errors.ServiceUnavailable);
}
}
// expose metrics to Prometheus
async _onMetrics(_req: http.IncomingMessage, res: http.ServerResponse) {
const metrics = await ZenkoMetrics.asPrometheus();
res.writeHead(200, {
'Content-Type': ZenkoMetrics.asPrometheusContentType(),
});
res.end(metrics);
}
}
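A minimal instantiation sketch (port, bind address, and the readiness predicate are illustrative; start() comes from the HTTP server base class shown earlier):

import HealthProbeServer from './HealthProbeServer';

const probe = new HealthProbeServer({
    port: 8000,
    bindAddress: '0.0.0.0',
    readinessCheck: () => true, // liveness falls back to the always-true stub
});
probe.start();
// GET /_/health/liveness    -> 200 while the liveness check passes
// GET /_/health/readiness   -> 200 while the readiness check passes
// GET /_/monitoring/metrics -> Prometheus metrics via ZenkoMetrics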

View File

@ -4,19 +4,16 @@ import * as werelogs from 'werelogs';
import errors from '../../errors';
export const DEFAULT_LIVE_ROUTE = '/_/live';
export const DEFAULT_READY_ROUTE = '/_/live';
export const DEFAULT_METRICS_ROUTE = '/_/metrics';
export const DEFAULT_READY_ROUTE = '/_/ready';
export const DEFAULT_METRICS_ROUTE = '/metrics';
/**
* ProbeDelegate is used to determine if a probe is successful or
* if any errors are present.
* If everything is working as intended, it is a no-op.
* Otherwise, return a string representing what is failing.
* ProbeDelegate is used to handle probe checks.
* You can sendSuccess and sendError from Utils to handle success
* and failure conditions.
* @callback ProbeDelegate
* @param res - HTTP response for writing
* @param log - Werelogs instance for logging if you choose to
* @return String representing issues to report. An empty
* string or undefined is used to represent no issues.
*/
export type ProbeDelegate = (res: http.ServerResponse, log: werelogs.RequestLogger) => string | void
@ -90,6 +87,6 @@ export class ProbeServer extends httpServer {
return;
}
this._handlers.get(req.url!)!(res, log);
this._handlers.get(req.url ?? '')?.(res, log);
}
}

View File

@ -0,0 +1,49 @@
import * as http from 'http';
import { RequestLogger } from 'werelogs';
import { ArsenalError } from '../../errors';
/**
* Send a successful HTTP response of 200 OK
* @param res - HTTP response for writing
* @param log - Werelogs instance for logging if you choose to
* @param [message] - Message to send as response, defaults to OK
*/
export function sendSuccess(
res: http.ServerResponse,
log: RequestLogger,
message = 'OK'
) {
log.debug('replying with success');
res.writeHead(200);
res.end(message);
}
/**
* Send an Arsenal Error response
* @param res - HTTP response for writing
* @param log - Werelogs instance for logging if you choose to
* @param error - Error to send back to the user
* @param [optMessage] - Message to use instead of the errors message
*/
export function sendError(
res: http.ServerResponse,
log: RequestLogger,
error: ArsenalError,
optMessage?: string
) {
const message = optMessage || error.description || '';
log.debug('sending back error response', {
httpCode: error.code,
errorType: error.message,
error: message,
});
res.writeHead(error.code);
res.end(
JSON.stringify({
errorType: error.message,
errorMessage: message,
})
);
}

View File

@ -4,7 +4,7 @@ import * as werelogs from 'werelogs';
import * as constants from '../../constants';
import * as utils from './utils';
import errors, { ArsenalError } from '../../errors';
import HttpAgent from 'agentkeepalive';
import { http as HttpAgent } from 'httpagent';
import * as stream from 'stream';
function setRequestUids(reqHeaders: http.IncomingHttpHeaders, reqUids: string) {
@ -71,8 +71,9 @@ function makeErrorFromHTTPResponse(response: http.IncomingMessage) {
export default class RESTClient {
host: string;
port: number;
httpAgent: HttpAgent;
httpAgent: http.Agent;
logging: werelogs.Logger;
isPassthrough: boolean;
/**
* Interface to the data file server
@ -88,17 +89,19 @@ export default class RESTClient {
host: string;
port: number;
logApi: { Logger: typeof werelogs.Logger };
isPassthrough?: boolean;
}) {
assert(params.host);
assert(params.port);
this.host = params.host;
this.port = params.port;
this.isPassthrough = params.isPassthrough || false;
this.logging = new (params.logApi || werelogs).Logger('DataFileRESTClient');
this.httpAgent = new HttpAgent({
this.httpAgent = new HttpAgent.Agent({
keepAlive: true,
freeSocketTimeout: constants.httpClientFreeSocketTimeout,
});
}) as http.Agent;
}
/** Destroy the HTTP agent, forcing a close of the remaining open connections */
@ -121,11 +124,13 @@ export default class RESTClient {
) {
const reqHeaders = headers || {};
const urlKey = key || '';
const prefix = this.isPassthrough ?
constants.passthroughFileURL : constants.dataFileURL;
const reqParams = {
hostname: this.host,
port: this.port,
method,
path: `${constants.dataFileURL}/${urlKey}`,
path: encodeURI(`${prefix}/${urlKey}`),
headers: reqHeaders,
agent: this.httpAgent,
};

View File

@ -4,7 +4,7 @@ import * as werelogs from 'werelogs';
import * as http from 'http';
import httpServer from '../http/server';
import * as constants from '../../constants';
import * as utils from './utils';
import { parseURL } from './utils';
import * as httpUtils from '../http/utils';
import errors, { ArsenalError } from '../../errors';
@ -38,42 +38,6 @@ function sendError(
errorMessage: message })}\n`);
}
/**
* Parse the given url and return a pathInfo object. Sanity checks are
* performed.
*
* @param urlStr - URL to parse
* @param expectKey - whether the command expects to see a
* key in the URL
* @return a pathInfo object with URL items containing the
* following attributes:
* - pathInfo.service {String} - The name of REST service ("DataFile")
* - pathInfo.key {String} - The requested key
*/
function parseURL(urlStr: string, expectKey: boolean) {
const urlObj = url.parse(urlStr);
const pathInfo = utils.explodePath(urlObj.path!);
if (pathInfo.service !== constants.dataFileURL) {
throw errors.InvalidAction.customizeDescription(
`unsupported service '${pathInfo.service}'`);
}
if (expectKey && pathInfo.key === undefined) {
throw errors.MissingParameter.customizeDescription(
'URL is missing key');
}
if (!expectKey && pathInfo.key !== undefined) {
// note: we may implement rewrite functionality by allowing a
// key in the URL, though we may still provide the new key in
// the Location header to keep immutability property and
// atomicity of the update (we would just remove the old
// object when the new one has been written entirely in this
// case, saving a request over an equivalent PUT + DELETE).
throw errors.InvalidURI.customizeDescription(
'PUT url cannot contain a key');
}
return pathInfo;
}
/**
* @class
* @classdesc REST Server interface

View File

@ -1,6 +1,16 @@
import errors from '../../errors';
import * as constants from '../../constants';
import * as url from 'url';
const passthroughPrefixLength = constants.passthroughFileURL.length;
export function explodePath(path: string) {
if (path.startsWith(constants.passthroughFileURL)) {
const key = path.slice(passthroughPrefixLength + 1);
return {
service: constants.passthroughFileURL,
key: key.length > 0 ? key : undefined,
};
}
const pathMatch = /^(\/[a-zA-Z0-9]+)(\/([0-9a-f]*))?$/.exec(path);
if (pathMatch) {
return {
@ -10,4 +20,41 @@ export function explodePath(path: string) {
};
}
throw errors.InvalidURI.customizeDescription('malformed URI');
};
}
/**
* Parse the given url and return a pathInfo object. Sanity checks are
* performed.
*
* @param urlStr - URL to parse
* @param expectKey - whether the command expects to see a
* key in the URL
* @return a pathInfo object with URL items containing the
* following attributes:
* - pathInfo.service {String} - The name of REST service ("DataFile")
* - pathInfo.key {String} - The requested key
*/
export function parseURL(urlStr: string, expectKey: boolean) {
const urlObj = url.parse(urlStr);
const pathInfo = explodePath(decodeURI(urlObj.path!));
if ((pathInfo.service !== constants.dataFileURL)
&& (pathInfo.service !== constants.passthroughFileURL)) {
throw errors.InvalidAction.customizeDescription(
`unsupported service '${pathInfo.service}'`);
}
if (expectKey && pathInfo.key === undefined) {
throw errors.MissingParameter.customizeDescription(
'URL is missing key');
}
if (!expectKey && pathInfo.key !== undefined) {
// note: we may implement rewrite functionality by allowing a
// key in the URL, though we may still provide the new key in
// the Location header to keep immutability property and
// atomicity of the update (we would just remove the old
// object when the new one has been written entirely in this
// case, saving a request over an equivalent PUT + DELETE).
throw errors.InvalidURI.customizeDescription(
'PUT url cannot contain a key');
}
return pathInfo;
}
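Illustrative calls against the two services now recognized (the keys are made up; constants is the module already imported above, and data-file keys must be hexadecimal to satisfy explodePath):

parseURL(`${constants.dataFileURL}/0123abcd`, true);
// => { service: constants.dataFileURL, key: '0123abcd' }
parseURL(`${constants.passthroughFileURL}/some%20key`, true);
// => { service: constants.passthroughFileURL, key: 'some key' } after decodeURI
parseURL(`${constants.dataFileURL}/0123abcd`, false);
// throws InvalidURI: a PUT url cannot contain a key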

View File

@ -0,0 +1,209 @@
import { URL } from 'url';
import { decryptSecret } from '../executables/pensieveCreds/utils';
import { Logger } from 'werelogs';
export type LocationType =
| 'location-mem-v1'
| 'location-file-v1'
| 'location-azure-v1'
| 'location-ceph-radosgw-s3-v1'
| 'location-scality-ring-s3-v1'
| 'location-aws-s3-v1'
| 'location-wasabi-v1'
| 'location-do-spaces-v1'
| 'location-gcp-v1'
| 'location-scality-sproxyd-v1'
| 'location-nfs-mount-v1'
| 'location-scality-hdclient-v2';
export interface OverlayLocations {
[key: string]: {
name: string;
objectId: string;
details?: any;
locationType: string;
sizeLimitGB?: number;
isTransient?: boolean;
legacyAwsBehavior?: boolean;
};
}
export type Location = {
type:
| 'mem'
| 'file'
| 'azure'
| 'aws_s3'
| 'gcp'
| 'scality'
| 'pfs';
name: string;
objectId: string;
details: { [key: string]: any };
locationType: string;
sizeLimitGB: number | null;
isTransient: boolean;
legacyAwsBehavior: boolean;
};
export function patchLocations(
overlayLocations: OverlayLocations | undefined | null,
creds: any,
log: Logger
) {
const locs = overlayLocations ?? {};
return Object.entries(locs).reduce<{ [key: string]: Location }>(
(acc, [k, l]) => {
const location: Location = {
type: 'mem',
name: k,
objectId: l.objectId,
details: l.details || {},
locationType: l.locationType,
sizeLimitGB: l.sizeLimitGB || null,
isTransient: Boolean(l.isTransient),
legacyAwsBehavior: Boolean(l.legacyAwsBehavior),
};
let supportsVersioning = false;
let pathStyle = process.env.CI_CEPH !== undefined;
switch (l.locationType) {
case 'location-mem-v1':
location.type = 'mem';
location.details = { supportsVersioning: true };
break;
case 'location-file-v1':
location.type = 'file';
location.details = { supportsVersioning: true };
break;
case 'location-azure-v1':
location.type = 'azure';
if (l.details.secretKey && l.details.secretKey.length > 0) {
location.details = {
bucketMatch: l.details.bucketMatch,
azureStorageEndpoint: l.details.endpoint,
azureStorageAccountName: l.details.accessKey,
azureStorageAccessKey: decryptSecret(
creds,
l.details.secretKey
),
azureContainerName: l.details.bucketName,
};
}
break;
case 'location-ceph-radosgw-s3-v1':
case 'location-scality-ring-s3-v1':
pathStyle = true; // fallthrough
case 'location-aws-s3-v1':
case 'location-wasabi-v1':
supportsVersioning = true; // fallthrough
case 'location-do-spaces-v1':
location.type = 'aws_s3';
if (l.details.secretKey && l.details.secretKey.length > 0) {
let https = true;
let awsEndpoint =
l.details.endpoint || 's3.amazonaws.com';
if (awsEndpoint.includes('://')) {
const url = new URL(awsEndpoint);
awsEndpoint = url.host;
https = url.protocol.includes('https');
}
location.details = {
credentials: {
accessKey: l.details.accessKey,
secretKey: decryptSecret(
creds,
l.details.secretKey
),
},
bucketName: l.details.bucketName,
bucketMatch: l.details.bucketMatch,
serverSideEncryption: Boolean(
l.details.serverSideEncryption
),
region: l.details.region,
awsEndpoint,
supportsVersioning,
pathStyle,
https,
};
}
break;
case 'location-gcp-v1':
location.type = 'gcp';
if (l.details.secretKey && l.details.secretKey.length > 0) {
location.details = {
credentials: {
accessKey: l.details.accessKey,
secretKey: decryptSecret(
creds,
l.details.secretKey
),
},
bucketName: l.details.bucketName,
mpuBucketName: l.details.mpuBucketName,
bucketMatch: l.details.bucketMatch,
gcpEndpoint:
l.details.endpoint || 'storage.googleapis.com',
https: true,
};
}
break;
case 'location-scality-sproxyd-v1':
location.type = 'scality';
if (
l.details &&
l.details.bootstrapList &&
l.details.proxyPath
) {
location.details = {
supportsVersioning: true,
connector: {
sproxyd: {
chordCos: l.details.chordCos || null,
bootstrap: l.details.bootstrapList,
path: l.details.proxyPath,
},
},
};
}
break;
case 'location-nfs-mount-v1':
location.type = 'pfs';
if (l.details) {
location.details = {
supportsVersioning: true,
bucketMatch: true,
pfsDaemonEndpoint: {
host: `${l.name}-cosmos-pfsd`,
port: 80,
},
};
}
break;
case 'location-scality-hdclient-v2':
location.type = 'scality';
if (l.details && l.details.bootstrapList) {
location.details = {
supportsVersioning: true,
connector: {
hdclient: {
bootstrap: l.details.bootstrapList,
},
},
};
}
break;
default:
log.info('unknown location type', {
locationType: l.locationType,
});
return acc;
}
return { ...acc, [location.name]: location };
},
{}
);
}
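A usage sketch under assumptions: a single overlay entry of type location-file-v1, which needs no decrypted secret, so creds can be null here; the objectId is illustrative.

import { Logger } from 'werelogs';

const locations = patchLocations(
    {
        'local-file': {
            name: 'local-file',
            objectId: '0123-4567',
            locationType: 'location-file-v1',
        },
    },
    null, // creds: only used for locations carrying an encrypted secretKey
    new Logger('patchLocations-example'),
);
// locations['local-file'].type === 'file'
// locations['local-file'].details.supportsVersioning === true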

View File

@ -38,6 +38,10 @@
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:saml-provider/[\\w._-]{1,128}$"
},
"principalFederatedOidcIdp": {
"type": "string",
"pattern": "^(?:http(s)?:\/\/)?[\\w.-]+(?:\\.[\\w\\.-]+)+[\\w\\-\\._~:/?#[\\]@!\\$&'\\(\\)\\*\\+,;=.]+$"
},
"principalAWSItem": {
"type": "object",
"properties": {
@ -98,6 +102,9 @@
"oneOf": [
{
"$ref": "#/definitions/principalFederatedSamlIdp"
},
{
"$ref": "#/definitions/principalFederatedOidcIdp"
}
]
}

View File

@ -15,11 +15,36 @@ import {
actionMapScuba,
} from './utils/actionMaps';
const _actionNeedQuotaCheck = {
export const actionNeedQuotaCheck = {
objectPut: true,
objectPutVersion: true,
objectPutPart: true,
objectRestore: true,
};
/**
* APIs that delete data and hence reduce the bytes
* stored, requiring quota updates
*/
export const actionWithDataDeletion = {
objectDelete: true,
objectDeleteVersion: true,
multipartDelete: true,
multiObjectDelete: true,
};
/**
* Returns true if the current API call is an object copy and the
* given action requires quota evaluation once the source object
* metadata has been retrieved.
* @param {string} action - the action being performed
* @param {string} currentApi - the current API being called
* @return {boolean} - whether the action requires a quota check
*/
export function actionNeedQuotaCheckCopy(action: string, currentApi: string) {
return action === 'objectGet' && (currentApi === 'objectCopy' || currentApi === 'objectPutCopyPart');
}
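Usage sketch of the helper above:

actionNeedQuotaCheckCopy('objectGet', 'objectCopy');        // true
actionNeedQuotaCheckCopy('objectGet', 'objectPutCopyPart'); // true
actionNeedQuotaCheckCopy('objectPut', 'objectCopy');        // false: only the source read qualifies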
function _findAction(service: string, method: string) {
switch (service) {
case 's3':
@ -126,7 +151,9 @@ export type RequesterInfo = {
principalType: string;
principaltype: string;
userid: string;
username: string,
username: string;
keycloakGroup: string;
keycloakRole: string;
}
/**
@ -229,7 +256,8 @@ export default class RequestContext {
this._securityToken = securityToken;
this._policyArn = policyArn;
this._action = action;
this._needQuota = _actionNeedQuotaCheck[apiMethod] === true;
this._needQuota = actionNeedQuotaCheck[apiMethod] === true
|| actionWithDataDeletion[apiMethod] === true;
this._requestObjTags = requestObjTags || null;
this._existingObjTag = existingObjTag || null;
this._needTagEval = needTagEval || false;

View File

@ -33,6 +33,7 @@ const sharedActionMap = {
bypassGovernanceRetention: 's3:BypassGovernanceRetention',
listMultipartUploads: 's3:ListBucketMultipartUploads',
listParts: 's3:ListMultipartUploadParts',
metadataSearch: 's3:MetadataSearch',
multipartDelete: 's3:AbortMultipartUpload',
objectDelete: 's3:DeleteObject',
objectDeleteTagging: 's3:DeleteObjectTagging',
@ -47,6 +48,14 @@ const sharedActionMap = {
objectPutLegalHold: 's3:PutObjectLegalHold',
objectPutRetention: 's3:PutObjectRetention',
objectPutTagging: 's3:PutObjectTagging',
objectRestore: 's3:RestoreObject',
objectPutVersion: 's3:PutObjectVersion',
};
const actionMapBucketQuotas = {
bucketGetQuota: 'scality:GetBucketQuota',
bucketUpdateQuota: 'scality:UpdateBucketQuota',
bucketDeleteQuota: 'scality:DeleteBucketQuota',
};
// action map used for request context
@ -62,6 +71,7 @@ const actionMapRQ = {
initiateMultipartUpload: 's3:PutObject',
objectDeleteVersion: 's3:DeleteObjectVersion',
objectDeleteTaggingVersion: 's3:DeleteObjectVersionTagging',
objectGetArchiveInfo: 'scality:GetObjectArchiveInfo',
objectGetVersion: 's3:GetObjectVersion',
objectGetACLVersion: 's3:GetObjectVersionAcl',
objectGetTaggingVersion: 's3:GetObjectVersionTagging',
@ -70,10 +80,13 @@ const actionMapRQ = {
objectPutTaggingVersion: 's3:PutObjectVersionTagging',
serviceGet: 's3:ListAllMyBuckets',
objectReplicate: 's3:ReplicateObject',
objectPutRetentionVersion: 's3:PutObjectVersionRetention',
objectPutLegalHoldVersion: 's3:PutObjectVersionLegalHold',
objectGetRetentionVersion: 's3:GetObjectRetention',
objectPutRetentionVersion: 's3:PutObjectRetention',
objectGetLegalHoldVersion: 's3:GetObjectLegalHold',
objectPutLegalHoldVersion: 's3:PutObjectLegalHold',
listObjectVersions: 's3:ListBucketVersions',
...sharedActionMap,
...actionMapBucketQuotas,
};
// action map used for bucket policies
@ -125,6 +138,7 @@ const actionMonitoringMapS3 = {
initiateMultipartUpload: 'CreateMultipartUpload',
listMultipartUploads: 'ListMultipartUploads',
listParts: 'ListParts',
metadataSearch: 'MetadataSearch',
multiObjectDelete: 'DeleteObjects',
multipartDelete: 'AbortMultipartUpload',
objectCopy: 'CopyObject',
@ -143,7 +157,17 @@ const actionMonitoringMapS3 = {
objectPutPart: 'UploadPart',
objectPutRetention: 'PutObjectRetention',
objectPutTagging: 'PutObjectTagging',
objectRestore: 'RestoreObject',
serviceGet: 'ListBuckets',
bucketGetQuota: 'GetBucketQuota',
bucketUpdateQuota: 'UpdateBucketQuota',
bucketDeleteQuota: 'DeleteBucketQuota',
};
const actionMapAccountQuotas = {
UpdateAccountQuota: 'scality:UpdateAccountQuota',
DeleteAccountQuota: 'scality:DeleteAccountQuota',
GetAccountQuota: 'scality:GetAccountQuota',
};
const actionMapIAM = {
@ -168,6 +192,7 @@ const actionMapIAM = {
getPolicyVersion: 'iam:GetPolicyVersion',
getUser: 'iam:GetUser',
listAccessKeys: 'iam:ListAccessKeys',
listEntitiesForPolicy: 'iam:ListEntitiesForPolicy',
listGroupPolicies: 'iam:ListGroupPolicies',
listGroups: 'iam:ListGroups',
listGroupsForUser: 'iam:ListGroupsForUser',
@ -186,6 +211,7 @@ const actionMapIAM = {
tagUser: 'iam:TagUser',
unTagUser: 'iam:UntagUser',
listUserTags: 'iam:ListUserTags',
...actionMapAccountQuotas,
};
const actionMapSSO = {

View File

@ -142,6 +142,8 @@ export function findConditionKey(
// header
case 's3:ObjLocationConstraint': return headers['x-amz-meta-scal-location-constraint'];
case 'sts:ExternalId': return requestContext.getRequesterExternalId();
case 'keycloak:groups': return requesterInfo.keycloakGroup;
case 'keycloak:roles': return requesterInfo.keycloakRole;
case 'iam:PolicyArn': return requestContext.getPolicyArn();
// s3:ExistingObjectTag - Used to check that existing object tag has
// specific tag key and value. Extraction of correct tag key is done in CloudServer.

View File

@ -30,7 +30,7 @@ export default class ResultsCollector extends EventEmitter {
* @emits ResultCollector#done
* @emits ResultCollector#error
*/
pushResult(err: Error | undefined, subPartIndex: number) {
pushResult(err: Error | null | undefined, subPartIndex: number) {
this._results.push({
error: err,
subPartIndex,

View File

@ -1,6 +1,7 @@
import assert from 'assert';
import * as crypto from 'crypto';
import * as stream from 'stream';
import azure from '@azure/storage-blob';
import { RequestLogger } from 'werelogs';
@ -8,7 +9,7 @@ import ResultsCollector from './ResultsCollector';
import SubStreamInterface from './SubStreamInterface';
import * as objectUtils from '../objectUtils';
import MD5Sum from '../MD5Sum';
import errors from '../../errors';
import errors, { ArsenalError } from '../../errors';
export const splitter = '|';
export const overviewMpuKey = 'azure_mpu';
@ -64,7 +65,7 @@ export const getBlockId = (
const paddedSubPart = padString(subPartIndex, 'subPart');
const blockId = `${uploadId}${splitter}partNumber${paddedPartNumber}` +
`${splitter}subPart${paddedSubPart}${splitter}`;
return padString(blockId, 'part');
return Buffer.from(padString(blockId, 'part')).toString('base64');
};
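A sketch of what the change produces (the uploadId is illustrative): Azure requires block IDs to be base64-encoded, and every block ID within a blob must have the same length, hence the fixed-width padding before encoding.

const blockId = getBlockId('0abc1def', 2, 0);
// plain form: '0abc1def|partNumber<padded>|subPart<padded>|', padded to a fixed width
Buffer.from(blockId, 'base64').toString(); // recovers the padded plain id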
export const getSummaryPartId = (partNumber: number, eTag: string, size: number) => {
@ -103,10 +104,17 @@ export const getSubPartIds = (
) => [...Array(part.numberSubParts).keys()].map(subPartIndex =>
getBlockId(uploadId, part.partNumber, subPartIndex));
// TODO Better type this
type ErrorWrapperFn = (
s3Method: string,
azureMethod: string,
command: (client: azure.ContainerClient) => Promise<any>,
log: RequestLogger,
cb: (err: ArsenalError | null | undefined) => void,
) => void
export const putSinglePart = (
errorWrapperFn: (first: string, second: string, third: any, log: any, cb: any) => void,
request: any,
errorWrapperFn: ErrorWrapperFn,
request: stream.Readable,
params: {
bucketName: string;
partNumber: number;
@ -117,44 +125,44 @@ export const putSinglePart = (
},
dataStoreName: string,
log: RequestLogger,
cb: any,
cb: (err: ArsenalError | null | undefined, dataStoreETag?: string, size?: number) => void,
) => {
const { bucketName, partNumber, size, objectKey, contentMD5, uploadId }
= params;
const blockId = getBlockId(uploadId, partNumber, 0);
const passThrough = new stream.PassThrough();
const options = contentMD5
? { useTransactionalMD5: true, transactionalContentMD5: contentMD5 }
? { transactionalContentMD5: objectUtils.getMD5Buffer(contentMD5) }
: {};
request.pipe(passThrough);
return errorWrapperFn('uploadPart', 'createBlockFromStream',
[blockId, bucketName, objectKey, passThrough, size, options,
(err: any | null, result: any) => {
if (err) {
return errorWrapperFn('uploadPart', 'createBlockFromStream', async client => {
try {
const result = await client.getBlockBlobClient(objectKey)
.stageBlock(blockId, () => passThrough, size, options);
const md5 = result.contentMD5 || '';
const eTag = objectUtils.getHexMD5(md5);
return eTag;
} catch (err: any) {
log.error('Error from Azure data backend uploadPart',
{ error: err.message, dataStoreName });
if (err.code === 'ContainerNotFound') {
return cb(errors.NoSuchBucket);
throw errors.NoSuchBucket;
}
if (err.code === 'InvalidMd5') {
return cb(errors.InvalidDigest);
throw errors.InvalidDigest;
}
if (err.code === 'Md5Mismatch') {
return cb(errors.BadDigest);
throw errors.BadDigest;
}
return cb(errors.InternalError.customizeDescription(
`Error returned from Azure: ${err.message}`),
throw errors.InternalError.customizeDescription(
`Error returned from Azure: ${err.message}`
);
}
const md5 = result.headers['content-md5'] || '';
const eTag = objectUtils.getHexMD5(md5);
return cb(null, eTag, size);
}], log, cb);
}, log, cb);
};
// TODO type this
export const putNextSubPart = (
errorWrapperFn: any,
const putNextSubPart = (
errorWrapperFn: ErrorWrapperFn,
partParams: {
uploadId: string;
partNumber: number;
@ -162,11 +170,10 @@ export const putNextSubPart = (
objectKey: string;
},
subPartInfo: { lastPartIndex: number; lastPartSize: number },
subPartStream: any,
subPartStream: stream.Readable,
subPartIndex: number,
resultsCollector: ResultsCollector,
log: RequestLogger,
cb: any,
) => {
const { uploadId, partNumber, bucketName, objectKey } = partParams;
const subPartSize = getSubPartSize(
@ -174,14 +181,20 @@ export const putNextSubPart = (
const subPartId = getBlockId(uploadId, partNumber,
subPartIndex);
resultsCollector.pushOp();
errorWrapperFn('uploadPart', 'createBlockFromStream',
[subPartId, bucketName, objectKey, subPartStream, subPartSize,
{}, err => resultsCollector.pushResult(err, subPartIndex)], log, cb);
errorWrapperFn('uploadPart', 'createBlockFromStream', async client => {
try {
await client.getBlockBlobClient(objectKey)
.stageBlock(subPartId, () => subPartStream, subPartSize, {});
resultsCollector.pushResult(null, subPartIndex);
} catch (err: any) {
resultsCollector.pushResult(err, subPartIndex);
}
}, log, () => {});
};
export const putSubParts = (
errorWrapperFn: ErrorWrapperFn,
request: stream.Readable,
params: {
uploadId: string;
partNumber: number;
@ -191,7 +204,7 @@ export const putSubParts = (
},
dataStoreName: string,
log: RequestLogger,
cb: (err: ArsenalError | null | undefined, dataStoreETag?: string) => void,
) => {
const subPartInfo = getSubPartInfo(params.size);
const resultsCollector = new ResultsCollector();
@ -230,14 +243,13 @@ export const putSubParts = (
const totalLength = streamInterface.getTotalBytesStreamed();
log.trace('successfully put subparts to Azure',
{ numberSubParts, totalLength });
hashedStream.on('hashed', () => cb(null, hashedStream.completedHash));
// in case the hashed event was already emitted before the
// event handler was registered:
if (hashedStream.completedHash) {
hashedStream.removeAllListeners('hashed');
return cb(null, hashedStream.completedHash);
}
return undefined;
});
@ -245,7 +257,7 @@ export const putSubParts = (
const currentStream = streamInterface.getCurrentStream();
// start first put to Azure before we start streaming the data
putNextSubPart(errorWrapperFn, params, subPartInfo,
currentStream, 0, resultsCollector, log);
request.pipe(hashedStream);
hashedStream.on('end', () => {
@ -265,8 +277,8 @@ export const putSubParts = (
}
const { nextStream, subPartIndex } =
streamInterface.transitionToNextStream();
putNextSubPart(errorWrapperFn, params, subPartInfo, nextStream,
subPartIndex, resultsCollector, log);
streamInterface.write(firstChunk);
} else {
streamInterface.write(data);

View File

@ -1,19 +1,25 @@
import { scaleMsPerDay } from '../objectUtils';

const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
export default class LifecycleDateTime {
_transitionOneDayEarlier?: boolean;
_expireOneDayEarlier?: boolean;
_timeProgressionFactor?: number;
_scaledMsPerDay: number;
constructor(params?: {
transitionOneDayEarlier: boolean;
expireOneDayEarlier: boolean;
timeProgressionFactor: number;
}) {
this._transitionOneDayEarlier = params?.transitionOneDayEarlier;
this._expireOneDayEarlier = params?.expireOneDayEarlier;
this._timeProgressionFactor = params?.timeProgressionFactor || 1;
this._scaledMsPerDay = scaleMsPerDay(this._timeProgressionFactor);
}
getCurrentDate() {
const timeTravel = this._expireOneDayEarlier ? msInOneDay : 0;
return Date.now() + timeTravel;
}
@ -25,7 +31,7 @@ export default class LifecycleDateTime {
findDaysSince(date: Date) {
const now = this.getCurrentDate();
const diff = now - date.getTime();
return Math.floor(diff / this._scaledMsPerDay);
}
/**
@ -52,8 +58,25 @@ export default class LifecycleDateTime {
}
if (transition.Days !== undefined) {
const lastModifiedTime = this.getTimestamp(lastModified);
const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
return lastModifiedTime + (transition.Days * this._scaledMsPerDay) + timeTravel;
}
}
/**
* Find the Unix time at which the non-current version transition should occur.
* @param transition - A non-current version transition from the lifecycle non-current version transitions
* @param lastModified - The object's last modified date
* @return - The normalized transition timestamp
*/
getNCVTransitionTimestamp(
transition: { NoncurrentDays?: number },
lastModified: string,
) {
if (transition.NoncurrentDays !== undefined) {
const lastModifiedTime = this.getTimestamp(lastModified);
const timeTravel = this._transitionOneDayEarlier ? -msInOneDay : 0;
return lastModifiedTime + (transition.NoncurrentDays * this._scaledMsPerDay) + timeTravel;
}
}
}
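A short usage sketch of the scaled clock (values illustrative): with a timeProgressionFactor of 24, scaleMsPerDay yields 3,600,000 ms, so one lifecycle "day" elapses per real hour.

const dt = new LifecycleDateTime({
    transitionOneDayEarlier: false,
    expireOneDayEarlier: false,
    timeProgressionFactor: 24, // 86400000 / 24 = 3600000 ms per "day"
});
// An object last modified 2 real hours ago counts as 2 "days" old:
const lastModified = new Date(Date.now() - 2 * 60 * 60 * 1000);
dt.findDaysSince(lastModified); // => 2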

View File

@ -61,6 +61,47 @@ export default class LifecycleUtils {
return trans1 > trans2 ? transition1 : transition2;
}
/**
* Compare two non-current version transition rules and return the one that is most recent.
* @param params - The function parameters
* @param params.transition1 - A non-current version transition from the current rule
* @param params.transition2 - A non-current version transition from the previous rule
* @param params.lastModified - The object's last modified
* date
* @return The most applicable transition rule
*/
compareNCVTransitions(params: {
lastModified: string;
transition1: any;
transition2?: any;
}): any | undefined;
compareNCVTransitions(params: {
lastModified: string;
transition1?: any;
transition2: any;
}): any | undefined;
compareNCVTransitions(params: {
lastModified: string;
transition1: any;
transition2: any;
}): any | undefined;
compareNCVTransitions(params: {
lastModified: string;
transition1?: any;
transition2?: any;
}) {
const { transition1, transition2, lastModified } = params;
if (transition1 === undefined) {
return transition2;
}
if (transition2 === undefined) {
return transition1;
}
const trans1 = this._datetime.getNCVTransitionTimestamp(transition1!, lastModified)!;
const trans2 = this._datetime.getNCVTransitionTimestamp(transition2!, lastModified)!;
return trans1 > trans2 ? transition1 : transition2;
}
// TODO Fix This
/**
* Find the most relevant transition rule for the given transitions array
@ -98,6 +139,42 @@ export default class LifecycleUtils {
});
}
/**
* Find the most relevant non-current version transition rule for the given transitions array
* and any previously stored non-current version transition from another rule.
* @param params - The function parameters
* @param params.transitions - Array of lifecycle non-current version transitions
* @param params.lastModified - The object's last modified
* date
* @return The most applicable non-current version transition rule
*/
getApplicableNCVTransition(params: {
store: any;
currentDate: Date;
transitions: any[];
lastModified: string;
}) {
const { transitions, store, lastModified, currentDate } = params;
const transition = transitions.reduce((result, transition) => {
const isApplicable = // Is the transition time in the past?
this._datetime.getTimestamp(currentDate) >=
this._datetime.getNCVTransitionTimestamp(transition, lastModified)!;
if (!isApplicable) {
return result;
}
return this.compareNCVTransitions({
transition1: transition,
transition2: result,
lastModified,
});
}, undefined);
return this.compareNCVTransitions({
transition1: transition,
transition2: store.NoncurrentVersionTransition,
lastModified,
});
}
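For context, a hedged sketch of how a rule's NoncurrentVersionTransitions array flows through the reducer above; the field names follow the AWS lifecycle schema, and the lifecycleUtils instance is assumed:

const picked = lifecycleUtils.getApplicableNCVTransition({
    store: {}, // nothing chosen by earlier rules yet
    currentDate: new Date(),
    transitions: [
        { NoncurrentDays: 7, StorageClass: 'cold-storage' },
        { NoncurrentDays: 30, StorageClass: 'deep-archive' },
    ],
    lastModified: '2024-01-01T00:00:00.000Z',
});
// Once both deadlines are in the past, the 30-day transition wins,
// because compareNCVTransitions keeps the most recent applicable one.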
// TODO
/**
* Filter out all rules based on `Status` and `Filter` (Prefix and Tags)
@ -241,7 +318,17 @@ export default class LifecycleUtils {
currentDate,
});
}
const ncvt = 'NoncurrentVersionTransitions';
const hasNoncurrentVersionTransitions = Array.isArray(rule[ncvt]) && rule[ncvt].length > 0;
if (hasNoncurrentVersionTransitions && this._supportedRules.includes('noncurrentVersionTransition')) {
store.NoncurrentVersionTransition = this.getApplicableNCVTransition({
transitions: rule.NoncurrentVersionTransitions,
lastModified: metadata.LastModified,
store,
currentDate,
});
}
return store;
}, {});
// Do not transition to a location where the object is already stored.
@ -249,6 +336,12 @@ export default class LifecycleUtils {
&& applicableRules.Transition.StorageClass === metadata.StorageClass) {
applicableRules.Transition = undefined;
}
if (applicableRules.NoncurrentVersionTransition
&& applicableRules.NoncurrentVersionTransition.StorageClass === metadata.StorageClass) {
applicableRules.NoncurrentVersionTransition = undefined;
}
return applicableRules;
/* eslint-enable no-param-reassign */
}

View File

@ -0,0 +1,110 @@
import { parseStringPromise } from 'xml2js';
import errors, { ArsenalError } from '../errors';
import * as werelogs from 'werelogs';
import { validRestoreObjectTiers } from '../constants';
/*
Format of xml request:
<RestoreRequest>
<Days>integer</Days>
<Tier>Standard|Bulk|Expedited</Tier>
</RestoreRequest>
*/
/**
* validate restore request xml
* @param restoreRequest - parsed restore request object
* @return{ArsenalError|undefined} - error on failure, undefined on success
*/
function validateRestoreRequest(restoreRequest?: any) {
if (!restoreRequest) {
const desc = 'request xml does not contain RestoreRequest';
return errors.MalformedXML.customizeDescription(desc);
}
if (!restoreRequest.Days || !restoreRequest.Days[0]) {
const desc = 'request xml does not contain RestoreRequest.Days';
return errors.MalformedXML.customizeDescription(desc);
}
// RestoreRequest.Days must be greater than or equal to 1
const daysValue = Number.parseInt(restoreRequest.Days[0], 10);
if (Number.isNaN(daysValue)) {
const desc = `RestoreRequest.Days is invalid type. [${restoreRequest.Days[0]}]`;
return errors.MalformedXML.customizeDescription(desc);
}
if (daysValue < 1) {
const desc = `RestoreRequest.Days must be greater than 0. [${restoreRequest.Days[0]}]`;
return errors.MalformedXML.customizeDescription(desc);
}
if (daysValue > 2147483647) {
const desc = `RestoreRequest.Days must be less than 2147483648. [${restoreRequest.Days[0]}]`;
return errors.MalformedXML.customizeDescription(desc);
}
if (restoreRequest.Tier && restoreRequest.Tier[0] && !validRestoreObjectTiers.has(restoreRequest.Tier[0])) {
const desc = `RestoreRequest.Tier is invalid value. [${restoreRequest.Tier[0]}]`;
return errors.MalformedXML.customizeDescription(desc);
}
return undefined;
}
/**
* parseRestoreRequestXml - Parse and validate xml body, returning callback with
* object restoreReqObj: { days: <value>, tier: <value> }
* @param xml - xml body to parse and validate
* @param log - Werelogs logger
* @param cb - callback to server
* @return - calls callback with object restore request or error
*/
export async function parseRestoreRequestXml(
xml: string,
log: werelogs.Logger,
cb: (err: ArsenalError | null, data?: any) => void,
) {
let result;
try {
result = await parseStringPromise(xml);
} catch (err) {
log.debug('xml parsing failed', {
error: err,
method: 'parseRestoreXml',
xml,
});
return cb(errors.MalformedXML);
}
if (!result) {
const desc = 'request xml is undefined or empty';
return cb(errors.MalformedXML.customizeDescription(desc));
}
const restoreRequest = result.RestoreRequest;
const restoreReqError = validateRestoreRequest(restoreRequest);
if (restoreReqError) {
log.debug('restore request validation failed', {
error: restoreReqError,
method: 'validateRestoreRequest',
xml,
});
return cb(restoreReqError);
}
// If Tier is not specified, default to "Standard"
return cb(null, {
days: Number.parseInt(restoreRequest.Days[0], 10),
tier: restoreRequest.Tier && restoreRequest.Tier[0] ? restoreRequest.Tier[0] : 'Standard',
});
}
/**
* convertToXml - Convert restore request info object to xml
* @param days - restore days
* @param tier - restore tier
* @return - returns restore request information xml string
*/
export function convertToXml(days: string, tier: string) {
if (!(days && tier)) {
return '';
}
return [
'<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
`<Days>${days}</Days>`,
`<Tier>${tier}</Tier>`,
'</RestoreRequest>',
].join('');
}
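A small round-trip sketch (the werelogs logger instance is assumed):

const xml = convertToXml('5', 'Standard');
// => '<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
//       <Days>5</Days><Tier>Standard</Tier></RestoreRequest>'
parseRestoreRequestXml(xml, log, (err, restoreInfo) => {
    // err => null, restoreInfo => { days: 5, tier: 'Standard' }
});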

View File

@ -1,5 +1,21 @@
const msInOneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.

export const getMD5Buffer = (base64MD5: WithImplicitCoercion<string> | Uint8Array) =>
base64MD5 instanceof Uint8Array ? base64MD5 : Buffer.from(base64MD5, 'base64');

export const getHexMD5 = (base64MD5: WithImplicitCoercion<string> | Uint8Array) =>
getMD5Buffer(base64MD5).toString('hex');
export const getBase64MD5 = (hexMD5: WithImplicitCoercion<string>) =>
Buffer.from(hexMD5, 'hex').toString('base64');
/**
* Calculates the number of scaled milliseconds per day based on the given time progression factor.
* This function is intended for testing and simulation purposes only.
* @param {number} timeProgressionFactor - The desired time progression factor for scaling.
* @returns {number} The number of scaled milliseconds per day.
* If the result is 0, the minimum value of 1 millisecond is returned.
*/
export const scaleMsPerDay = (timeProgressionFactor: number): number =>
Math.round(msInOneDay / (timeProgressionFactor || 1)) || 1;
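Concretely:

scaleMsPerDay(1);    // => 86400000 (real time, no scaling)
scaleMsPerDay(24);   // => 3600000 (one "day" per real hour)
scaleMsPerDay(1e12); // => 1 (rounds to 0, clamped to the 1 ms minimum)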

View File

@ -3,6 +3,11 @@ import * as werelogs from 'werelogs';
import errors, { ArsenalError } from '../errors';
import escapeForXml from './escapeForXml';
export interface BucketTag {
Key: string;
Value: string;
};
const errorInvalidArgument = () => errors.InvalidArgument
.customizeDescription('The header \'x-amz-tagging\' shall be ' +
'encoded as UTF-8 then URLEncoded URL query parameters without ' +
@ -32,6 +37,15 @@ export const _validator = {
&& tag.Key[0] !== undefined && tag.Value[0] !== undefined
&& typeof tag.Key[0] === 'string' && typeof tag.Value[0] === 'string',
// Allowed characters are letters, whitespace, and numbers, plus
// the following special characters: + - = . _ : /
// Maximum key length: 128 Unicode characters
// Maximum value length: 256 Unicode characters
validateTagObjectStructure: (tag: BucketTag) => tag
&& Object.keys(tag).length === 2
&& typeof tag.Key === 'string' && typeof tag.Value === 'string'
&& tag.Key.length >= 1 && tag.Value.length >= 1,
validateXMLStructure: (result: any) =>
result && Object.keys(result).length === 1 &&
result.Tagging &&
@ -100,12 +114,47 @@ function _validateTags(tags: Array<{ Key: string[], Value: string[] }>) {
}
// not repeating keys
if (tags.length > Object.keys(tagsResult).length) {
return errors.InvalidTag.customizeDescription(
'Cannot provide multiple Tags with the same key'
);
}
return tagsResult;
}
/** areTagsValid - Validate bucket tags
* @param tags - tags parsed from xml to be validated
* @return result - true if the tags are valid, false otherwise
*/
export function areTagsValid(tags: Array<BucketTag>) {
if (tags.length === 0) {
return true;
}
// Maximum number of tags per resource: 50
if (tags.length > 50) {
return false;
}
const tagsResult = {};
for (const tag of tags) {
if (!_validator.validateTagObjectStructure(tag)) {
return false;
}
const { Key: key, Value: value } = tag;
const result = _validator.validateKeyValue(key, value);
if (result instanceof Error) {
return false;
}
tagsResult[key] = value;
}
// not repeating keys
if (tags.length > Object.keys(tagsResult).length) {
return false;
}
return true;
}
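Usage sketch:

areTagsValid([{ Key: 'env', Value: 'prod' }]);   // => true
areTagsValid([{ Key: '', Value: 'x' }]);         // => false (key must be >= 1 char)
areTagsValid([
    { Key: 'a', Value: '1' },
    { Key: 'a', Value: '2' },
]);                                              // => false (duplicate key)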
/** parseTagXml - Parse and validate xml body, returning callback with object
* tags : { key: value}
* @param xml - xml body to parse and validate

View File

@ -77,6 +77,34 @@ export function _checkUnmodifiedSince(
return { present: false, error: null };
}
/**
* checks 'if-modified-since' and 'if-unmodified-since' headers if included in
* request against last-modified date of object
* @param headers - headers from request object
* @param lastModified - last modified date of object
* @return contains modifiedSince and unmodifiedSince res objects
*/
export function checkDateModifiedHeaders(
headers: http.IncomingHttpHeaders,
lastModified: string,
) {
const lastModifiedDate = new Date(lastModified);
lastModifiedDate.setMilliseconds(0);
const millis = lastModifiedDate.getTime();
const ifModifiedSinceHeader = headers['if-modified-since'] ||
headers['x-amz-copy-source-if-modified-since'];
const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
headers['x-amz-copy-source-if-unmodified-since'];
const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader?.toString(),
millis);
const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader?.toString(),
millis);
return { modifiedSinceRes, unmodifiedSinceRes };
}
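A usage sketch; each res object is assumed to have the { present, error } shape returned by the underlying _check* helpers:

const { modifiedSinceRes, unmodifiedSinceRes } = checkDateModifiedHeaders(
    { 'if-modified-since': 'Wed, 01 May 2024 00:00:00 GMT' },
    'Mon, 01 Apr 2024 00:00:00 GMT', // object's last-modified date
);
// The object was not modified after the header date, so modifiedSinceRes.error
// is expected to be set (the 304 Not Modified path); with no
// if-unmodified-since header, unmodifiedSinceRes.present === false.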
/**
* validateConditionalHeaders - validates 'if-modified-since',
* 'if-unmodified-since', 'if-match' or 'if-none-match' headers if included in
@ -92,21 +120,14 @@ export function validateConditionalHeaders(
lastModified: string,
contentMD5: string,
): {} | { present: boolean; error: ArsenalError } {
const ifMatchHeader = headers['if-match'] ||
headers['x-amz-copy-source-if-match'];
const ifNoneMatchHeader = headers['if-none-match'] ||
headers['x-amz-copy-source-if-none-match'];
const etagMatchRes = _checkEtagMatch(ifMatchHeader?.toString(), contentMD5);
const etagNoneMatchRes = _checkEtagNoneMatch(ifNoneMatchHeader?.toString(), contentMD5);
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(headers, lastModified);
// If-Unmodified-Since condition evaluates to false and If-Match
// is not present, then return the error. Otherwise, If-Unmodified-Since is
// silent when If-Match match, and when If-Match does not match, it's the

View File

@ -13,7 +13,7 @@ import * as routesUtils from './routesUtils';
import routeWebsite from './routes/routeWebsite';
import * as http from 'http';
import StatsClient from '../metrics/StatsClient';
import { objectKeyByteLimit } from '../constants';
import * as requestUtils from '../../lib/policyEvaluator/requestUtils';
const routeMap = {
@ -67,8 +67,14 @@ function checkBucketAndKey(
blacklistedPrefixes.object);
if (!result.isValid) {
log.debug('invalid object key', { objectKey });
if (result.invalidPrefix) {
return errors.InvalidArgument.customizeDescription('Invalid ' +
'prefix - object key cannot start with ' +
`"${result.invalidPrefix}".`);
}
return errors.KeyTooLong.customizeDescription('Object key is too ' +
'long. Maximum number of bytes allowed in keys is ' +
`${objectKeyByteLimit}.`);
}
}
if ((reqQuery.partNumber || reqQuery.uploadId)
@ -213,7 +219,8 @@ export default function routes(
// @ts-ignore
logger.newRequestLogger());
if (!req.url!.startsWith('/_/healthcheck') &&
!req.url!.startsWith('/_/report')) {
log.info('received request', clientInfo);
}

View File

@ -43,6 +43,8 @@ export default function routeDELETE(
return call('bucketDeleteEncryption');
} else if (query?.tagging !== undefined) {
return call('bucketDeleteTagging');
} else if (query?.quota !== undefined) {
return call('bucketDeleteQuota');
}
call('bucketDelete');
} else {

View File

@ -58,6 +58,10 @@ export default function routerGET(
call('bucketGetNotification');
} else if (query.encryption !== undefined) {
call('bucketGetEncryption');
} else if (query.search !== undefined) {
call('metadataSearch');
} else if (query.quota !== undefined) {
call('bucketGetQuota');
} else {
// GET bucket
call('bucketGet');

View File

@ -58,6 +58,14 @@ export default function routePOST(
corsHeaders));
}
// POST Object restore
if (query.restore !== undefined) {
return api.callApiMethod('objectRestore', request, response,
log, (err, statusCode, resHeaders) =>
routesUtils.responseNoBody(err, resHeaders, response,
statusCode, log));
}
return routesUtils.responseNoBody(errors.NotImplemented, null, response,
200, log);
}

View File

@ -105,6 +105,13 @@ export default function routePUT(
return routesUtils.responseNoBody(err, corsHeaders,
response, 200, log);
});
} else if (query.quota !== undefined) {
api.callApiMethod('bucketUpdateQuota', request, response,
log, (err, resHeaders) => {
routesUtils.statsReport500(err, statsClient);
return routesUtils.responseNoBody(err, resHeaders, response,
200, log);
});
} else {
// PUT bucket
return api.callApiMethod('bucketPut', request, response, log,

View File

@ -11,7 +11,7 @@ export default function routerWebsite(
api: { callApiMethod: routesUtils.CallApiMethod },
log: RequestLogger,
statsClient?: StatsClient,
dataRetrievalParams?: any,
) {
const { bucketName, query } = request as any
log.debug('routing request', { method: 'routerWebsite' });
@ -31,7 +31,7 @@ export default function routerWebsite(
if (redirectInfo) {
if (err && redirectInfo.withError) {
return routesUtils.redirectRequestOnError(err,
'GET', redirectInfo, dataGetInfo, dataRetrievalParams,
response, resMetaHeaders, log)
}
// note that key might have been modified in websiteGet
@ -45,7 +45,7 @@ export default function routerWebsite(
// user has their own error page
if (err && dataGetInfo) {
return routesUtils.streamUserErrorPage(err, dataGetInfo,
dataRetrievalParams, response, resMetaHeaders, log);
}
// send default error html response
if (err) {
@ -55,7 +55,7 @@ export default function routerWebsite(
}
// no error, stream data
return routesUtils.responseStreamData(null, query,
resMetaHeaders, dataGetInfo, dataRetrievalParams, response,
undefined, log);
});
}
@ -66,7 +66,7 @@ export default function routerWebsite(
if (redirectInfo) {
if (err && redirectInfo.withError) {
return routesUtils.redirectRequestOnError(err,
'HEAD', redirectInfo, null, dataRetrievalParams,
response, resMetaHeaders, log)
}
return routesUtils.redirectRequest(redirectInfo,

View File

@ -9,6 +9,8 @@ import errors, { ArsenalError } from '../errors';
import * as constants from '../constants';
import DataWrapper from '../storage/data/DataWrapper';
import StatsClient from '../metrics/StatsClient';
import { objectKeyByteLimit } from '../constants';
const jsutil = require('../jsutil');
export type CallApiMethod = (
methodName: string,
@ -147,6 +149,15 @@ const XMLResponseBackend = {
'<Error>',
`<Code>${errCode.message}</Code>`,
`<Message>${errCode.description}</Message>`,
);
const invalidArguments = errCode.metadata.get('invalidArguments') || [];
invalidArguments.forEach((invalidArgument, index) => {
const counter = index + 1;
const { ArgumentName, ArgumentValue } = invalidArgument as any;
xml.push(`<ArgumentName${counter}>${ArgumentName}</ArgumentName${counter}>`);
xml.push(`<ArgumentValue${counter}>${ArgumentValue}</ArgumentValue${counter}>`);
});
xml.push(
'<Resource></Resource>',
`<RequestId>${log.getSerializedUids()}</RequestId>`,
'</Error>',
@ -216,9 +227,18 @@ const JSONResponseBackend = {
"requestId": "4442587FB7D0A2F9"
}
*/
const invalidArguments = errCode.metadata.get('invalidArguments') || [];
const invalids = invalidArguments.reduce((acc, invalidArgument, index) => {
const counter = index + 1;
const { ArgumentName, ArgumentValue } = invalidArgument as any;
const name = `ArgumentName${counter}`;
const value = `ArgumentValue${counter}`;
return { ...acc, [name]: ArgumentName, [value]: ArgumentValue };
}, {});
const data = JSON.stringify({
code: errCode.message,
message: errCode.description,
...invalids,
resource: null,
requestId: log.getSerializedUids(),
});
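With two invalid arguments attached to the error's metadata, the JSON body would gain numbered fields alongside code and message (argument names and values illustrative; requestId reuses the sample from the comment above):

{
    "code": "InvalidArgument",
    "message": "Invalid Argument",
    "ArgumentName1": "x-amz-meta-foo", "ArgumentValue1": "bad",
    "ArgumentName2": "x-amz-meta-bar", "ArgumentValue2": "worse",
    "resource": null,
    "requestId": "4442587FB7D0A2F9"
}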
@ -365,12 +385,18 @@ function retrieveData(
response.destroy();
responseDestroyed = true;
};
const _destroyReadable = (readable: http.IncomingMessage | null) => {
// s3-data may send a plain Readable stream, which does not implement destroy()
if (readable && readable.destroy) {
readable.destroy();
}
};
// the S3-client might close the connection while we are processing it
response.once('close', () => {
responseDestroyed = true;
_destroyReadable(currentStream);
});
const {
@ -387,6 +413,7 @@ function retrieveData(
return eachSeries(locations,
(current, next) => data.get(current, response, log,
(err: any, readable: http.IncomingMessage) => {
const cbOnce = jsutil.once(next);
// NB: readable is of IncomingMessage type
if (err) {
log.error('failed to get object', {
@ -394,7 +421,7 @@ function retrieveData(
method: 'retrieveData',
});
_destroyResponse();
return cbOnce(err);
}
// response.isclosed is set by the S3 server. Might happen if
// the S3-client closes the connection before the first request
@ -403,24 +430,24 @@ function retrieveData(
if (responseDestroyed || response.isclosed) {
log.debug(
'response destroyed before readable could stream');
_destroyReadable(readable);
const responseErr = new Error();
// @ts-ignore
responseErr.code = 'ResponseError';
responseErr.message = 'response closed by client request before all data sent';
return cbOnce(responseErr);
}
// readable stream successfully consumed
readable.on('end', () => {
currentStream = null;
log.debug('readable stream end reached');
return cbOnce();
});
// errors on server side with readable stream
readable.on('error', err => {
log.error('error piping data from source');
_destroyResponse();
return cbOnce(err);
});
currentStream = readable;
return readable.pipe(response, { end: false });
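jsutil.once guards the eachSeries callback so that a readable that emits, say, 'error' followed by 'end' cannot advance the series twice. A minimal sketch of the assumed once() semantics:

function once<T extends (...args: any[]) => any>(fn: T): T {
    let called = false;
    return function (this: any, ...args: any[]) {
        if (called) {
            return undefined; // subsequent calls are swallowed
        }
        called = true;
        return fn.apply(this, args);
    } as unknown as T;
}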
@ -1128,6 +1155,9 @@ export function isValidObjectKey(objectKey: string, prefixBlacklist: string[]) {
if (invalidPrefix) {
return { isValid: false, invalidPrefix };
}
if (Buffer.byteLength(objectKey, 'utf8') > objectKeyByteLimit) {
return { isValid: false };
}
return { isValid: true };
}

View File

@ -989,13 +989,14 @@ class DataWrapper {
return this.client.delete(objectGetInfo, log.getSerializedUids(),
err => {
if (err) {
// TODO: sproxydclient and hdclient do not return a standard Arsenal error yet.
if (err.code === 404) {
log.info('no such key in datastore', {
objectGetInfo,
implName: this.implName,
moreRetries: 'no',
});
return cb(errors.ObjNotFound);
}
log.error('delete error from datastore', {
error: err,

View File

@ -1,11 +1,10 @@
const { http, https } = require('httpagent');
const url = require('url');
const AWS = require('aws-sdk');
const HttpsProxyAgent = require('https-proxy-agent');
require("aws-sdk/lib/maintenance_mode_message").suppress = true;
const constants = require('../../constants');
const DataFileBackend = require('./file/DataFileInterface');
const inMemory = require('./in_memory/datastore').backend;
@ -26,8 +25,13 @@ function parseLC(config, vault) {
if (locationObj.type === 'file') {
clients[location] = new DataFileBackend(config);
}
if (locationObj.type === 'vitastor') {
const VitastorBackend = require('./vitastor/VitastorBackend');
clients[location] = new VitastorBackend(location, locationObj.details);
}
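A hypothetical locationConfig entry for the new 'vitastor' type; the details object is handed straight to new VitastorBackend(location, details), and the field names follow the adapter's config shown later in this changeset:

const locationConfig = {
    'std-vitastor': {
        type: 'vitastor',
        details: {
            pool_id: 3,                       // Vitastor pool for object data
            metadata_image: 's3-volume-meta', // image holding the KV metadata
        },
    },
};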
if (locationObj.type === 'scality') {
if (locationObj.details.connector.sproxyd) {
const Sproxy = require('sproxydclient');
clients[location] = new Sproxy({
bootstrap: locationObj.details.connector
.sproxyd.bootstrap,
@ -42,6 +46,7 @@ function parseLC(config, vault) {
});
clients[location].clientType = 'scality';
} else if (locationObj.details.connector.hdclient) {
const Hyperdrive = require('hdclient');
clients[location] = new Hyperdrive.hdcontroller.HDProxydClient(
locationObj.details.connector.hdclient);
clients[location].clientType = 'scality';
@ -77,8 +82,8 @@ function parseLC(config, vault) {
connectionAgent = new HttpsProxyAgent(options);
} else {
connectionAgent = sslEnabled ?
new https.Agent(httpAgentConfig, { maxSockets: false }) :
new http.Agent(httpAgentConfig, { maxSockets: false });
}
const httpOptions = { agent: connectionAgent, timeout: 0 };
const s3Params = {

View File

@ -5,6 +5,7 @@ const { parseTagFromQuery } = require('../../s3middleware/tagging');
const { externalBackendHealthCheckInterval } = require('../../constants');
const DataFileBackend = require('./file/DataFileInterface');
const { createLogger, checkExternalBackend } = require('./external/utils');
const jsutil = require('../../jsutil');
class MultipleBackendGateway {
constructor(clients, metadata, locStorageCheckFn) {
@ -199,11 +200,12 @@ class MultipleBackendGateway {
uploadPart(request, streamingV4Params, stream, size, location, key,
uploadId, partNumber, bucketName, log, cb) {
const client = this.clients[location];
const cbOnce = jsutil.once(cb);
if (client.uploadPart) {
return this.locStorageCheckFn(location, size, log, err => {
if (err) {
return cbOnce(err);
}
return client.uploadPart(request, streamingV4Params, stream,
size, key, uploadId, partNumber, bucketName, log,
@ -217,14 +219,14 @@ class MultipleBackendGateway {
'metric following object PUT failure',
{ error: error.message });
}
return cbOnce(err);
});
}
return cbOnce(null, partInfo);
});
});
}
return cbOnce();
}
listParts(key, uploadId, location, bucketName, partNumberMarker, maxParts,

View File

@ -8,6 +8,7 @@ const getMetaHeaders =
const { prepareStream } = require('../../../s3middleware/prepareStream');
const { createLogger, logHelper, removeQuotes, trimXMetaPrefix } =
require('./utils');
const jsutil = require('../../../jsutil');
const missingVerIdInternalError = errors.InternalError.customizeDescription(
'Invalid state. Please ensure versioning is enabled ' +
@ -317,9 +318,11 @@ class AwsClient {
uploadPart(request, streamingV4Params, stream, size, key, uploadId,
partNumber, bucketName, log, callback) {
let hashedStream = stream;
const cbOnce = jsutil.once(callback);
if (request) {
const partStream = prepareStream(request, streamingV4Params,
this._vault, log, cbOnce);
hashedStream = new MD5Sum();
partStream.pipe(hashedStream);
}
@ -333,7 +336,7 @@ class AwsClient {
if (err) {
logHelper(log, 'error', 'err from data backend ' +
'on uploadPart', err, this._dataStoreName, this.clientType);
return cbOnce(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
`${this.type}: ${err.message}`),
);
@ -347,7 +350,7 @@ class AwsClient {
dataStoreName: this._dataStoreName,
dataStoreETag: noQuotesETag,
};
return cbOnce(null, dataRetrievalInfo);
});
}

View File

@ -1,6 +1,5 @@
const { BlobServiceClient, StorageSharedKeyCredential, AnonymousCredential } = require('@azure/storage-blob');
const { ClientSecretCredential } = require('@azure/identity');
const errors = require('../../../errors').default;
const azureMpuUtils = require('../../../s3middleware/azureHelpers/mpuUtils');
const { validateAndFilterMpuParts } =
@ -8,55 +7,103 @@ const { validateAndFilterMpuParts } =
const { createLogger, logHelper, translateAzureMetaHeaders } =
require('./utils');
const objectUtils = require('../../../s3middleware/objectUtils');
const constants = require('../../../constants');
const packageVersion = require('../../../../package.json').version;
class AzureClient {
static addQueryParams(endpoint, token) {
const url = new URL(endpoint);
const query = token.startsWith('?') ? token.slice(1) : token;
if (!url.search) {
url.search = `?${query}`;
} else if (url.search === '?') {
url.search += query;
} else {
url.search += `&${query}`;
}
return url.toString();
}
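Usage sketch (token values shortened for illustration):

AzureClient.addQueryParams('https://acct.blob.core.windows.net', '?sv=2021&sig=abc');
// => 'https://acct.blob.core.windows.net/?sv=2021&sig=abc'
AzureClient.addQueryParams('https://acct.blob.core.windows.net/?a=1', 'sv=2021');
// => 'https://acct.blob.core.windows.net/?a=1&sv=2021'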
constructor(config) {
this._azureStorageEndpoint = config.azureStorageEndpoint;
this._azureStorageCredentials = config.azureStorageCredentials;
this._azureContainerName = config.azureContainerName;
const cred = (credentialsConfig => {
switch (credentialsConfig.authMethod) {
case 'client-secret':
return new ClientSecretCredential(
credentialsConfig.tenantId,
credentialsConfig.clientId,
credentialsConfig.clientKey,
);
case 'shared-access-signature':
this._azureStorageEndpoint = AzureClient.addQueryParams(
this._azureStorageEndpoint, credentialsConfig.sasToken);
return new AnonymousCredential();
case 'shared-key':
default:
return new StorageSharedKeyCredential(
credentialsConfig.storageAccountName,
credentialsConfig.storageAccessKey,
);
}
})(this._azureStorageCredentials);
const proxyOptions = (() => {
if (!config.proxy || !config.proxy.url) {
return undefined;
}
// NOTE: config.proxy.certs is not supported
const parsedUrl = new URL(config.proxy.url);
return {
host: parsedUrl.host,
port: parsedUrl.port || 80,
username: parsedUrl.username || undefined,
password: parsedUrl.password || undefined,
};
})();
this._client = new BlobServiceClient(this._azureStorageEndpoint, cred, {
keepAliveOptions: {
enable: false, // Enable use of global HTTP agent
},
proxyOptions,
userAgentOptions: {
userAgentPrefix: `${constants.productName}/${packageVersion} `,
},
}).getContainerClient(this._azureContainerName);
this._dataStoreName = config.dataStoreName;
this._bucketMatch = config.bucketMatch;
}
/**
* Run azure method call.
* @param {string} [s3Method] S3 method name
* @param {string} [azureMethod] Azure method name
* @param {ErrorWrapper~Command} [command] Actual command to run
* @param {RequestLogger} [log] Logger
* @param {ErrorWrapper~Cb} [cb] The final callback
* @returns {void}
*
* @callback ErrorWrapper~Command
* @param {azure.ContainerClient} [client] Azure client to use
* @returns {Promise<any>}
*
* @callback ErrorWrapper~Cb
* @param {ArsenalError} [arsenalErr] Error returned by the command
* @param {any} [result] Result of Azure SDK command
* @returns {void}
*/
_errorWrapper(s3Method, azureMethod, command, log, cb) {
if (log) {
log.info(`calling azure ${azureMethod} in ${s3Method}`);
}
command(this._client).then(
result => cb(null, result),
cb,
);
}
_createAzureKey(requestBucketName, requestObjectKey,
@ -119,6 +166,32 @@ class AzureClient {
};
}
/**
* Build Azure HTTP headers for content settings
* @param {object} [properties] The blob properties to set.
* @param {string} [properties.contentType] The MIME content type of the blob.
* The default type is application/octet-stream.
* @param {string} [properties.contentEncoding] The content encodings that have been applied
* to the blob.
* @param {string} [properties.contentLanguage] The natural languages used by this resource.
* @param {string} [properties.cacheControl] The blob's cache control.
* @param {string} [properties.contentDisposition] The blob's content disposition.
* @param {string} [properties.contentMD5] The blob's MD5 hash.
* @returns {BlobHTTPHeaders} The headers
*/
_getAzureContentSettingsHeaders(properties) {
return {
blobContentMD5: properties.contentMD5
? objectUtils.getMD5Buffer(properties.contentMD5)
: undefined,
blobContentType: properties.contentType || undefined,
blobCacheControl: properties.cacheControl || undefined,
blobContentDisposition: properties.contentDisposition || undefined,
blobContentEncoding: properties.contentEncoding || undefined,
blobContentLanguage: properties.contentLanguage || undefined,
};
}
put(stream, size, keyContext, reqUids, callback, skey, metadata) {
const log = createLogger(reqUids);
// before blob is put, make sure there is no ongoing MPU with same key
@ -134,50 +207,59 @@ class AzureClient {
const options = {
metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
keyContext.tagging),
blobHTTPHeaders: this._getAzureContentSettingsHeaders(
keyContext || {}),
};
if (size === 0) {
return this._errorWrapper('put', 'uploadData', async client => {
try {
await client.getBlockBlobClient(azureKey).upload('', 0, options);
return azureKey;
} catch (err) {
logHelper(log, 'error', 'err from Azure PUT data backend',
err, this._dataStoreName);
throw errors.ServiceUnavailable.customizeDescription(
`Error returned from Azure: ${err.message}`);
}
}, log, callback);
}
return this._errorWrapper('put', 'createBlockBlobFromStream', async client => {
try {
await client.getBlockBlobClient(azureKey).upload(() => stream, size, options);
return azureKey;
} catch (err) {
logHelper(log, 'error', 'err from Azure PUT data backend',
err, this._dataStoreName);
throw errors.ServiceUnavailable.customizeDescription(
`Error returned from Azure: ${err.message}`);
}
}, log, callback);
});
}
/**
* Build BlobRequestConditions from azureStreamingOptions
* @param {object} [objectGetInfoOptions] Azure streaming options
* @param {object} [objectGetInfoOptions.accessConditions] Access conditions
* @param {Date} [objectGetInfoOptions.accessConditions.DateUnModifiedSince] Filter objects not
* modified since that date.
* @returns {BlobRequestConditions} Request conditions
*/
_getAzureConditions(objectGetInfoOptions) {
const accessConditions = objectGetInfoOptions.accessConditions || {};
return {
ifUnmodifiedSince: accessConditions.DateUnModifiedSince || undefined,
};
}
head(objectGetInfo, reqUids, callback) {
const log = createLogger(reqUids);
const { key } = objectGetInfo;
return this._errorWrapper('head', 'getBlobProperties', async client => {
try {
const data = await client.getBlockBlobClient(key).getProperties();
return data;
} catch (err) {
let logLevel;
let retError;
if (err.code === 'NotFound') {
@ -185,42 +267,46 @@ class AzureClient {
retError = errors.LocationNotFound;
} else {
logLevel = 'error';
retError = errors.ServiceUnavailable.customizeDescription(
`Error returned from Azure: ${err.message}`);
}
logHelper(log, logLevel, 'err from Azure HEAD data backend',
err, this._dataStoreName);
throw retError;
}
}, log, callback);
}
get(objectGetInfo, range, reqUids, callback) {
const log = createLogger(reqUids);
// for backwards compatibility
const { key, response, azureStreamingOptions } = objectGetInfo;
let rangeStart = 0;
let rangeEnd = undefined;
if (azureStreamingOptions) {
// option coming from api.get()
rangeStart = (typeof azureStreamingOptions.rangeStart === 'string')
? parseInt(azureStreamingOptions.rangeStart, 10)
: azureStreamingOptions.rangeStart;
rangeEnd = (typeof azureStreamingOptions.rangeEnd === 'string')
? parseInt(azureStreamingOptions.rangeEnd, 10)
: azureStreamingOptions.rangeEnd;
} else if (range) {
// option coming from multipleBackend.upload()
rangeStart = (typeof range[0] === 'number') ? range[0] : 0;
rangeEnd = range[1] || undefined;
}
this._errorWrapper('get', 'getBlobToStream', async client => {
try {
const rsp = await client.getBlockBlobClient(key)
.download(rangeStart, rangeEnd - rangeStart + 1 || undefined);
rsp.readableStreamBody.pipe(response);
return response;
} catch (err) {
logHelper(log, 'error', 'err from Azure GET data backend',
err, this._dataStoreName);
throw errors.ServiceUnavailable;
}
}, log, callback);
}
delete(objectGetInfo, reqUids, callback) {
@ -230,44 +316,46 @@ class AzureClient {
objectGetInfo.key;
let options;
if (typeof objectGetInfo === 'object') {
options = {
conditions: this._getAzureConditions(objectGetInfo.options || {}),
};
}
return this._errorWrapper('delete', 'deleteBlobIfExists', async client => {
try {
await client.getBlockBlobClient(key).deleteIfExists(options);
} catch (err) {
if (err.statusCode === 412) {
throw errors.PreconditionFailed;
}
const log = createLogger(reqUids);
logHelper(log, 'error', 'error deleting object from Azure datastore',
err, this._dataStoreName);
throw errors.ServiceUnavailable.customizeDescription(
`Error returned from Azure: ${err.message}`);
}
}, log, callback);
}
healthcheck(location, callback, flightCheckOnStartUp) {
const azureResp = {};
this._errorWrapper('healthcheck', 'checkAzureHealth', async client => {
try {
if (flightCheckOnStartUp) {
await client.createIfNotExists();
} else {
await client.exists();
}
azureResp[location] = {
message: 'Congrats! You can access the Azure storage account',
};
} catch (err) {
azureResp[location] = {
error: err.message,
external: true,
};
}
return azureResp;
}, null, callback);
}
uploadPart(request, streamingV4Params, partStream, size, key, uploadId,
@ -321,9 +409,7 @@ class AzureClient {
completeMPU(jsonList, mdInfo, key, uploadId, bucket, metaHeaders,
contentSettings, tagging, log, callback) {
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
const commitList = jsonList.uncommittedBlocks || [];
let filteredPartsObj;
if (!jsonList.uncommittedBlocks) {
const { storedParts, mpuOverviewKey, splitter } = mdInfo;
@ -336,60 +422,56 @@ class AzureClient {
// part.locations is always array of 1, which contains data info
const subPartIds =
azureMpuUtils.getSubPartIds(part.locations[0], uploadId);
commitList.push(...subPartIds);
});
}
const options = {
blobHTTPHeaders: this._getAzureContentSettingsHeaders(contentSettings || {}),
metadata: translateAzureMetaHeaders(metaHeaders || {}, tagging),
};
return this._errorWrapper('completeMPU', 'commitBlocks', async client => {
try {
await client.getBlockBlobClient(azureKey).commitBlockList(commitList, options);
return {
key: azureKey,
filteredPartsObj,
};
} catch (err) {
logHelper(log, 'error', 'err completing MPU on Azure datastore',
err, this._dataStoreName);
throw errors.ServiceUnavailable.customizeDescription(
`Error returned from Azure: ${err.message}`);
}
}, log, callback);
}
objectPutTagging(key, bucket, objectMD, log, callback) {
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
const azureMD = this._getMetaHeaders(objectMD);
azureMD.tags = JSON.stringify(objectMD.tags);
this._errorWrapper('objectPutTagging', 'setBlobMetadata', async client => {
try {
await client.getBlockBlobClient(azureKey).setMetadata(azureMD);
} catch (err) {
logHelper(log, 'error', 'err putting object tags to Azure backend',
err, this._dataStoreName);
throw errors.ServiceUnavailable;
}
}, log, callback);
}
objectDeleteTagging(key, bucketName, objectMD, log, callback) {
const azureKey = this._createAzureKey(bucketName, key, this._bucketMatch);
const azureMD = this._getMetaHeaders(objectMD);
this._errorWrapper('objectDeleteTagging', 'setBlobMetadata', async client => {
try {
await client.getBlockBlobClient(azureKey).setMetadata(azureMD);
} catch (err) {
logHelper(log, 'error', 'err putting object tags to Azure backend',
err, this._dataStoreName);
throw errors.ServiceUnavailable;
}
}, log, callback);
}
copyObject(request, destLocationConstraintName, sourceKey,
@ -406,54 +488,50 @@ class AzureClient {
let options;
if (storeMetadataParams.metaHeaders) {
options = {
metadata: translateAzureMetaHeaders(storeMetadataParams.metaHeaders),
};
}
// TODO: should we use syncCopyBlob() instead? or use poller.pollUntilDone() to wait until complete?
this._errorWrapper('copyObject', 'startCopyBlob', async client => {
let res;
try {
const poller = await client.getBlockBlobClient(destAzureKey).beginCopyFromURL(
`${this._azureStorageEndpoint}${sourceContainerName}/${sourceKey}`,
options,
);
res = poller.getOperationState().result;
if (res.copyProgress !== 'pending') {
return destAzureKey;
}
} catch (err) {
if (err.code === 'CannotVerifyCopySource') { // TODO: maybe use a constant (or type) from the SDK
logHelper(log, 'error',
`Unable to access ${sourceContainerName} Azure Container`,
err, this._dataStoreName);
throw errors.AccessDenied.customizeDescription(
`Error: Unable to access ${sourceContainerName} Azure Container`);
}
logHelper(log, 'error', 'error from data backend on copyObject',
err, this._dataStoreName);
throw errors.ServiceUnavailable.customizeDescription(
`Error returned from AWS: ${err.message}`);
}
logHelper(log, 'error', 'Azure copy status is pending', {}, this._dataStoreName);
try {
await client.getBlockBlobClient(destAzureKey).abortCopyFromURL(res.copyId);
} catch (err) {
logHelper(log, 'error', 'error from data backend on abortCopyBlob',
err, this._dataStoreName);
throw errors.ServiceUnavailable.customizeDescription(
`Error returned from AWS on abortCopyBlob: ${err.message}`);
}
throw errors.InvalidObjectState.customizeDescription(
'Error: Azure copy status was pending. It has been aborted successfully');
}, log, callback);
}
}

View File

@ -0,0 +1,696 @@
// Zenko CloudServer Vitastor data storage backend adapter
// Copyright (c) Vitaliy Filippov, 2019+
// License: VNPL-1.1 (see README.md for details)
const stream = require('stream');
const vitastor = require('vitastor');
const VOLUME_MAGIC = 'VstS3Vol';
const OBJECT_MAGIC = 'VstS3Obj';
const FLAG_DELETED = 2n;
type Volume = {
id: number,
partial_sectors: {
[key: string]: {
buffer: Buffer,
refs: number,
},
},
header: {
location: string,
bucket: string,
max_size: number,
create_ts: number,
used_ts: number,
size: number,
objects: number,
removed_objects: number,
object_bytes: number,
removed_bytes: number,
},
};
type ObjectHeader = {
size: number,
key: string,
part_num?: number,
};
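For orientation, a sketch of reading back the object record header that _put() writes further below: 8 bytes of magic, 8 bytes of flags (FLAG_DELETED lives here), an 8-byte little-endian JSON length, then the JSON-encoded ObjectHeader:

function parseObjectRecordHeader(buf: Buffer): { flags: bigint, header: ObjectHeader } {
    if (buf.toString('utf-8', 0, 8) !== OBJECT_MAGIC) {
        throw new Error('not an object record');
    }
    const flags = buf.readBigInt64LE(8);
    const jsonLen = Number(buf.readBigInt64LE(16));
    const header = JSON.parse(buf.toString('utf-8', 24, 24 + jsonLen));
    return { flags, header };
}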
class VitastorBackend
{
locationName: string;
config: {
pool_id: number,
metadata_image: string,
metadata_pool_id: number,
metadata_inode_num: number,
size_buckets: number[],
size_bucket_mul: number,
id_batch_size: number,
sector_size: number,
write_chunk_size: number,
read_chunk_size: number,
pack_objects: boolean,
// and also other parameters for vitastor itself
};
next_id: number;
alloc_id: number;
opened: boolean;
on_open: ((...args: any[]) => void)[] | null;
open_error: Error | null;
cli: any;
kv: any;
volumes: {
[bucket: string]: {
[max_size: string]: Volume,
},
};
volumes_by_id: {
[id: string]: Volume,
};
volume_delete_stats: {
[id: string]: {
count: number,
bytes: number,
},
};
constructor(locationName, config)
{
this.locationName = locationName;
this.config = config;
// validate config
this.config.pool_id = Number(this.config.pool_id) || 0;
if (!this.config.pool_id)
throw new Error('pool_id is required for Vitastor');
if (!this.config.metadata_image && (!this.config.metadata_pool_id || !this.config.metadata_inode_num))
throw new Error('metadata_image or metadata_inode is required for Vitastor');
if (!this.config.size_buckets || !this.config.size_buckets.length)
this.config.size_buckets = [ 32*1024, 128*1024, 512*1024, 2*1024*1024, 8*1024*1024 ]; // ascending bucket sizes
this.config.size_bucket_mul = Number(this.config.size_bucket_mul) || 2;
this.config.id_batch_size = Number(this.config.id_batch_size) || 100;
this.config.sector_size = Number(this.config.sector_size) || 0;
if (this.config.sector_size < 4096)
this.config.sector_size = 4096;
this.config.write_chunk_size = Number(this.config.write_chunk_size) || 0;
if (this.config.write_chunk_size < this.config.sector_size)
this.config.write_chunk_size = 4*1024*1024; // 4 MB
this.config.read_chunk_size = Number(this.config.read_chunk_size) || 0;
if (this.config.read_chunk_size < this.config.sector_size)
this.config.read_chunk_size = 4*1024*1024; // 4 MB
this.config.pack_objects = !!this.config.pack_objects;
// state
this.next_id = 1;
this.alloc_id = 0;
this.opened = false;
this.on_open = null;
this.open_error = null;
this.cli = new vitastor.Client(config);
this.kv = new vitastor.KV(this.cli);
// we group objects into volumes by bucket and size
this.volumes = {};
this.volumes_by_id = {};
this.volume_delete_stats = {};
}
async _makeVolumeId()
{
if (this.next_id <= this.alloc_id)
{
return this.next_id++;
}
const id_key = 'id'+this.config.pool_id;
const [ err, prev ] = await new Promise<[ any, string ]>(ok => this.kv.get(id_key, (err, value) => ok([ err, value ])));
if (err && err != vitastor.ENOENT)
{
throw new Error(err);
}
const new_id = (parseInt(prev) || 0) + 1;
this.next_id = new_id;
this.alloc_id = this.next_id + this.config.id_batch_size - 1;
await new Promise((ok, no) => this.kv.set(id_key, this.alloc_id, err => (err ? no(new Error(err)) : ok(null)), cas_old => cas_old === prev));
return this.next_id;
}
async _getVolume(bucketName, size)
{
if (!this.opened)
{
if (this.on_open)
{
await new Promise(ok => this.on_open!.push(ok));
}
else
{
this.on_open = [];
if (this.config.metadata_image)
{
const img = new vitastor.Image(this.cli, this.config.metadata_image);
const info = await new Promise<{ pool_id: number, inode_num: number }>(ok => img.get_info(ok));
this.config.metadata_pool_id = info.pool_id;
this.config.metadata_inode_num = info.inode_num;
}
const kv_config = {};
for (const key in this.config)
{
if (key.substr(0, 3) === 'kv_')
kv_config[key] = this.config[key];
}
this.open_error = await new Promise(ok => this.kv.open(
this.config.metadata_pool_id, this.config.metadata_inode_num,
kv_config, err => ok(err ? new Error(err) : null)
));
this.opened = true;
this.on_open.map(cb => setImmediate(cb));
this.on_open = null;
}
}
if (this.open_error)
{
throw this.open_error;
}
let i;
for (i = 0; i < this.config.size_buckets.length && size >= this.config.size_buckets[i]; i++) {}
let s;
if (i < this.config.size_buckets.length)
s = this.config.size_buckets[i];
else if (this.config.size_bucket_mul > 1)
{
// start from the largest bucket and grow until the object fits
s = this.config.size_buckets[this.config.size_buckets.length-1];
while (size >= s)
s = Math.floor(this.config.size_bucket_mul * s);
}
if (!this.volumes[bucketName])
{
this.volumes[bucketName] = {};
}
if (this.volumes[bucketName][s])
{
return this.volumes[bucketName][s];
}
const new_id = await this._makeVolumeId();
const new_vol = this.volumes[bucketName][s] = {
id: new_id,
// FIXME: partial_sectors should be written with CAS because otherwise we may lose quick deletes
partial_sectors: {},
header: {
location: this.locationName,
bucket: bucketName,
max_size: s,
create_ts: Date.now(),
used_ts: Date.now(),
size: this.config.sector_size, // initial position is right after header
objects: 0,
removed_objects: 0,
object_bytes: 0,
removed_bytes: 0,
},
};
this.volumes_by_id[new_id] = new_vol;
const header_text = JSON.stringify(this.volumes[bucketName][s].header);
const buf = Buffer.alloc(this.config.sector_size);
buf.write(VOLUME_MAGIC + header_text, 0);
await new Promise((ok, no) => this.cli.write(
this.config.pool_id, new_id, 0, buf, err => (err ? no(new Error(err)) : ok(null))
));
await new Promise((ok, no) => this.kv.set(
'vol_'+this.config.pool_id+'_'+new_id, header_text, err => (err ? no(new Error(err)) : ok(null)), cas_old => !cas_old
));
return new_vol;
}
toObjectGetInfo(objectKey, bucketName, storageLocation)
{
return null;
}
_bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs)
{
if ((cur_pos % this.config.sector_size) ||
Math.floor((cur_pos + cur_size) / this.config.sector_size) == Math.floor(cur_pos / this.config.sector_size))
{
const sect_pos = Math.floor(cur_pos / this.config.sector_size) * this.config.sector_size;
const sect = vol.partial_sectors[sect_pos]
? vol.partial_sectors[sect_pos].buffer
: Buffer.alloc(this.config.sector_size);
if (this.config.pack_objects)
{
// Save only if <pack_objects>
if (!vol.partial_sectors[sect_pos])
vol.partial_sectors[sect_pos] = { buffer: sect, refs: 0 };
vol.partial_sectors[sect_pos].refs++;
sector_refs.push(sect_pos);
}
let off = cur_pos % this.config.sector_size;
let i = 0;
for (; i < cur_chunks.length; i++)
{
let copy_len = this.config.sector_size - off;
copy_len = copy_len > cur_chunks[i].length ? cur_chunks[i].length : copy_len;
cur_chunks[i].copy(sect, off, 0, copy_len);
off += copy_len;
if (copy_len < cur_chunks[i].length)
{
cur_chunks[i] = cur_chunks[i].slice(copy_len);
cur_size -= copy_len;
break;
}
else
cur_size -= cur_chunks[i].length;
}
cur_chunks.splice(0, i, sect);
cur_size += this.config.sector_size;
cur_pos = sect_pos;
}
return [ cur_pos, cur_size ];
}
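// Worked example (assuming sector_size = 4096): a write buffered at
// cur_pos = 6000 is not sector-aligned, so it is rebased onto sect_pos =
// 4096: any remembered partial data for that sector is merged with the head
// of cur_chunks into one full sector buffer, cur_pos is rewound to 4096,
// and the consumed bytes are replaced by that 4096-byte buffer at the front
// of the chunk list, so the subsequent write starts on a sector boundary.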
_bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, write_all)
{
const write_pos = cur_pos;
const write_chunks = cur_chunks;
let write_size = cur_size;
cur_chunks = [];
cur_pos += cur_size;
cur_size = 0;
let remain = (cur_pos % this.config.sector_size);
if (remain > 0)
{
cur_pos -= remain;
let last_sect = null;
if (write_all)
{
last_sect = vol.partial_sectors[cur_pos]
? vol.partial_sectors[cur_pos].buffer
: Buffer.alloc(this.config.sector_size);
if (this.config.pack_objects)
{
// Save only if <pack_objects>
if (!vol.partial_sectors[cur_pos])
vol.partial_sectors[cur_pos] = { buffer: last_sect, refs: 0 };
vol.partial_sectors[cur_pos].refs++;
sector_refs.push(cur_pos);
}
}
write_size -= remain;
if (write_size < 0)
write_size = 0;
for (let i = write_chunks.length-1; i >= 0 && remain > 0; i--)
{
if (write_chunks[i].length <= remain)
{
remain -= write_chunks[i].length;
if (write_all)
write_chunks[i].copy(last_sect, remain);
else
cur_chunks.unshift(write_chunks[i]);
write_chunks.pop();
}
else
{
if (write_all)
write_chunks[i].copy(last_sect, 0, write_chunks[i].length - remain);
else
cur_chunks.unshift(write_chunks[i].slice(write_chunks[i].length - remain));
write_chunks[i] = write_chunks[i].slice(0, write_chunks[i].length - remain);
remain = 0;
i++;
}
}
if (write_all)
{
write_chunks.push(last_sect);
write_size += this.config.sector_size;
}
}
for (const chunk of cur_chunks)
{
cur_size += chunk.length;
}
return [ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ];
}
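// Worked example (assuming sector_size = 4096): flushing 10000 buffered
// bytes from an aligned position writes the first 8192 bytes now; the
// trailing 1808 bytes either stay in cur_chunks for the next flush
// (write_all = false) or are zero-padded into a final full sector and
// written immediately (write_all = true).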
/**
* reqUids: string, // request-ids for log, usually joined by ':'
* keyContext: {
* // essentially all object metadata
* bucketName,
* objectKey,
* owner?,
* namespace?,
* partNumber?,
* uploadId?,
* metaHeaders?,
* isDeleteMarker?,
* tagging?,
* contentType?,
* cacheControl?,
* contentDisposition?,
* contentEncoding?,
* },
* callback: (error, objectGetInfo: any) => void,
*/
put(stream, size, keyContext, reqUids, callback)
{
callback = once(callback);
this._getVolume(keyContext.bucketName, size)
.then(vol => this._put(vol, stream, size, keyContext, reqUids, callback))
.catch(callback);
}
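// Minimal usage sketch (wiring is hypothetical, not part of this file):
//
// const backend = new VitastorBackend('vitastor', locationConfig);
// backend.put(fs.createReadStream('file.bin'), size,
//     { bucketName: 'b1', objectKey: 'k1' }, reqUids, (err, objectGetInfo) => {
//         // objectGetInfo ({ volume, offset, hdrlen, size }) must be saved
//         // in the object metadata and passed back to get()/delete() later.
//     });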
_put(vol, stream, size, keyContext, reqUids, callback)
{
const object_header: ObjectHeader = {
size,
key: keyContext.objectKey,
};
if (keyContext.partNumber)
{
object_header.part_num = keyContext.partNumber;
}
// header is: <8 bytes magic> <8 bytes flags> <8 bytes json length> <json>
const hdr_begin_buf = Buffer.alloc(24);
const hdr_json_buf = Buffer.from(JSON.stringify(object_header), 'utf-8');
hdr_begin_buf.write(OBJECT_MAGIC);
hdr_begin_buf.writeBigInt64LE(BigInt(hdr_json_buf.length), 16);
const object_header_buf = Buffer.concat([ hdr_begin_buf, hdr_json_buf ]);
const object_pos = vol.header.size;
const object_get_info = { volume: vol.id, offset: object_pos, hdrlen: object_header_buf.length, size };
let cur_pos = object_pos;
let cur_chunks = [ object_header_buf ];
let cur_size = object_header_buf.length;
let err: Error|null = null;
let waiting = 1; // 1 for end or error, 1 for each write request
vol.header.size += object_header_buf.length + size;
if (!this.config.pack_objects && (vol.header.size % this.config.sector_size))
{
vol.header.size += this.config.sector_size - (vol.header.size % this.config.sector_size);
}
const writeChunk = (last) =>
{
const sector_refs = [];
// Handle partial beginning
[ cur_pos, cur_size ] = this._bufferStart(vol, cur_pos, cur_size, cur_chunks, sector_refs);
// Handle partial end
let write_pos, write_chunks, write_size;
[ write_pos, write_chunks, write_size, cur_pos, cur_size, cur_chunks ] = this._bufferEnd(vol, cur_pos, cur_size, cur_chunks, sector_refs, last);
waiting++;
// FIXME: pool_id: maybe it should be stored in volume metadata to allow to migrate volumes?
this.cli.write(this.config.pool_id, vol.id, write_pos, write_chunks, (res) =>
{
for (const sect of sector_refs)
{
vol.partial_sectors[sect].refs--;
if (!vol.partial_sectors[sect].refs &&
vol.header.size >= sect+this.config.sector_size)
{
// Forget partial data when it's not needed anymore
delete(vol.partial_sectors[sect]);
}
}
waiting--;
if (res)
{
err = new Error(res);
waiting--;
}
if (!waiting)
{
callback(err, err ? null : object_get_info);
}
});
};
// Stream data
stream.on('error', (e) =>
{
err = e;
waiting--;
if (!waiting)
{
callback(err, null);
}
});
stream.on('end', () =>
{
if (err)
{
return;
}
waiting--;
if (cur_size)
{
// write last chunk
writeChunk(true);
}
if (!waiting)
{
callback(null, object_get_info);
}
});
stream.on('data', (chunk) =>
{
if (err)
{
return;
}
cur_chunks.push(chunk);
cur_size += chunk.length;
if (cur_size >= this.config.write_chunk_size)
{
// got a complete chunk, write it out
writeChunk(false);
}
});
}
/**
* objectGetInfo: {
* key: { volume, offset, hdrlen, size }, // from put
* size,
* start,
* dataStoreName,
* dataStoreETag,
* range,
* response: ServerResponse,
* },
* range?: [ start, end ], // like in HTTP - first byte index, last byte index
* callback: (error, readStream) => void,
*/
get(objectGetInfo, range, reqUids, callback)
{
if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
!(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
!objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
{
throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
}
const [ start, end ] = range || [];
if (start < 0 || end < 0 || end != null && start != null && end < start || start >= objectGetInfo.key.size)
{
throw new Error('Invalid range: '+start+'-'+end);
}
let offset = objectGetInfo.key.offset + objectGetInfo.key.hdrlen + (start || 0);
let len = objectGetInfo.key.size - (start || 0);
if (end != null)
{
const len2 = end - (start || 0) + 1;
if (len2 < len)
len = len2;
}
callback(null, new VitastorReadStream(this.cli, objectGetInfo.key.volume, offset, len, this.config));
}
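// Worked example: for an object stored as { offset: 4096, hdrlen: 57,
// size: 1000 } and range [ 100, 199 ], reading starts at volume offset
// 4096 + 57 + 100 = 4253 with len = min(1000 - 100, 199 - 100 + 1) = 100
// bytes, matching HTTP Range semantics (both indexes inclusive).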
/**
* objectGetInfo: {
* key: { volume, offset, hdrlen, size }, // from put
* size,
* start,
* dataStoreName,
* dataStoreETag,
* range,
* response: ServerResponse,
* },
* callback: (error) => void,
*/
delete(objectGetInfo, reqUids, callback)
{
callback = once(callback);
this._delete(objectGetInfo, reqUids)
.then(callback)
.catch(callback);
}
async _delete(objectGetInfo, reqUids)
{
if (!(objectGetInfo instanceof Object) || !objectGetInfo.key ||
!(objectGetInfo.key instanceof Object) || !objectGetInfo.key.volume ||
!objectGetInfo.key.offset || !objectGetInfo.key.hdrlen || !objectGetInfo.key.size)
{
throw new Error('objectGetInfo must be { key: { volume, offset, hdrlen, size } }, but is '+JSON.stringify(objectGetInfo));
}
const in_sect_pos = (objectGetInfo.key.offset % this.config.sector_size);
const sect_pos = objectGetInfo.key.offset - in_sect_pos;
const vol = this.volumes_by_id[objectGetInfo.key.volume];
if (vol && vol.partial_sectors[sect_pos])
{
// The sector may still be written to in corner cases
const sect = vol.partial_sectors[sect_pos];
const flags = sect.buffer.readBigInt64LE(in_sect_pos + 8);
if (!(flags & FLAG_DELETED))
{
const del_stat = this.volume_delete_stats[vol.id] = (this.volume_delete_stats[vol.id] || { count: 0, bytes: 0 });
del_stat.count++;
del_stat.bytes += objectGetInfo.key.size;
sect.buffer.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
sect.refs++;
const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, sect.buffer, ok));
sect.refs--;
if (err)
{
sect.buffer.writeBigInt64LE(0n, in_sect_pos + 8);
throw new Error(err);
}
}
}
else
{
// RMW with CAS
const [ err, buf, version ] = await new Promise<[ any, Buffer, bigint ]>(ok => this.cli.read(
this.config.pool_id, objectGetInfo.key.volume, sect_pos, this.config.sector_size,
(err, buf, version) => ok([ err, buf, version ])
));
if (err)
{
throw new Error(err);
}
// FIXME What if JSON crosses sector boundary? Prevent it if we want to pack objects
const magic = buf.slice(in_sect_pos, in_sect_pos+8).toString();
const flags = buf.readBigInt64LE(in_sect_pos+8);
const json_len = Number(buf.readBigInt64LE(in_sect_pos+16));
let json_hdr;
if (in_sect_pos+24+json_len <= buf.length)
{
try
{
json_hdr = JSON.parse(buf.slice(in_sect_pos+24, in_sect_pos+24+json_len).toString());
}
catch (e)
{
}
}
if (magic !== OBJECT_MAGIC || !json_hdr || json_hdr.size !== objectGetInfo.key.size)
{
throw new Error(
'header of object with size '+objectGetInfo.key.size+
' bytes not found in volume '+objectGetInfo.key.volume+' at '+objectGetInfo.key.offset
);
}
else if (!(flags & FLAG_DELETED))
{
buf.writeBigInt64LE(flags | FLAG_DELETED, in_sect_pos + 8);
const err = await new Promise<any>(ok => this.cli.write(this.config.pool_id, objectGetInfo.key.volume, sect_pos, buf, { version: version+1n }, ok));
if (err == vitastor.EINTR)
{
// Retry
await this._delete(objectGetInfo, reqUids);
}
else if (err)
{
throw new Error(err);
}
else
{
// FIXME: Write deletion statistics to volumes
// FIXME: Implement defragmentation
const del_stat = this.volume_delete_stats[objectGetInfo.key.volume] = (this.volume_delete_stats[objectGetInfo.key.volume] || { count: 0, bytes: 0 });
del_stat.count++;
del_stat.bytes += objectGetInfo.key.size;
}
}
}
}
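// Concurrency note (informal): the read-modify-write path above relies on
// vitastor's versioned CAS write: passing { version: version + 1n } makes
// the write succeed only if the sector is still at `version`; EINTR means
// another writer got there first, and the whole _delete() is retried
// against the re-read sector contents.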
/**
* config: full zenko server config,
* callback: (error, stats) => void, // stats is the returned statistics in arbitrary format
*/
getDiskUsage(config, reqUids, callback)
{
// FIXME: Iterate all volumes and return its sizes and deletion statistics, or maybe just sizes
callback(null, {});
}
}
class VitastorReadStream extends stream.Readable
{
constructor(cli, volume_id, offset, len, config, options = undefined)
{
super(options);
this.cli = cli;
this.volume_id = volume_id;
this.offset = offset;
this.end = offset + len;
this.pos = offset;
this.config = config;
this._reading = false;
}
_read(n)
{
if (this._reading)
{
return;
}
// FIXME: Validate object header
const chunk_size = n && this.config.read_chunk_size < n ? n : this.config.read_chunk_size;
const read_offset = this.pos;
const round_offset = read_offset - (read_offset % this.config.sector_size);
let read_end = this.end <= read_offset+chunk_size ? this.end : read_offset+chunk_size;
const round_end = (read_end % this.config.sector_size)
? read_end + this.config.sector_size - (read_end % this.config.sector_size)
: read_end;
if (round_end <= this.end)
read_end = round_end;
this.pos = read_end;
if (read_end <= read_offset)
{
// EOF
this.push(null);
return;
}
this._reading = true;
this.cli.read(this.config.pool_id, this.volume_id, round_offset, round_end-round_offset, (err, buf, version) =>
{
this._reading = false;
if (err)
{
this.destroy(new Error(err));
return;
}
if (read_offset != round_offset || round_end != read_end)
{
buf = buf.subarray(read_offset-round_offset, buf.length-(round_end-read_end));
}
if (this.push(buf))
{
this._read(n);
}
});
}
}
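// Sketch (hypothetical wiring): the stream produced by get() can be piped
// straight into an HTTP response; backpressure is handled by _read() only
// issuing the next cluster read when push() asks for more:
//
// backend.get(objectGetInfo, [ 0, 1023 ], reqUids, (err, readStream) => {
//     if (err) { response.destroy(err); return; }
//     readStream.pipe(response);
// });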
function once(callback)
{
let called = false;
return function()
{
if (!called)
{
called = true;
callback.apply(null, arguments);
}
};
}
module.exports = VitastorBackend;

View File

@ -177,6 +177,42 @@ class MetadataWrapper {
});
}
updateBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, capability, log, cb) {
log.debug('updating bucket capabilities in metadata');
// When the backend does not support per-capability updates, we update the whole bucket metadata
if (!this.client.putBucketAttributesCapabilities) {
return this.updateBucket(bucketName, bucketMD, log, cb);
}
return this.client.putBucketAttributesCapabilities(bucketName, capabilityName, capacityField, capability,
log, err => {
if (err) {
log.debug('error from metadata', { implName: this.implName,
error: err });
return cb(err);
}
log.trace('bucket capabilities updated in metadata');
return cb(err);
});
}
deleteBucketCapabilities(bucketName, bucketMD, capabilityName, capacityField, log, cb) {
log.debug('deleting bucket capabilities in metadata');
// When the backend does not support per-capability updates, we update the whole bucket metadata
if (!this.client.deleteBucketAttributesCapability) {
return this.updateBucket(bucketName, bucketMD, log, cb);
}
return this.client.deleteBucketAttributesCapability(bucketName, capabilityName, capacityField,
log, err => {
if (err) {
log.debug('error from metadata', { implName: this.implName,
error: err });
return cb(err);
}
log.trace('bucket capabilities deleted in metadata');
return cb(err);
});
}
getBucket(bucketName, log, cb) {
log.debug('getting bucket from metadata');
this.client.getBucketAttributes(bucketName, log, (err, data) => {
@ -190,6 +226,19 @@ class MetadataWrapper {
});
}
getBucketQuota(bucketName, log, cb) {
log.debug('getting bucket quota from metadata');
this.client.getBucketAttributes(bucketName, log, (err, data) => {
if (err) {
log.debug('error from metadata', { implName: this.implName,
error: err });
return cb(err);
}
const bucketInfo = BucketInfo.fromObj(data);
return cb(err, { quota: bucketInfo.getQuota() });
});
}
deleteBucket(bucketName, log, cb) {
log.debug('deleting bucket from metadata');
this.client.deleteBucket(bucketName, log, err => {
@ -275,7 +324,7 @@ class MetadataWrapper {
});
}
deleteObjectMD(bucketName, objName, params, log, cb) {
deleteObjectMD(bucketName, objName, params, log, cb, originOp = 's3:ObjectRemoved:Delete') {
log.debug('deleting object from metadata');
this.client.deleteObject(bucketName, objName, params, log, err => {
if (err) {
@ -285,7 +334,7 @@ class MetadataWrapper {
}
log.debug('object deleted from metadata');
return cb(err);
});
}, originOp);
}
listObject(bucketName, listingParams, log, cb) {
@ -499,6 +548,139 @@ class MetadataWrapper {
return cb();
});
}
/**
* Put bucket indexes
*
* indexSpec format:
* [
* { key:[ { key: "", order: 1 } ... ], name: <id 1>, ... , < backend options> },
* ...
* { key:[ { key: "", order: 1 } ... ], name: <id n>, ... },
* ]
*
*
* @param {String} bucketName bucket name
* @param {Array<Object>} indexSpecs index specification
* @param {Object} log logger
* @param {Function} cb callback
* @return {undefined}
*/
putBucketIndexes(bucketName, indexSpecs, log, cb) {
log.debug('put bucket indexes');
if (typeof this.client.putBucketIndexes !== 'function') {
log.error('error from metadata', {
method: 'putBucketIndexes',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.putBucketIndexes(bucketName, indexSpecs, log, err => {
if (err) {
log.debug('error from metadata', {
method: 'putBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null);
});
}
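// Hypothetical indexSpecs argument matching the documented format:
//
// [
//     { name: 'by-color', key: [ { key: 'value.tags.color', order: 1 } ] },
// ]
//
// metadata.putBucketIndexes('test-bucket', indexSpecs, log, err => { /* ... */ });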
/**
* Delete bucket indexes
*
* indexSpec format:
* [
* { key:[ { key: "", order: 1 } ... ], name: <id 1>, ... , < backend options> },
* ...
* { key:[ { key: "", order: 1 } ... ], name: <id n>, ... },
* ]
*
*
* @param {String} bucketName bucket name
* @param {Array<Object>} indexSpecs index specification
* @param {Object} log logger
* @param {Function} cb callback
* @return {undefined}
*/
deleteBucketIndexes(bucketName, indexSpecs, log, cb) {
log.debug('delete bucket indexes');
if (typeof this.client.deleteBucketIndexes !== 'function') {
log.error('error from metadata', {
method: 'deleteBucketIndexes',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.deleteBucketIndexes(bucketName, indexSpecs, log, err => {
if (err) {
log.error('error from metadata', {
method: 'deleteBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null);
});
}
getBucketIndexes(bucketName, log, cb) {
log.debug('get bucket indexes');
if (typeof this.client.getBucketIndexes !== 'function') {
log.debug('error from metadata', {
method: 'getBucketIndexes',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.getBucketIndexes(bucketName, log, (err, res) => {
if (err) {
log.debug('error from metadata', {
method: 'getBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null, res);
});
}
getIndexingJobs(log, cb) {
if (typeof this.client.getIndexingJobs !== 'function') {
log.debug('error from metadata', {
method: 'getIndexingJobs',
error: errors.NotImplemented,
implName: this.implName,
});
return cb(errors.NotImplemented);
}
return this.client.getIndexingJobs(log, (err, res) => {
if (err) {
log.debug('error from metadata', {
method: 'getBucketIndexes',
error: err,
implName: this.implName,
});
return cb(err);
}
return cb(null, res);
});
}
}
module.exports = MetadataWrapper;

View File

@ -108,9 +108,26 @@ class ListRecordStream extends stream.Readable {
if (value && value.tags) {
value.tags = unescape(value.tags);
}
entry = {
type: 'put', // updates overwrite the whole metadata,
// updates overwrite the whole metadata,
// so they are considered as puts
let type = 'put';
// When the object metadata contains the "deleted"
// flag, it means that the operation is the update
// we perform before the deletion of an object. We
// perform the update to keep all the metadata in the
// oplog. This update is what will be used by backbeat
// as the delete operation, so we set the type of
// operation for this event to a delete.
// Backbeat still receives the actual delete operations
// but they are ignored as they don't contain any metadata.
// The delete operations are kept in case we want to listen
// to delete events coming from special collections other
// than "bucket" collections.
if (value && value.deleted) {
type = 'delete';
}
entry = {
type,
key: itemObj.o2._id,
// updated value may be either stored directly in 'o'
// attribute or in '$set' attribute (supposedly when

File diff suppressed because it is too large

View File

@ -85,7 +85,8 @@ class MongoReadStream extends Readable {
Object.assign(query, searchOptions);
}
this._cursor = c.find(query).sort({
const projection = { 'value.location': 0 };
this._cursor = c.find(query, { projection }).sort({
_id: options.reverse ? -1 : 1,
});
if (options.limit && options.limit !== -1) {
@ -101,15 +102,10 @@ class MongoReadStream extends Readable {
return;
}
this._cursor.next((err, doc) => {
this._cursor.next().then(doc => {
if (this._destroyed) {
return;
}
if (err) {
this.emit('error', err);
return;
}
let key = undefined;
let value = undefined;
@ -133,6 +129,12 @@ class MongoReadStream extends Readable {
value,
});
}
}).catch(err => {
if (this._destroyed) {
return;
}
this.emit('error', err);
return;
});
}
@ -142,7 +144,7 @@ class MongoReadStream extends Readable {
}
this._destroyed = true;
this._cursor.close(err => {
this._cursor.close().catch(err => {
if (err) {
this.emit('error', err);
return;

View File

@ -185,6 +185,48 @@ function formatVersionKey(key, versionId, vFormat) {
return formatVersionKeyV0(key, versionId);
}
function indexFormatMongoArrayToObject(mongoIndexArray) {
const indexObj = [];
for (const idx of mongoIndexArray) {
const keys = [];
let entries = [];
if (idx.key instanceof Map) {
entries = idx.key.entries();
} else {
entries = Object.entries(idx.key);
}
for (const k of entries) {
keys.push({ key: k[0], order: k[1] });
}
indexObj.push({ name: idx.name, keys });
}
return indexObj;
}
function indexFormatObjectToMongoArray(indexObj) {
const mongoIndexArray = [];
for (const idx of indexObj) {
const key = new Map();
for (const k of idx.keys) {
key.set(k.key, k.order);
}
// copy all field except keys from idx
// eslint-disable-next-line
const { keys: _, ...toCopy } = idx;
mongoIndexArray.push(Object.assign(toCopy, { name: idx.name, key }));
}
return mongoIndexArray;
}
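// Round-trip illustration (values hypothetical):
//
// indexFormatMongoArrayToObject([ { name: 'idx1', key: new Map([ [ 'value.x', 1 ] ]) } ])
//     => [ { name: 'idx1', keys: [ { key: 'value.x', order: 1 } ] } ]
//
// indexFormatObjectToMongoArray() converts back, rebuilding `key` as a Map
// so that the key order MongoDB expects is preserved.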
module.exports = {
credPrefix,
@ -195,4 +237,6 @@ module.exports = {
translateConditions,
formatMasterKey,
formatVersionKey,
indexFormatMongoArrayToObject,
indexFormatObjectToMongoArray,
};

View File

@ -29,5 +29,4 @@ server.start(() => {
logger.info('Metadata Proxy Server successfully started. ' +
`Using the ${metadataWrapper.implName} backend`);
});
```

View File

@ -10,21 +10,21 @@ function trySetDirSyncFlag(path) {
const GETFLAGS = 2148034049;
const SETFLAGS = 1074292226;
const FS_DIRSYNC_FL = 65536;
const FS_DIRSYNC_FL = 65536n;
const buffer = Buffer.alloc(8, 0);
const pathFD = fs.openSync(path, 'r');
const status = ioctl(pathFD, GETFLAGS, buffer);
assert.strictEqual(status, 0);
const currentFlags = buffer.readUIntLE(0, 8);
const currentFlags = buffer.readBigInt64LE(0);
const flags = currentFlags | FS_DIRSYNC_FL;
buffer.writeUIntLE(flags, 0, 8);
buffer.writeBigInt64LE(flags, 0);
const status2 = ioctl(pathFD, SETFLAGS, buffer);
assert.strictEqual(status2, 0);
fs.closeSync(pathFD);
const pathFD2 = fs.openSync(path, 'r');
const confirmBuffer = Buffer.alloc(8, 0);
ioctl(pathFD2, GETFLAGS, confirmBuffer);
assert.strictEqual(confirmBuffer.readUIntLE(0, 8),
assert.strictEqual(confirmBuffer.readBigInt64LE(0),
currentFlags | FS_DIRSYNC_FL, 'FS_DIRSYNC_FL not set');
fs.closeSync(pathFD2);
}

View File

@ -120,8 +120,8 @@ export function generateVersionId(info: string, replicationGroupId: string): str
lastSeq = lastTimestamp === ts ? lastSeq + 1 : 0;
lastTimestamp = ts;
// if S3_VERSION_ID_ENCODING_TYPE is "hex", info is used. By default, it is not used.
if (process.env.S3_VERSION_ID_ENCODING_TYPE === 'hex') {
// if S3_VERSION_ID_ENCODING_TYPE is "hex", info is used.
if (process.env.S3_VERSION_ID_ENCODING_TYPE === 'hex' || !process.env.S3_VERSION_ID_ENCODING_TYPE) {
// info field stays as is
} else {
info = ''; // eslint-disable-line

View File

@ -3,54 +3,54 @@
"engines": {
"node": ">=16"
},
"version": "7.70.31",
"version": "8.1.134",
"description": "Common utilities for the S3 project components",
"main": "build/index.js",
"repository": {
"type": "git",
"url": "git+https://github.com/scality/Arsenal.git"
},
"author": "Giorgio Regni",
"author": "Scality Inc.",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/scality/Arsenal/issues"
},
"homepage": "https://github.com/scality/Arsenal#readme",
"dependencies": {
"@azure/identity": "^3.1.1",
"@azure/storage-blob": "^12.12.0",
"@js-sdsl/ordered-set": "^4.4.2",
"@types/async": "^3.2.12",
"@types/utf8": "^3.0.1",
"JSONStream": "^1.0.0",
"@swc/cli": "^0.4.0",
"@swc/core": "^1.7.4",
"agentkeepalive": "^4.1.3",
"ajv": "6.12.2",
"async": "~2.1.5",
"ajv": "^6.12.3",
"async": "^2.6.4",
"aws-sdk": "^2.1005.0",
"azure-storage": "~2.10.7",
"backo": "^1.1.0",
"base-x": "3.0.8",
"base62": "2.0.1",
"bson": "4.0.0",
"debug": "~2.6.9",
"base-x": "^3.0.8",
"base62": "^2.0.1",
"bson": "^4.0.0",
"debug": "^4.1.0",
"diskusage": "^1.1.1",
"fcntl": "github:scality/node-fcntl#0.2.2",
"hdclient": "scality/hdclient#1.1.0",
"fcntl": "git+https://git.yourcmc.ru/vitalif/zenko-fcntl.git",
"httpagent": "git+https://git.yourcmc.ru/vitalif/zenko-httpagent.git#development/1.0",
"https-proxy-agent": "^2.2.0",
"ioredis": "^4.28.5",
"ipaddr.js": "1.9.1",
"ipaddr.js": "^1.9.1",
"joi": "^17.6.0",
"level": "~5.0.1",
"level-sublevel": "~6.6.5",
"mongodb": "^3.0.1",
"node-forge": "^0.7.1",
"prom-client": "14.2.0",
"simple-glob": "^0.2",
"socket.io": "~4.6.1",
"socket.io-client": "~4.6.1",
"sproxydclient": "github:scality/sproxydclient#8.0.4",
"utf8": "2.1.2",
"JSONStream": "^1.0.0",
"level": "^5.0.1",
"level-sublevel": "^6.6.5",
"mongodb": "^5.2.0",
"node-forge": "^1.3.0",
"prom-client": "^14.2.0",
"simple-glob": "^0.2.0",
"socket.io": "^4.6.1",
"socket.io-client": "^4.6.1",
"utf8": "^3.0.0",
"uuid": "^3.0.1",
"werelogs": "scality/werelogs#8.1.4",
"xml2js": "~0.4.23"
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
"xml2js": "^0.4.23"
},
"optionalDependencies": {
"ioctl": "^2.0.2"
@ -59,22 +59,24 @@
"@babel/preset-env": "^7.16.11",
"@babel/preset-typescript": "^7.16.7",
"@sinonjs/fake-timers": "^6.0.1",
"@types/async": "^3.2.12",
"@types/utf8": "^3.0.1",
"@types/ioredis": "^4.28.10",
"@types/jest": "^27.4.1",
"@types/node": "^17.0.21",
"@types/node": "^18.19.41",
"@types/xml2js": "^0.4.11",
"eslint": "^8.12.0",
"eslint-config-airbnb": "6.2.0",
"eslint-config-scality": "scality/Guidelines#7.10.2",
"eslint": "^8.14.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
"eslint-plugin-react": "^4.3.0",
"jest": "^27.5.1",
"mocha": "8.0.1",
"mongodb-memory-server": "^6.0.2",
"mongodb-memory-server": "^8.12.2",
"nyc": "^15.1.0",
"sinon": "^9.0.2",
"temp": "0.9.1",
"temp": "^0.9.1",
"ts-jest": "^27.1.3",
"ts-node": "^10.6.0",
"typescript": "^4.6.2"
"typescript": "^4.9.5"
},
"scripts": {
"lint": "eslint $(git ls-files '*.js')",
@ -82,18 +84,28 @@
"lint_yml": "yamllint $(git ls-files '*.yml')",
"test": "jest tests/unit",
"build": "tsc",
"prepare": "yarn build",
"prepack": "tsc",
"postinstall": "[ -d build ] || swc -d build --copy-files package.json index.ts lib",
"ft_test": "jest tests/functional --testTimeout=120000 --forceExit",
"coverage": "nyc --clean jest tests --coverage --testTimeout=120000 --forceExit",
"build_doc": "cd documentation/listingAlgos/pics; dot -Tsvg delimiterStateChart.dot > delimiterStateChart.svg; dot -Tsvg delimiterMasterV0StateChart.dot > delimiterMasterV0StateChart.svg; dot -Tsvg delimiterVersionsStateChart.dot > delimiterVersionsStateChart.svg"
},
"private": true,
"jest": {
"maxWorkers": 1,
"coverageReporters": [
"json"
],
"collectCoverageFrom": [
"lib/**/*.{js,ts}",
"index.js"
],
"preset": "ts-jest",
"testEnvironment": "node",
"transform": {
"^.\\.ts?$": "ts-jest"
},
"transformIgnorePatterns": [],
"globals": {
"test-jest": {
"diagnostics": {
@ -101,5 +113,12 @@
}
}
}
},
"nyc": {
"tempDirectory": "coverage",
"reporter": [
"lcov",
"text"
]
}
}

View File

@ -0,0 +1,501 @@
const async = require('async');
const assert = require('assert');
const sinon = require('sinon');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = require('../../../../lib/versioning/VersionID').generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const replicationGroupId = 'RG001';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27018 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
let uidCounter = 0;
function generateVersionId() {
return genVID(`${process.pid}.${uidCounter++}`,
replicationGroupId);
}
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface::metadata.deleteObjectMD', () => {
let metadata;
let collection;
function getObjectCount(cb) {
collection.countDocuments()
.then(count => cb(null, count))
.catch(err => cb(err));
}
function getObject(key, cb) {
collection.findOne({
_id: key,
}, {}).then(doc => {
if (!doc) {
return cb(errors.NoSuchKey);
}
return cb(null, doc.value);
}).catch(err => cb(err));
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27018',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should delete non versioned object ${variation.vFormat}`, done => {
const params = {
objName: 'non-deleted-object',
objVal: {
key: 'non-deleted-object',
versionId: 'null',
},
};
const versionParams = {
versioning: false,
versionId: null,
repairMaster: null,
};
return async.series([
next => {
// we put the master version of object
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we put the master version of a second object
params.objName = 'object-to-deleted';
params.objVal.key = 'object-to-deleted';
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we delete the second object
metadata.deleteObjectMD(BUCKET_NAME, params.objName, null, logger, next);
},
next => {
// Object must be removed
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
// only 1 object remaining in db
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 1);
return next();
});
},
], done);
});
it(`Should not throw error when object non existent ${variation.vFormat}`, done => {
const objName = 'non-existent-object';
metadata.deleteObjectMD(BUCKET_NAME, objName, null, logger, err => {
assert.deepStrictEqual(err, null);
return done();
});
});
it(`Should not throw error when bucket non existent ${variation.vFormat}`, done => {
const bucketName = 'non-existent-bucket';
const objName = 'non-existent-object';
metadata.deleteObjectMD(bucketName, objName, null, logger, err => {
assert.deepStrictEqual(err, null);
return done();
});
});
it(`Master should not be updated when non-latest version is deleted ${variation.vFormat}`, done => {
let versionId1 = null;
const params = {
objName: 'test-object',
objVal: {
key: 'test-object',
versionId: 'null',
},
vFormat: 'v0',
};
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
return async.series([
next => {
// we start by creating a new version and master
versionId1 = generateVersionId(this.replicationGroupId);
params.versionId = versionId1;
params.objVal.versionId = versionId1;
versionParams.versionId = versionId1;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we create a second version of the same object (master is updated)
params.objVal.versionId = 'version2';
versionParams.versionId = null;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we delete the first version
metadata.deleteObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 },
logger, next);
},
next => {
// the first version should no longer be available
metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 }, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
// master must be containing second version metadata
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.notStrictEqual(data.versionId, versionId1);
return next();
});
},
next => {
// master and one version remaining in db
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
});
},
], done);
});
it(`Master should be updated when last version is deleted ${variation.vFormat}`, done => {
let versionId1;
let versionId2;
const params = {
objName: 'test-object',
objVal: {
key: 'test-object',
versionId: 'null',
isLast: false,
},
};
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
return async.series([
next => {
// we start by creating a new version and master
versionId1 = generateVersionId(this.replicationGroupId);
params.versionId = versionId1;
params.objVal.versionId = versionId1;
versionParams.versionId = versionId1;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// we create a second version of the same object (master is updated)
// params.objVal.versionId = 'version2';
// versionParams.versionId = null;
versionId2 = generateVersionId(this.replicationGroupId);
params.versionId = versionId2;
params.objVal.versionId = versionId2;
versionParams.versionId = versionId2;
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, next);
},
next => {
// deleting latest version
metadata.deleteObjectMD(BUCKET_NAME, params.objName, { versionId: versionId2 },
logger, next);
},
next => {
// latest version must be removed
metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId2 }, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
// master must be updated to contain first version data
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.versionId, versionId1);
return next();
});
},
next => {
// one master and version in the db
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
});
},
], done);
});
it(`Should fail when version id non existent ${variation.vFormat}`, done => {
const versionId = generateVersionId(this.replicationGroupId);
const objName = 'test-object';
metadata.deleteObjectMD(BUCKET_NAME, objName, { versionId }, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
itOnlyInV1(`Should create master when delete marker removed ${variation.vFormat}`, done => {
const objVal = {
key: 'test-object',
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let firstVersionVersionId;
let deleteMarkerVersionId;
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
if (err) {
return next(err);
}
firstVersionVersionId = JSON.parse(res).versionId;
return next();
}),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
if (err) {
return next(err);
}
deleteMarkerVersionId = JSON.parse(res).versionId;
return next();
});
},
next => {
// using fake clock to override the setTimeout used by the repair
const clock = sinon.useFakeTimers();
return metadata.deleteObjectMD(BUCKET_NAME, 'test-object', { versionId: deleteMarkerVersionId },
logger, () => {
// running the repair callback
clock.runAll();
clock.restore();
return next();
});
},
// waiting for the repair callback to finish
next => setTimeout(next, 100),
// master should be created
next => {
getObject('\x7fMtest-object', (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, 'test-object');
assert.strictEqual(object.versionId, firstVersionVersionId);
assert.strictEqual(object.isDeleteMarker, false);
return next();
});
},
], done);
});
itOnlyInV1(`Should delete master when delete marker becomes last version ${variation.vFormat}`, done => {
const objVal = {
key: 'test-object',
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let versionId;
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, next),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, next);
},
// putting new version on top of delete marker
next => {
objVal.isDeleteMarker = false;
return metadata.putObjectMD(BUCKET_NAME, 'test-object', objVal, params, logger, (err, res) => {
if (err) {
return next(err);
}
versionId = JSON.parse(res).versionId;
return next();
});
},
next => {
// using fake clock to override the setTimeout used by the repair
const clock = sinon.useFakeTimers();
return metadata.deleteObjectMD(BUCKET_NAME, 'test-object', { versionId },
logger, () => {
// running the repair callback
clock.runAll();
clock.restore();
return next();
});
},
// waiting for the repair callback to finish
next => setTimeout(next, 100),
// master must be deleted
next => {
getObject('\x7fMtest-object', err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
], done);
});
it('should delete the object directly if params.doesNotNeedOpogUpdate is true', done => {
const objName = 'object-to-delete';
const objVal = {
key: 'object-to-delete',
versionId: 'null',
};
const versionParams = {
versioning: false,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
metadata.deleteObjectMD(BUCKET_NAME, objName, { doesNotNeedOpogUpdate: true }, logger, next);
},
next => {
metadata.getObjectMD(BUCKET_NAME, objName, null, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
});
},
next => {
getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 0);
return next();
});
},
], done);
});
it('should throw an error if params.doesNotNeedOpogUpdate is true and object does not exist', done => {
const objName = 'non-existent-object';
metadata.deleteObjectMD(BUCKET_NAME, objName, { doesNotNeedOpogUpdate: true }, logger, err => {
assert.deepStrictEqual(err, errors.InternalError);
return done();
});
});
});
});
});

View File

@ -0,0 +1,303 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = versioning.VersionID.generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { formatMasterKey } = require('../../../../lib/storage/metadata/mongoclient/utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const replicationGroupId = 'RG001';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27019 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
let uidCounter = 0;
function generateVersionId() {
return genVID(`${process.pid}.${uidCounter++}`,
replicationGroupId);
}
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface::metadata.getObjectMD', () => {
let metadata;
let collection;
let versionId1;
let versionId2;
let params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
};
function updateMasterObject(objName, versionId, objVal, vFormat, cb) {
const mKey = formatMasterKey(objName, vFormat);
collection.updateOne(
{
_id: mKey,
$or: [{
'value.versionId': {
$exists: false,
},
},
{
'value.versionId': {
$gt: versionId,
},
},
],
},
{
$set: { _id: mKey, value: objVal },
},
{ upsert: true }).then(() => cb(null)).catch(err => cb(err));
}
/**
* Sets the "deleted" property to true
* @param {string} key object name
* @param {Function} cb callback
* @return {undefined}
*/
function flagObjectForDeletion(key, cb) {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false }).then(() => cb()).catch(err => cb(err));
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27019',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, (err, res) => {
if (err) {
return next(err);
}
versionId1 = JSON.parse(res).versionId;
return next(null);
});
},
next => {
metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal,
versionParams, logger, (err, res) => {
if (err) {
return next(err);
}
versionId2 = JSON.parse(res).versionId;
return next(null);
});
},
], done);
});
afterEach(done => {
// reset params
params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
};
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should return latest version of object ${variation.it}`, done =>
metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.versionId, versionId2);
return done();
}));
it(`Should return the specified version of object ${variation.it}`, done =>
metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId: versionId1 }, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.versionId, versionId1);
return done();
}));
it(`Should throw error when version non existent ${variation.it}`, done => {
const versionId = '1234567890';
return metadata.getObjectMD(BUCKET_NAME, params.objName, { versionId }, logger, (err, object) => {
assert.deepStrictEqual(object, undefined);
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
it(`Should throw error when object non existent ${variation.it}`, done => {
const objName = 'non-existent-object';
return metadata.getObjectMD(BUCKET_NAME, objName, null, logger, err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
it(`Should throw error when bucket non existent ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
return metadata.getObjectMD(bucketName, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(object, undefined);
assert.deepStrictEqual(err, errors.NoSuchKey);
return done();
});
});
it(`Should return latest version when master is PHD ${variation.it}`, done => {
async.series([
next => {
const objectName = variation.vFormat === 'v0' ? 'pfx1-test-object' : '\x7fMpfx1-test-object';
// adding isPHD flag to master
const phdVersionId = generateVersionId();
params.objVal.versionId = phdVersionId;
params.objVal.isPHD = true;
updateMasterObject(objectName, phdVersionId, params.objVal,
variation.vFormat, next);
},
// Should return latest object version
next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.versionId, versionId2);
delete params.isPHD;
return next();
}),
], done);
});
it('Should fail to get an object tagged for deletion', done => {
async.series([
next => flagObjectForDeletion(params.objName, next),
next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(object, undefined);
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
}),
], done);
});
itOnlyInV1(`Should return last version when master deleted ${variation.vFormat}`, done => {
const versioningParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
// putting a delete marker as last version
next => {
params.versionId = null;
params.objVal.isDeleteMarker = true;
return metadata.putObjectMD(BUCKET_NAME, params.objName, params.objVal, versioningParams,
logger, next);
},
next => metadata.getObjectMD(BUCKET_NAME, params.objName, null, logger, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, params.objName);
assert.strictEqual(object.isDeleteMarker, true);
params.objVal.isDeleteMarker = null;
return next();
}),
], done);
});
});
});
});

View File

@ -0,0 +1,331 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const genVID = versioning.VersionID.generateVersionId;
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { formatMasterKey, formatVersionKey } = require('../../../../lib/storage/metadata/mongoclient/utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket-batching';
const replicationGroupId = 'RG001';
const N = 10;
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27019 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
let uidCounter = 0;
function generateVersionId() {
return genVID(`${process.pid}.${uidCounter++}`,
replicationGroupId);
}
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0, versioning: false },
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0, versioning: true },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1, versioning: false },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1, versioning: true },
];
describe('MongoClientInterface::metadata.getObjectsMD', () => {
let metadata;
let collection;
let versionId2;
const params = {
key: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
};
function updateMasterObject(objName, versionId, objVal, vFormat, cb) {
const mKey = formatMasterKey(objName, vFormat);
collection.updateOne(
{
_id: mKey,
$or: [{
'value.versionId': {
$exists: false,
},
},
{
'value.versionId': {
$gt: versionId,
},
},
],
},
{
$set: { _id: mKey, value: objVal },
},
{ upsert: true }).then(() => cb(null)).catch(err => cb(err));
}
/**
* Sets the "deleted" property to true
* @param {string} key object name
* @param {Function} cb callback
* @return {undefined}
*/
function flagObjectForDeletion(key, cb) {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false }).then(() => cb()).catch(err => cb(err));
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27019',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' && variation.versioning ? it : it.skip;
describe(`vFormat : ${variation.vFormat}, versioning: ${variation.versioning}`, () => {
let paramsArr = [];
beforeEach(done => {
// reset params
paramsArr = Array.from({ length: N }, (_, i) => ({
key: `pfx1-test-object${i + 1}`,
objVal: {
key: `pfx1-test-object${i + 1}`,
versionId: 'null',
},
}));
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
const versionParams = {
versioning: variation.versioning,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
async.eachSeries(paramsArr, (params, eachCb) => {
metadata.putObjectMD(BUCKET_NAME, params.key, params.objVal,
versionParams, logger, (err, res) => {
if (err) {
return eachCb(err);
}
if (variation.versioning) {
// eslint-disable-next-line no-param-reassign
params.versionId = JSON.parse(res).versionId;
}
return eachCb(null);
});
}, next);
},
next => {
metadata.putObjectMD(BUCKET_NAME, paramsArr[N - 1].key, paramsArr[N - 1].objVal,
versionParams, logger, (err, res) => {
if (err) {
return next(err);
}
if (variation.versioning) {
versionId2 = JSON.parse(res).versionId;
} else {
versionId2 = 'null';
}
return next(null);
});
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`should get ${N} objects${variation.versioning ? '' : ' master'} versions using batching`, done => {
const request = paramsArr.map(({ key, objVal }) => ({
key,
params: {
versionId: variation.versioning ? objVal.versionId : null,
},
}));
metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
assert.strictEqual(err, null);
assert.strictEqual(objects.length, N);
objects.forEach((obj, i) => {
assert.strictEqual(obj.doc.key, paramsArr[i].key);
if (variation.versioning) {
assert.strictEqual(obj.doc.versionId, paramsArr[i].objVal.versionId);
}
});
return done();
});
});
it('should not throw an error if object or version is inexistent and return null doc', done => {
const request = [{
key: 'nonexistent',
params: {
versionId: variation.versioning ? 'nonexistent' : null,
},
}];
metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
assert.strictEqual(err, null);
assert.strictEqual(objects.length, 1);
assert.strictEqual(objects[0].doc, null);
done();
});
});
it(`should return latest version when master is PHD ${variation.it}`, done => {
if (!variation.versioning) {
return done();
}
const request = paramsArr.map(({ key, objVal }) => ({
key,
params: {
versionId: variation.versioning ? objVal.versionId : null,
},
}));
return async.series([
next => {
let objectName = null;
if (variation.versioning) {
objectName =
formatVersionKey(paramsArr[N - 1].key, paramsArr[N - 1].versionId, variation.vFormat);
} else {
objectName = formatMasterKey(paramsArr[N - 1].key, variation.vFormat);
}
// adding isPHD flag to master
const phdVersionId = generateVersionId();
paramsArr[N - 1].objVal.versionId = phdVersionId;
paramsArr[N - 1].objVal.isPHD = true;
updateMasterObject(objectName, phdVersionId, paramsArr[N - 1].objVal,
variation.vFormat, next);
},
// Should return latest object version
next => metadata.getObjectsMD(BUCKET_NAME, request, logger, (err, objects) => {
assert.deepStrictEqual(err, null);
objects.forEach((obj, i) => {
assert.strictEqual(obj.doc.key, paramsArr[i].objVal.key);
if (variation.versioning && i === N - 1) {
assert.strictEqual(obj.doc.versionId, versionId2);
} else {
assert.strictEqual(obj.doc.versionId, paramsArr[i].objVal.versionId);
}
});
delete params.isPHD;
return next();
}),
], done);
});
it('should fail to get an object tagged for deletion', done => {
const key = paramsArr[0].key;
flagObjectForDeletion(key, err => {
assert.ifError(err);
metadata.getObjectsMD(BUCKET_NAME, [{ key }], logger, (err, object) => {
assert.strictEqual(err, null);
assert.strictEqual(object[0].doc, null);
done();
});
});
});
itOnlyInV1(`Should return last version when master deleted ${variation.vFormat}`, done => {
const versioningParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
// putting a delete marker as last version
next => {
paramsArr[0].versionId = null;
paramsArr[0].objVal.isDeleteMarker = true;
return metadata.putObjectMD(BUCKET_NAME, paramsArr[0].key, paramsArr[0].objVal,
versioningParams, logger, next);
},
next => metadata.getObjectsMD(BUCKET_NAME, [{ key: paramsArr[0].key }], logger, (err, objects) => {
assert.strictEqual(err, null);
assert.strictEqual(objects[0].doc.key, paramsArr[0].key);
assert.strictEqual(objects[0].doc.isDeleteMarker, true);
paramsArr[0].objVal.isDeleteMarker = null;
return next();
}),
], done);
});
});
});
});

View File

@ -0,0 +1,744 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { assertContents, flagObjectForDeletion, makeBucketMD, putBulkObjectVersions } = require('./utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-current-bucket';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface::metadata.listLifecycleObject::current', () => {
let metadata;
let collection;
const expectedVersionIds = {};
const location1 = 'loc1';
const location2 = 'loc2';
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
[BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
describe(`bucket format version: ${v}`, () => {
beforeEach(done => {
const bucketMD = makeBucketMD(BUCKET_NAME);
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
metadata.client.defaultBucketKeyFormat = v;
async.series([
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
const objName = 'pfx1-test-object';
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
dataStoreName: location1,
};
const nbVersions = 5;
const timestamp = 0;
putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
nbVersions, timestamp, logger, (err, data) => {
expectedVersionIds[objName] = data.lastVersionId;
return next(err);
});
/* eslint-disable max-len */
// The following versions are created:
// { "_id" : "Mpfx1-test-object", "value" : { "key" : "pfx1-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:00.005Z" } }
// { "_id" : "Vpfx1-test-object{sep}id4", "value" : { "key" : "pfx1-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:00.005Z" } }
// { "_id" : "Vpfx1-test-object{sep}id3", "value" : { "key" : "pfx1-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:00.004Z" } }
// { "_id" : "Vpfx1-test-object{sep}id2", "value" : { "key" : "pfx1-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:00.003Z" } }
// { "_id" : "Vpfx1-test-object{sep}id1", "value" : { "key" : "pfx1-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:00.002Z" } }
// { "_id" : "Vpfx1-test-object{sep}id0", "value" : { "key" : "pfx1-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:00.001Z" } }
/* eslint-enable max-len */
},
next => {
const objName = 'pfx2-test-object';
const objVal = {
key: 'pfx2-test-object',
versionId: 'null',
dataStoreName: location2,
};
const nbVersions = 5;
const timestamp = 2000;
putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
nbVersions, timestamp, logger, (err, data) => {
expectedVersionIds[objName] = data.lastVersionId;
return next(err);
});
/* eslint-disable max-len */
// The following versions are created:
// { "_id" : "Mpfx2-test-object", "value" : { "key" : "pfx2-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:02.005Z" } }
// { "_id" : "Vpfx2-test-object{sep}id4", "value" : { "key" : "pfx2-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:02.005Z" } }
// { "_id" : "Vpfx2-test-object{sep}id3", "value" : { "key" : "pfx2-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:02.004Z" } }
// { "_id" : "Vpfx2-test-object{sep}id2", "value" : { "key" : "pfx2-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:02.003Z" } }
// { "_id" : "Vpfx2-test-object{sep}id1", "value" : { "key" : "pfx2-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:02.002Z" } }
// { "_id" : "Vpfx1-test-object{sep}id0", "value" : { "key" : "pfx2-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:02.001Z" } }
/* eslint-enable max-len */
},
next => {
const objName = 'pfx3-test-object';
const objVal = {
key: 'pfx3-test-object',
versionId: 'null',
dataStoreName: location1,
};
const nbVersions = 5;
const timestamp = 1000;
putBulkObjectVersions(metadata, BUCKET_NAME, objName, objVal, versionParams,
nbVersions, timestamp, logger, (err, data) => {
expectedVersionIds[objName] = data.lastVersionId;
return next(err);
});
/* eslint-disable max-len */
// The following versions are created:
// { "_id" : "Mpfx3-test-object", "value" : { "key" : "pfx3-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:01.005Z" } }
// { "_id" : "Vpfx3-test-object{sep}id4", "value" : { "key" : "pfx3-test-object", "versionId" : "vid4", "last-modified" : "1970-01-01T00:00:01.005Z" } }
// { "_id" : "Vpfx3-test-object{sep}id3", "value" : { "key" : "pfx3-test-object", "versionId" : "vid3", "last-modified" : "1970-01-01T00:00:01.004Z" } }
// { "_id" : "Vpfx3-test-object{sep}id2", "value" : { "key" : "pfx3-test-object", "versionId" : "vid2", "last-modified" : "1970-01-01T00:00:01.003Z" } }
// { "_id" : "Vpfx3-test-object{sep}id1", "value" : { "key" : "pfx3-test-object", "versionId" : "vid1", "last-modified" : "1970-01-01T00:00:01.002Z" } }
// { "_id" : "Vpfx3-test-object{sep}id0", "value" : { "key" : "pfx3-test-object", "versionId" : "vid0", "last-modified" : "1970-01-01T00:00:01.001Z" } }
/* eslint-enable max-len */
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it('Should list current versions of objects', done => {
const params = {
listingType: 'DelimiterCurrent',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions of objects excluding keys stored in location2', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location2,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 2);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions of objects excluding keys stored in location1', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions of objects with prefix and excluding keys stored in location2', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location2,
prefix: 'pfx3',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return truncated list of current versions excluding keys stored in location2', done => {
const params = {
listingType: 'DelimiterCurrent',
excludedDataStoreName: location2,
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.NextMarker, 'pfx1-test-object');
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
params.marker = 'pfx1-test-object';
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
});
it('Should return empty list when beforeDate is before the objects creation date', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:00.000Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it('Should return the current version modified before 1970-01-01T00:00:00.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:00.010Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return the current versions modified before 1970-01-01T00:00:01.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:01.010Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 2);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return the current versions modified before 1970-01-01T00:00:02.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:02.010Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should truncate the list of current versions modified before 1970-01-01T00:00:01.010Z', done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:01.010Z',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.NextMarker, 'pfx1-test-object');
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
params.marker = 'pfx1-test-object';
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
});
it('Should truncate list of current versions of objects', done => {
const params = {
listingType: 'DelimiterCurrent',
maxKeys: 2,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx2-test-object');
assert.strictEqual(data.Contents.length, 2);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list the following current versions of objects', done => {
const params = {
listingType: 'DelimiterCurrent',
marker: 'pfx2-test-object',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should list current versions that start with prefix', done => {
const params = {
listingType: 'DelimiterCurrent',
prefix: 'pfx2',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should return the list of current versions modified before 1970-01-01T00:00:01.010Z with prefix pfx1',
done => {
const params = {
listingType: 'DelimiterCurrent',
beforeDate: '1970-01-01T00:00:01.010Z',
maxKeys: 1,
prefix: 'pfx1',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
];
assertContents(data.Contents, expected);
return done();
});
});
it('Should not list deleted version', done => {
const objVal = {
'key': 'pfx4-test-object',
'last-modified': new Date(0).toISOString(),
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
};
let deletedVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
deletedVersionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.deleteObjectMD(BUCKET_NAME, objVal.key,
{ versionId: deletedVersionId }, logger, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return next();
}),
], done);
});
it('Should not list object with delete marker', done => {
const objVal = {
'key': 'pfx4-test-object',
'last-modified': new Date(0).toISOString(),
};
const dmObjVal = { ...objVal, isDeleteMarker: true };
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams, logger, next),
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, dmObjVal, versionParams, logger, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return next();
}),
], done);
});
it('Should not list phd master key when listing current versions', done => {
const objVal = {
'key': 'pfx4-test-object',
'versionId': 'null',
'last-modified': new Date(0).toISOString(),
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
prefix: 'pfx4',
};
let versionId;
let lastVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, 'pfx4-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
versionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx4-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx4-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents[0].value.VersionId, versionId);
return next();
}),
], done);
});
it('Should not list the current version tagged for deletion', done => {
const objVal = {
'key': 'pfx4-test-object',
'last-modified': new Date(0).toISOString(),
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterCurrent',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(collection, objVal.key, next),
next => metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const expected = [
{
key: 'pfx1-test-object',
LastModified: '1970-01-01T00:00:00.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx1-test-object'],
},
{
key: 'pfx2-test-object',
LastModified: '1970-01-01T00:00:02.005Z',
dataStoreName: location2,
VersionId: expectedVersionIds['pfx2-test-object'],
},
{
key: 'pfx3-test-object',
LastModified: '1970-01-01T00:00:01.005Z',
dataStoreName: location1,
VersionId: expectedVersionIds['pfx3-test-object'],
},
];
assertContents(data.Contents, expected);
return next();
}),
], done);
});
});
});
});

File diff suppressed because it is too large


@@ -0,0 +1,215 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { makeBucketMD } = require('./utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-bucket-null';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface::metadata.listLifecycleObject::nullVersion', () => {
let metadata;
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
[BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
describe(`bucket format version: ${v}`, () => {
beforeEach(done => {
const bucketMD = makeBucketMD(BUCKET_NAME);
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
metadata.client.defaultBucketKeyFormat = v;
async.series([
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
next => {
const objName = 'key0';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'versionId': 'null',
'isNull': true,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
const objName = 'key1';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'versionId': 'null',
'isNull': true,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
const objName = 'key1';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
// key2 simulates a scenario where:
// 1) bucket is versioned
// 2) put object key2
// 3) bucket versioning gets suspended
// 4) put object key2
// result:
// {
// "_id" : "Mkey0",
// "value" : {
// "key" : "key2",
// "isNull" : true,
// "versionId" : "<VersionId2>",
// "last-modified" : "2023-07-11T14:16:00.151Z",
// }
// },
// {
// "_id" : "Vkey0\u0000<VersionId1>",
// "value" : {
// "key" : "key2",
// "versionId" : "<VersionId1>",
// "tags" : {
// },
// "last-modified" : "2023-07-11T14:15:36.713Z",
// }
// },
next => {
const objName = 'key2';
const timestamp = 0;
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'last-modified': lastModified,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, versionParams, logger, next);
},
next => {
const objName = 'key2';
const timestamp = 0;
const params = {
versionId: '',
};
const lastModified = new Date(timestamp).toISOString();
const objVal = {
'key': objName,
'last-modified': lastModified,
'isNull': true,
};
return metadata.putObjectMD(BUCKET_NAME, objName, objVal, params, logger, next);
},
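// note: `versionId: ''` above (as opposed to a concrete id or null) is
// how this metadata layer is asked to write a null version, matching the
// "versioning suspended" step of the scenario described in the comment
// block above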
], done);
});
afterEach(done => metadata.deleteBucket(BUCKET_NAME, logger, done));
it('Should list the null current version and set IsNull to true', done => {
const params = {
listingType: 'DelimiterCurrent',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 3);
// check that key0 has a null current version
const firstKey = data.Contents[0];
assert.strictEqual(firstKey.key, 'key0');
assert.strictEqual(firstKey.value.IsNull, true);
// check that key1 has no null current version
const secondKey = data.Contents[1];
assert.strictEqual(secondKey.key, 'key1');
assert(!secondKey.value.IsNull);
// check that key2 has a null current version
const thirdKey = data.Contents[2];
assert.strictEqual(thirdKey.key, 'key2');
assert.strictEqual(thirdKey.value.IsNull, true);
return done();
});
});
it('Should list the null non-current version and set IsNull to true', done => {
const params = {
listingType: 'DelimiterNonCurrent',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 2);
// check that key1 has a null non-current version
const firstKey = data.Contents[0];
assert.strictEqual(firstKey.key, 'key1');
assert.strictEqual(firstKey.value.IsNull, true);
// check that key2 has no null non-current version
const secondKey = data.Contents[1];
assert.strictEqual(secondKey.key, 'key2');
assert(!secondKey.value.IsNull);
return done();
});
});
});
});
});


@@ -0,0 +1,455 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const MetadataWrapper =
require('../../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const { makeBucketMD, putBulkObjectVersions } = require('./utils');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-lifecycle-list-orphan-bucket';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface::metadata.listLifecycleObject::orphan', () => {
let metadata;
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
[BucketVersioningKeyFormat.v0, BucketVersioningKeyFormat.v1].forEach(v => {
describe(`bucket format version: ${v}`, () => {
beforeEach(done => {
const bucketMD = makeBucketMD(BUCKET_NAME);
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
metadata.client.defaultBucketKeyFormat = v;
async.series([
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
next => {
const keyName = 'pfx0-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(0).toISOString(), // 1970-01-01T00:00:00.000Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
},
nbVersions: 1,
};
const timestamp = 0;
putBulkObjectVersions(metadata, BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, timestamp, logger, next);
},
next => {
const params = {
objName: 'pfx2-test-object',
objVal: {
key: 'pfx2-test-object',
versionId: 'null',
},
nbVersions: 1,
};
const timestamp = 0;
putBulkObjectVersions(metadata, BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, timestamp, logger, next);
},
next => {
const keyName = 'pfx2-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(2).toISOString(), // 1970-01-01T00:00:00.002Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const keyName = 'pfx3-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(0).toISOString(), // 1970-01-01T00:00:00.000Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const keyName = 'pfx4-test-object';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(5).toISOString(), // 1970-01-01T00:00:00.005Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
next => {
const keyName = 'pfx4-test-object2';
const objVal = {
'key': keyName,
'isDeleteMarker': true,
'last-modified': new Date(6).toISOString(), // 1970-01-01T00:00:00.006Z
};
const params = {
versioning: true,
};
return metadata.putObjectMD(BUCKET_NAME, keyName, objVal, params, logger, next);
},
], done);
});
/* eslint-disable max-len */
// { "_id" : "Mpfx1-test-object", "value" : { "key" : "pfx1-test-object", "versionId" : "v1", "last-modified" : "1970-01-01T00:00:00.001Z" } }
// { "_id" : "Vpfx0-test-object{sep}v0", "value" : { "key" : "pfx0-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.000Z", "versionId" : "v0" } }
// { "_id" : "Vpfx1-test-object{sep}v1", "value" : { "key" : "pfx1-test-object", "versionId" : "v1", "last-modified" : "1970-01-01T00:00:00.001Z" } }
// { "_id" : "Vpfx2-test-object{sep}v3", "value" : { "key" : "pfx2-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.002Z", "versionId" : "v3" } }
// { "_id" : "Vpfx2-test-object{sep}v2", "value" : { "key" : "pfx2-test-object", "versionId" : "v2", "last-modified" : "1970-01-01T00:00:00.001Z" } }
// { "_id" : "Vpfx3-test-object{sep}v4", "value" : { "key" : "pfx3-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.000Z", "versionId" : "v4" } }
// { "_id" : "Vpfx4-test-object{sep}v5", "value" : { "key" : "pfx4-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.005Z", "versionId" : "v5" } }
// { "_id" : "Vpfx4-test-object2{sep}v6", "value" : { "key" : "pfx4-test-object", "isDeleteMarker" : true, "last-modified" : "1970-01-01T00:00:00.006Z", "versionId" : "v6" } }
/* eslint-enable max-len */
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it('Should list orphan delete markers', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 4);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx4-test-object');
assert.strictEqual(data.Contents[3].key, 'pfx4-test-object2');
return done();
});
});
it('Should return empty list when no orphan delete marker is older than beforeDate', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
beforeDate: '1970-01-01T00:00:00.000Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it('Should list orphan delete markers older than 1970-01-01T00:00:00.003Z', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
beforeDate: '1970-01-01T00:00:00.003Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
return done();
});
});
it('Should return the first part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx0-test-object');
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
return done();
});
});
it('Should return the second part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx0-test-object',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx3-test-object');
return done();
});
});
it('Should return the third part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx3-test-object',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.NextMarker, 'pfx4-test-object');
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
return done();
});
});
it('Should return the fourth part of the orphan delete markers listing', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx4-test-object',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object2');
return done();
});
});
it('Should list the two first orphan delete markers', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
return done();
});
});
it('Should list the four first orphan delete markers', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 4,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 4);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx4-test-object');
assert.strictEqual(data.Contents[3].key, 'pfx4-test-object2');
return done();
});
});
it('Should return an empty list if no orphan delete marker starts with prefix pfx2', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
prefix: 'pfx2',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it('Should list orphan delete markers that start with prefix pfx4', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
prefix: 'pfx4',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx4-test-object2');
return done();
});
});
it('Should return the first orphan delete marker version that starts with prefix', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
prefix: 'pfx4',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.NextMarker, 'pfx4-test-object');
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
return done();
});
});
it('Should return the following orphan delete marker version that starts with prefix', done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
marker: 'pfx4-test-object',
prefix: 'pfx4',
maxKeys: 1,
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert(!data.NextMarker);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object2');
return done();
});
});
it('Should return the truncated list of orphan delete markers older than 1970-01-01T00:00:00.006Z',
done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
beforeDate: '1970-01-01T00:00:00.006Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
return done();
});
});
it('Should return the following list of orphan delete markers older than 1970-01-01T00:00:00.006Z',
done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
beforeDate: '1970-01-01T00:00:00.006Z',
marker: 'pfx3-test-object',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, false);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx4-test-object');
return done();
});
});
it('Should return the truncated list of orphan delete markers older than 1970-01-01T00:00:00.001Z',
done => {
const params = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: 2,
beforeDate: '1970-01-01T00:00:00.001Z',
};
return metadata.listLifecycleObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.IsTruncated, true);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx0-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx3-test-object');
assert.strictEqual(data.NextMarker, 'pfx3-test-object');
return done();
});
});
});
});
});


@@ -0,0 +1,104 @@
const async = require('async');
const BucketInfo = require('../../../../../lib/models/BucketInfo').default;
const assert = require('assert');
/**
* Puts multiple versions of an object
* @param {Object} metadata - metadata client
* @param {String} bucketName - bucket name
* @param {String} objName - object key
* @param {Object} objVal - object metadata
* @param {Object} params - versioning parameters
* @param {number} versionNb - number of versions to put
* @param {number} timestamp - used for last-modified
* @param {Object} logger - a Logger instance
* @param {Function} cb - callback
* @returns {undefined}
*/
function putBulkObjectVersions(metadata, bucketName, objName, objVal, params, versionNb, timestamp, logger, cb) {
let count = 0;
const versionIds = [];
return async.whilst(
() => count < versionNb,
cbIterator => {
count++;
const lastModified = new Date(timestamp + count).toISOString();
const finalObjectVal = Object.assign(objVal, { 'last-modified': lastModified });
return metadata.putObjectMD(bucketName, objName, finalObjectVal, params, logger, (err, data) => {
if (err) {
return cbIterator(err, versionIds);
}
versionIds.push(JSON.parse(data).versionId);
return cbIterator(null, versionIds);
});
}, (err, expectedVersionIds) => {
// The last version is removed since it represents the current version.
const lastVersionId = expectedVersionIds.pop();
// the array is reversed to align with the version order (latest to oldest).
expectedVersionIds.reverse();
return cb(err, { lastVersionId, expectedVersionIds });
});
}
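// Example usage (a sketch; `metadata` and `logger` are assumed to be set up
// as in the test suites that import this helper):
//
// putBulkObjectVersions(metadata, 'my-bucket', 'my-key', { key: 'my-key' },
//     { versioning: true, versionId: null, repairMaster: null },
//     3, 0, logger, (err, res) => {
//         // res.lastVersionId: version id of the current (latest) version
//         // res.expectedVersionIds: non-current version ids, newest first
//     });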
function makeBucketMD(bucketName) {
return BucketInfo.fromObj({
_name: bucketName,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
}
function assertContents(contents, expected) {
assert.strictEqual(contents.length, expected.length);
contents.forEach((c, i) => {
assert.strictEqual(c.key, expected[i].key);
assert.strictEqual(c.value.LastModified, expected[i].LastModified);
assert.strictEqual(c.value.staleDate, expected[i].staleDate);
assert.strictEqual(c.value.dataStoreName, expected[i].dataStoreName);
if (expected[i].VersionId) {
assert.strictEqual(c.value.VersionId, expected[i].VersionId);
}
});
}
/**
* Sets the "deleted" property to true
* @param {Object} collection - collection to be updated
* @param {string} key - object name
* @param {Function} cb - callback
* @return {undefined}
*/
function flagObjectForDeletion(collection, key, cb) {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false })
.then(() => cb())
.catch(err => cb(err));
}
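// After flagging, every document whose value.key matches carries
// `value.deleted: true`; the get and listing paths under test treat such
// entries as pending deletion and skip them.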
module.exports = {
putBulkObjectVersions,
makeBucketMD,
assertContents,
flagObjectForDeletion,
};


@@ -0,0 +1,572 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const { versioning } = require('../../../../index');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const sinon = require('sinon');
const MongoReadStream = require('../../../../lib/storage/metadata/mongoclient/readStream');
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27020 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
describe('MongoClientInterface::metadata.listObject', () => {
let metadata;
let collection;
/**
* Puts multiple versions of an object
* @param {String} bucketName bucket name
* @param {String} objName object key
* @param {Object} objVal object metadata
* @param {Object} params versioning parameters
* @param {number} versionNb number of versions to put
* @param {Function} cb callback
* @returns {undefined}
*/
function putBulkObjectVersions(bucketName, objName, objVal, params, versionNb, cb) {
let count = 0;
async.whilst(
() => count < versionNb,
cbIterator => {
count++;
// eslint-disable-next-line
return metadata.putObjectMD(bucketName, objName, objVal, params,
logger, cbIterator);
}, cb);
}
/**
* Sets the "deleted" property to true
* @param {string} key object name
* @param {Function} cb callback
* @return {undefined}
*/
function flagObjectForDeletion(key, cb) {
collection.updateMany(
{ 'value.key': key },
{ $set: { 'value.deleted': true } },
{ upsert: false }).then(() => cb()).catch(err => cb(err));
}
function customListingParser(entries) {
return entries.map(entry => {
const tmp = JSON.parse(entry.value);
return tmp;
});
}
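// (shape assumed from the custom-parser test below: raw listing entries are
// { key, value } pairs whose `value` is the serialized JSON metadata string,
// so a custom parser returns the deserialized objects)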
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
const versionParams = {
versioning: true,
versionId: null,
repairMaster: null,
};
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
next => {
const params = {
objName: 'pfx1-test-object',
objVal: {
key: 'pfx1-test-object',
versionId: 'null',
location: [{
start: 0,
size: 150,
dataStoreETag: 'etag',
dataStoreVersionId: 'versionId',
}],
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx2-test-object',
objVal: {
key: 'pfx2-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
next => {
const params = {
objName: 'pfx3-test-object',
objVal: {
key: 'pfx3-test-object',
versionId: 'null',
},
nbVersions: 5,
};
putBulkObjectVersions(BUCKET_NAME, params.objName, params.objVal, versionParams,
params.nbVersions, next);
},
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should list master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 3);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
assert.strictEqual(data.Contents[2].key, 'pfx3-test-object');
return done();
});
});
it(`Should truncate list of master versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 2,
};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 2);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[1].key, 'pfx2-test-object');
return done();
});
});
it(`Should list master versions of objects that start with prefix ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
prefix: 'pfx2',
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Contents.length, 1);
assert.strictEqual(data.Contents[0].key, 'pfx2-test-object');
return done();
});
});
it(`Should return empty results when bucket non existent (master) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Contents.length, 0);
return done();
});
});
it(`Should list all versions of objects ${variation.it}`, done => {
const bucketName = BUCKET_NAME;
const params = {
listingType: 'DelimiterVersions',
maxKeys: 1000,
};
const versionsPerKey = {};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 15);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
return done();
});
});
it(`Should truncate list of versions of objects ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 5,
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
return done();
});
});
it(`Should list versions of objects that start with prefix ${variation.it}`, done => {
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
prefix: 'pfx2',
};
const versionsPerKey = {};
return metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(data.Versions.length, 5);
data.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
return done();
});
});
it(`Should return empty results when bucket not existing (version) ${variation.it}`, done => {
const bucketName = 'non-existent-bucket';
const params = {
listingType: 'DelimiterVersions',
maxKeys: 100,
};
return metadata.listObject(bucketName, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
assert(data);
assert.strictEqual(data.Versions.length, 0);
return done();
});
});
it(`Should check entire list with pagination (version) ${variation.it}`, done => {
const versionsPerKey = {};
const bucketName = BUCKET_NAME;
const get = (maxKeys, keyMarker, versionIdMarker, cb) => metadata.listObject(bucketName, {
listingType: 'DelimiterVersions',
maxKeys,
keyMarker,
versionIdMarker,
}, logger, (err, res) => {
if (err) {
return cb(err);
}
res.Versions.forEach(version => {
versionsPerKey[version.key] = (versionsPerKey[version.key] || 0) + 1;
});
if (res.IsTruncated) {
return get(maxKeys, res.NextKeyMarker, res.NextVersionIdMarker, cb);
}
return cb(null);
});
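// walk the entire listing three keys at a time, following the
// NextKeyMarker/NextVersionIdMarker pagination until IsTruncated is false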
return get(3, null, null, err => {
assert.deepStrictEqual(err, null);
assert.strictEqual(Object.keys(versionsPerKey).length, 3);
assert.strictEqual(versionsPerKey['pfx1-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx2-test-object'], 5);
assert.strictEqual(versionsPerKey['pfx3-test-object'], 5);
done();
});
});
it(`Should not list phd master key when listing masters ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterMaster',
prefix: 'pfx1',
};
let versionId;
let lastVersionId;
async.series([
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
versionId = JSON.parse(res).versionId;
return next(null);
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
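// (so the listing below runs while the PHD master is still in place,
// exercising the skip-PHD path)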
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents[0].value.VersionId, versionId);
return next();
}),
], done);
});
it(`Should not list phd master key when listing versions ${variation.it}`, done => {
const objVal = {
key: 'pfx1-test-object',
versionId: 'null',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterVersions',
prefix: 'pfx1',
};
let lastVersionId;
let versionIds;
async.series([
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Versions.length, 5);
versionIds = data.Versions.map(version => version.VersionId);
return next();
}),
next => metadata.putObjectMD(BUCKET_NAME, 'pfx1-test-object', objVal, versionParams,
logger, (err, res) => {
if (err) {
return next(err);
}
lastVersionId = JSON.parse(res).versionId;
return next(null);
}),
// when deleting the last version of an object a PHD master is created
// and kept for 15s before it's repaired
next => metadata.deleteObjectMD(BUCKET_NAME, 'pfx1-test-object', { versionId: lastVersionId },
logger, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
const newVersionIds = data.Versions.map(version => version.VersionId);
assert.strictEqual(data.Versions.length, 5);
assert(versionIds.every(version => newVersionIds.includes(version)));
return next();
}),
], done);
});
it('Should not list objects tagged for deletion (master keys)', done => {
const objVal = {
key: 'pfx4-test-object',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterMaster',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(objVal.key, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
const listedObjectNames = data.Contents.map(x => x.key);
assert(!listedObjectNames.includes(objVal.key));
return next();
}),
], done);
});
it('Should not list objects tagged for deletion (version keys)', done => {
const objVal = {
key: 'pfx4-test-object',
};
const versionParams = {
versioning: true,
};
const params = {
listingType: 'DelimiterVersions',
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, objVal.key, objVal, versionParams,
logger, next),
next => flagObjectForDeletion(objVal.key, next),
next => metadata.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Versions.length, 15);
const listedObjectNames = data.Versions.map(x => x.key);
assert(!listedObjectNames.includes(objVal.key));
return next();
}),
], done);
});
it('Should properly destroy the MongoDBReadStream', done => {
// eslint-disable-next-line func-names
const destroyStub = sinon.stub(MongoReadStream.prototype, 'destroy').callsFake(function (...args) {
// call through to the original (un-stubbed) destroy
MongoReadStream.prototype.destroy.wrappedMethod.apply(this, args);
});
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, err => {
assert.ifError(err);
assert(destroyStub.called, 'Destroy should have been called on MongoReadStream');
// Restore original destroy method
destroyStub.restore();
return done();
});
});
it('Should properly destroy the MongoDBReadStream on error', done => {
// eslint-disable-next-line func-names
const destroyStub = sinon.stub(MongoReadStream.prototype, 'destroy').callsFake(function (...args) {
// call through to the original (un-stubbed) destroy
MongoReadStream.prototype.destroy.wrappedMethod.apply(this, args);
});
// stub _read so the stream emits an error
// eslint-disable-next-line func-names
const readStub = sinon.stub(MongoReadStream.prototype, '_read').callsFake(function () {
this.emit('error', new Error('error'));
});
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return metadata.listObject(BUCKET_NAME, params, logger, err => {
assert(err, 'Expected an error');
assert(destroyStub.called, 'Destroy should have been called on MongoReadStream');
destroyStub.restore();
readStub.restore();
return done();
});
});
it('Should not include location in listing result and use custom listing parser', done => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27020',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
customListingParser,
};
const parserSpy = sinon.spy(opts, 'customListingParser');
const md = new MetadataWrapper(IMPL_NAME, opts, null, logger);
md.setup(() => {
const params = {
listingType: 'DelimiterMaster',
maxKeys: 100,
};
return md.listObject(BUCKET_NAME, params, logger, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Contents.length, 3);
assert.strictEqual(data.Contents[0].key, 'pfx1-test-object');
assert.strictEqual(data.Contents[0].location, undefined);
assert(parserSpy.called);
return done();
});
});
});
});
});
});


@@ -0,0 +1,462 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'test-bucket';
const OBJECT_NAME = 'test-object';
const VERSION_ID = '98451712418844999999RG001 22019.0';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27021 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
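// The direct-lookup assertions below depend on how each bucket key format
// lays keys out in MongoDB: in v0 the master key is the bare object name
// (e.g. 'test-object'), while in v1 it carries a '\x7fM' prefix
// (e.g. '\x7fMtest-object') and version keys a '\x7fV' prefix.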
describe('MongoClientInterface:metadata.putObjectMD', () => {
let metadata;
let collection;
function getObject(key, cb) {
collection.findOne({
_id: key,
}, {}).then(doc => {
if (!doc) {
return cb(errors.NoSuchKey);
}
return cb(null, doc.value);
}).catch(err => cb(err));
}
function getObjectCount(cb) {
collection.countDocuments()
.then(count => cb(null, count))
.catch(err => cb(err));
}
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27021',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
const itOnlyInV1 = variation.vFormat === 'v1' ? it : it.skip;
describe(`vFormat : ${variation.vFormat}`, () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, err => {
if (err) {
return next(err);
}
collection = metadata.client.getCollection(BUCKET_NAME);
return next();
}),
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
it(`Should put a new non-versioned object ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: 'null',
updated: false,
};
const params = {
versioning: null,
versionId: null,
repairMaster: null,
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
return next();
});
},
// When versioning is not active, only one document is created (the master)
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 1);
return next();
}),
], done);
});
it(`Should update the metadata ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: 'null',
updated: false,
};
const params = {
versioning: null,
versionId: null,
repairMaster: null,
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
next => {
objVal.updated = true;
metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// object metadata must be updated
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.updated, true);
return next();
});
},
// Only a master version should be created
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 1);
return next();
}),
], done);
});
it(`Should put a versioned object with a specific versionId ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// checking if metadata corresponds to what was given to the function
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.versionId, VERSION_ID);
return next();
});
},
// We'll have one master and one version
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
}),
], done);
});
it(`Should put new version and update master ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let versionId = null;
async.series([
// We first create a master and a version
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
versionId = JSON.parse(data).versionId;
return next();
}),
// We put another version of the object
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// Master must be updated
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.notStrictEqual(object.versionId, versionId);
return next();
});
},
// we'll have two versions and one master
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 3);
return next();
}),
], done);
});
it(`Should update master when versioning is disabled ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: null,
repairMaster: null,
};
let versionId = null;
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, (err, data) => {
assert.deepStrictEqual(err, null);
versionId = JSON.parse(data).versionId;
return next();
}),
next => {
// Disable versioning and put a new version
params.versioning = false;
params.versionId = '';
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// Master must be updated
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.notStrictEqual(object.versionId, versionId);
return next();
});
},
// The second put shouldn't create a new version
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
}),
], done);
});
it(`Should update latest version and repair master ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
next => {
// Updating the version and repairing master
params.repairMaster = true;
objVal.updated = true;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// Master must be updated
next => {
const key = variation.vFormat === 'v0' ? 'test-object' : '\x7fMtest-object';
getObject(key, (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.versionId, VERSION_ID);
assert.strictEqual(object.updated, true);
return next();
});
},
// The second put shouldn't create a new version
next => getObjectCount((err, count) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(count, 2);
return next();
}),
], done);
});
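// The v1-only tests below exercise a v1-specific behavior: when the latest
// version is a delete marker, the v1 format removes the master key outright
// (v0 would keep a placeholder master instead), so a direct lookup of
// '\x7fMtest-object' is expected to fail with NoSuchKey.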
itOnlyInV1(`Should delete master when last version is delete marker ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// master must be deleted
next => getObject('\x7fMtest-object', err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
}),
], done);
});
itOnlyInV1(`Should create master when new version is put on top of delete marker ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
isDeleteMarker: false,
};
const params = {
versioning: true,
versionId: VERSION_ID,
repairMaster: null,
};
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// We put a new version on top of delete marker
next => {
objVal.isDeleteMarker = false;
objVal.updated = true;
objVal.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// master must be created
next => getObject('\x7fMtest-object', (err, object) => {
assert.deepStrictEqual(err, null);
assert.strictEqual(object.key, OBJECT_NAME);
assert.strictEqual(object.updated, true);
assert.strictEqual(object.isDeleteMarker, false);
assert.notEqual(object.versionId, VERSION_ID);
return next();
}),
], done);
});
itOnlyInV1(`Should not create master when previous version is updated ${variation.it}`, done => {
const objVal = {
key: OBJECT_NAME,
versionId: VERSION_ID,
updated: false,
isDeleteMarker: false,
};
const params = {
versioning: true,
repairMaster: null,
versionId: VERSION_ID,
};
async.series([
// We first create a new version and master
next => metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next),
// putting a delete marker as last version
next => {
objVal.isDeleteMarker = true;
params.versionId = null;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
// update previous version
next => {
objVal.isDeleteMarker = false;
objVal.updated = true;
params.versionId = VERSION_ID;
return metadata.putObjectMD(BUCKET_NAME, OBJECT_NAME, objVal, params, logger, next);
},
next => getObject('\x7fMtest-object', err => {
assert.deepStrictEqual(err, errors.NoSuchKey);
return next();
}),
], done);
});
});
});
});


@@ -0,0 +1,336 @@
const async = require('async');
const assert = require('assert');
const werelogs = require('werelogs');
const { MongoMemoryReplSet } = require('mongodb-memory-server');
const { errors, versioning } = require('../../../../index');
const logger = new werelogs.Logger('MongoClientInterface', 'debug', 'debug');
const BucketInfo = require('../../../../lib/models/BucketInfo').default;
const MetadataWrapper =
require('../../../../lib/storage/metadata/MetadataWrapper');
const { BucketVersioningKeyFormat } = versioning.VersioningConstants;
const IMPL_NAME = 'mongodb';
const DB_NAME = 'metadata';
const BUCKET_NAME = 'testbucket';
const mongoserver = new MongoMemoryReplSet({
debug: false,
instanceOpts: [
{ port: 27022 },
],
replSet: {
name: 'rs0',
count: 1,
dbName: DB_NAME,
storageEngine: 'ephemeralForTest',
},
});
describe('MongoClientInterface:withCond', () => {
let metadata;
const variations = [
{ it: '(v0)', vFormat: BucketVersioningKeyFormat.v0 },
{ it: '(v1)', vFormat: BucketVersioningKeyFormat.v1 },
];
beforeAll(done => {
mongoserver.start().then(() => {
mongoserver.waitUntilRunning().then(() => {
const opts = {
mongodb: {
replicaSetHosts: 'localhost:27022',
writeConcern: 'majority',
replicaSet: 'rs0',
readPreference: 'primary',
database: DB_NAME,
},
};
metadata = new MetadataWrapper(IMPL_NAME, opts, null, logger);
metadata.setup(done);
});
});
});
afterAll(done => {
async.series([
next => metadata.close(next),
next => mongoserver.stop()
.then(() => next())
.catch(next),
], done);
});
variations.forEach(variation => {
describe('::putObjectWithCond', () => {
beforeEach(done => {
const bucketMD = BucketInfo.fromObj({
_name: BUCKET_NAME,
_owner: 'testowner',
_ownerDisplayName: 'testdisplayname',
_creationDate: new Date().toJSON(),
_acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
_mdBucketModelVersion: 10,
_transient: false,
_deleted: false,
_serverSideEncryption: null,
_versioningConfiguration: null,
_locationConstraint: 'us-east-1',
_readLocationConstraint: null,
_cors: null,
_replicationConfiguration: null,
_lifecycleConfiguration: null,
_uid: '',
_isNFS: null,
ingestion: null,
});
async.series([
next => {
metadata.client.defaultBucketKeyFormat = variation.vFormat;
return next();
},
next => metadata.createBucket(BUCKET_NAME, bucketMD, logger, next),
], done);
});
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
const tests = [
[
`should upsert the object if it does not already exist ${variation.it}`,
{
initVal: null,
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { number: 24 } },
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
[
`should not update an existing object if the conditions fail ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { number: 24 } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.InternalError,
},
],
[
`should not update an existing object if the conditions fail ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { string: { $eq: 'twenty-four' } } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.InternalError,
},
],
[
`should not update an existing object if the conditions fail ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: {
value: {
string: { $eq: 'twenty-four' },
number: { $eq: 0 },
},
},
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.InternalError,
},
],
[
`should update an existing object if the conditions pass ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { number: 24 } },
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
[
`should update an existing object if the conditions pass ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: { value: { string: { $eq: 'twenty-four' } } },
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
[
`should update an existing object if the conditions pass ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
upsertVal: { value: { number: 42, string: 'forty-two' } },
conditions: {
value: {
string: { $eq: 'twenty-four' },
number: { $eq: 24 },
},
},
expectedVal: { value: { number: 42, string: 'forty-two' } },
error: null,
},
],
];
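// Note: the conditions above appear to be evaluated as a MongoDB-style query
// filter on the stored metadata, so a bare value such as
// { value: { number: 24 } } acts as an implicit equality match, equivalent to
// { value: { number: { $eq: 24 } } }; when the filter does not match,
// putObjectWithCond fails with InternalError instead of updating the
// document, as the failing cases assert.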
tests.forEach(([msg, testCase]) => it(msg, done => {
const objectKey = 'testkey';
const {
initVal, upsertVal, conditions, expectedVal, error,
} = testCase;
const params = { conditions };
async.series([
next => {
if (!initVal) {
return next();
}
return metadata.putObjectMD(BUCKET_NAME, objectKey, initVal,
{}, logger, next);
},
next => metadata.putObjectWithCond(BUCKET_NAME, objectKey,
upsertVal, params, logger, err => {
if (error) {
assert.deepStrictEqual(err, error);
return next();
}
assert(!err);
return next();
}),
next => metadata.getObjectMD(BUCKET_NAME, objectKey, {}, logger,
(err, res) => {
assert(!err);
assert.deepStrictEqual(res, expectedVal);
next();
}),
], done);
}));
});
describe('::deleteObjectWithCond', () => {
afterEach(done => {
metadata.deleteBucket(BUCKET_NAME, logger, done);
});
const tests = [
[
`should return no such key if the object does not exist ${variation.it}`,
{
initVal: null,
conditions: { value: { number: 24 } },
expectedVal: null,
error: errors.NoSuchKey,
},
],
[
`should return no such key if the conditions fail ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
conditions: { value: { number: { $eq: 24 } } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.NoSuchKey,
},
],
[
`should return no such key if the conditions fail ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
conditions: { value: { string: 'twenty-four' } },
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.NoSuchKey,
},
],
[
`should return no such key if the conditions fail ${variation.it}`,
{
initVal: { value: { number: 0, string: 'zero' } },
conditions: {
value: {
string: 'twenty-four',
number: { $eq: 0 },
},
},
expectedVal: { value: { number: 0, string: 'zero' } },
error: errors.NoSuchKey,
},
],
[
`should successfully delete matched object ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
conditions: { value: { number: 24 } },
expectedVal: null,
error: null,
},
],
[
`should successfully delete matched object ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
conditions: { value: { string: { $eq: 'twenty-four' } } },
expectedVal: null,
error: null,
},
],
[
`should successfully delete matched object ${variation.it}`,
{
initVal: { value: { number: 24, string: 'twenty-four' } },
conditions: {
value: {
string: { $eq: 'twenty-four' },
number: { $eq: 24 },
},
},
expectedVal: null,
error: null,
},
],
];
tests.forEach(([msg, testCase]) => it(msg, done => {
const objectKey = 'testkey';
const { initVal, conditions, expectedVal, error } = testCase;
const params = { conditions };
async.series([
next => {
if (!initVal) {
return next();
}
return metadata.putObjectMD(BUCKET_NAME, objectKey, initVal,
{}, logger, next);
},
next => metadata.deleteObjectWithCond(BUCKET_NAME, objectKey,
params, logger, err => {
if (error) {
assert.deepStrictEqual(err, error);
return next();
}
assert(!err);
return next();
}),
next => metadata.getObjectMD(BUCKET_NAME, objectKey, {}, logger,
(err, res) => {
if (expectedVal) {
assert.deepStrictEqual(res, expectedVal);
} else {
assert.deepStrictEqual(err, errors.NoSuchKey);
}
return next();
}),
], done);
}));
});
});
});


@@ -0,0 +1,316 @@
'use strict'; // eslint-disable-line strict
const werelogs = require('werelogs');
const assert = require('assert');
const async = require('async');
const logger = new werelogs.Logger('MetadataProxyServer', 'debug', 'debug');
const MetadataWrapper =
require('../../../lib/storage/metadata/MetadataWrapper');
const BucketRoutes =
require('../../../lib/storage/metadata/proxy/BucketdRoutes');
const metadataWrapper = new MetadataWrapper('mem', {}, null, logger);
const { RequestDispatcher } = require('../../utils/mdProxyUtils');
const routes = new BucketRoutes(metadataWrapper, logger);
const dispatcher = new RequestDispatcher(routes);
const Bucket = 'test';
const bucketInfo = {
acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
name: Bucket,
owner: '9d8fe19a78974c56dceb2ea4a8f01ed0f5fecb9d29f80e9e3b84104e4a3ea520',
ownerDisplayName: 'anonymousCoward',
creationDate: '2018-06-04T17:45:42.592Z',
mdBucketModelVersion: 8,
transient: false,
deleted: false,
serverSideEncryption: null,
versioningConfiguration: null,
locationConstraint: 'us-east-1',
readLocationConstraint: 'us-east-1',
cors: null,
replicationConfiguration: null,
lifecycleConfiguration: null,
uid: 'fea97818-6a9a-11e8-9777-e311618cc5d4',
isNFS: null,
};
const objects = [
'aaa',
'bbb/xaa',
'bbb/xbb',
'bbb/xcc',
'ccc',
'ddd',
];
function _getExpectedListing(prefix, objects) {
const filtered = objects.map(key => {
const deprefixed = key.slice(prefix.length);
return deprefixed.replace(/[/].*/, '/');
});
const keySet = {};
return filtered.filter(key => {
if (keySet[key]) {
return false;
}
if (key === '') {
return false;
}
keySet[key] = true;
return true;
});
}
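// For example, with the `objects` fixture above,
// _getExpectedListing('', objects) yields ['aaa', 'bbb/', 'ccc', 'ddd']
// (the three 'bbb/x*' keys collapse into the common prefix 'bbb/'), while
// _getExpectedListing('bbb/', objects) yields ['xaa', 'xbb', 'xcc'].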
function _listingURL(prefix, marker) {
const reSlash = /[/]/g;
const escapedPrefix = prefix.replace(reSlash, '%2F');
const escapedMarker = marker.replace(reSlash, '%2F');
return `/default/bucket/${Bucket}?delimiter=%2F&prefix=` +
`${escapedPrefix}&maxKeys=1&marker=${escapedMarker}`;
}
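// e.g. _listingURL('bbb/', '') produces
// '/default/bucket/test?delimiter=%2F&prefix=bbb%2F&maxKeys=1&marker=';
// maxKeys=1 forces one entry per page, so pagination through NextMarker is
// exercised on every call.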
function _listObjects(prefix, objects, cb) {
const keys = _getExpectedListing(prefix, objects);
const markers = keys.slice(0);
markers.unshift(undefined);
const lastKey = keys[keys.length - 1];
const listing = keys.map(key => ({
key,
IsTruncated: key !== lastKey,
isPrefix: key.endsWith('/'),
}));
let nextMarker = '';
async.mapSeries(listing, (obj, next) => {
dispatcher.get(_listingURL(prefix, nextMarker),
(err, response, body) => {
if (err) {
return next(err);
}
if (obj.isPrefix) {
assert.strictEqual(body.Contents.length, 0);
assert.strictEqual(body.CommonPrefixes.length,
1);
assert.strictEqual(body.CommonPrefixes[0],
prefix + obj.key);
} else {
assert.strictEqual(body.Contents.length, 1);
assert.strictEqual(body.CommonPrefixes.length,
0);
assert.strictEqual(body.Contents[0].key,
prefix + obj.key);
}
assert.strictEqual(body.IsTruncated,
obj.IsTruncated);
if (body.IsTruncated) {
nextMarker = body.NextMarker;
}
return next();
});
}, err => cb(err));
}
function _createObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.post(`/default/bucket/${Bucket}/${key}`,
{ key }, next);
}, err => {
cb(err);
});
}
function _readObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.get(`/default/bucket/${Bucket}/${key}`,
(err, response, body) => {
assert.deepStrictEqual(body.key, key);
next(err);
});
}, err => {
cb(err);
});
}
function _deleteObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.delete(`/default/bucket/${Bucket}/${key}`,
err => next(err));
}, err => {
cb(err);
});
}
describe('Basic Metadata Proxy Server test',
() => {
jest.setTimeout(10000);
it('Should get the metadataInformation', done => {
dispatcher.get('/default/metadataInformation',
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(
body, { metadataVersion: 2 });
return done();
});
});
});
describe('Basic Metadata Proxy Server CRUD test', () => {
jest.setTimeout(10000);
beforeEach(done => {
dispatcher.post(`/default/bucket/${Bucket}`, bucketInfo,
done);
});
afterEach(done => {
dispatcher.delete(`/default/bucket/${Bucket}`, done);
});
it('Should get the bucket attributes', done => {
dispatcher.get(`/default/attributes/${Bucket}`,
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(body.name,
bucketInfo.name);
return done();
});
});
it('Should CRUD an object', done => {
async.waterfall([
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'gabu' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo, 'gabu');
}
// always propagate the result, otherwise the waterfall hangs on error
next(err);
}),
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'zome' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo, 'zome');
}
next(err);
}),
next => dispatcher.delete(`/default/bucket/${Bucket}/test1`,
err => next(err)),
], err => done(err));
});
it('Should list objects', done => {
async.waterfall([
next => _createObjects(objects, next),
next => _readObjects(objects, next),
next => _listObjects('', objects, next),
next => _listObjects('bbb/', objects, next),
next => _deleteObjects(objects, next),
], err => {
done(err);
});
});
it('Should update bucket properties', done => {
dispatcher.get(
`/default/attributes/${Bucket}`, (err, response, body) => {
assert.strictEqual(err, null);
const bucketInfo = body;
const newOwnerDisplayName = 'divertedfrom';
bucketInfo.ownerDisplayName = newOwnerDisplayName;
dispatcher.post(
`/default/attributes/${Bucket}`, bucketInfo, err => {
assert.strictEqual(err, null);
dispatcher.get(
`/default/attributes/${Bucket}`,
(err, response, body) => {
assert.strictEqual(err, null);
const newBucketInfo = body;
assert.strictEqual(
newBucketInfo.ownerDisplayName,
newOwnerDisplayName);
done(null);
});
});
});
});
it('Should fail to list a non-existing bucket', done => {
dispatcher.get('/default/bucket/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('Should fail to get attributes from a non-existing bucket', done => {
dispatcher.get('/default/attributes/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('should pass a health check', done => {
dispatcher.get('/_/healthcheck', (err, response, body) => {
if (err) {
return done(err);
}
const expectedResponse = {
memorybucket: {
code: 200,
message: 'OK',
},
};
assert.strictEqual(response.responseHead.statusCode, 200);
assert.deepStrictEqual(body, expectedResponse);
return done(err);
});
});
it('should work with parallel route', done => {
const objectName = 'theObj';
async.waterfall([
next => _createObjects([objectName], next),
next => {
dispatcher.get(
`/default/parallel/${Bucket}/${objectName}`,
(err, response, body) => {
if (err) {
return next(err);
}
assert.strictEqual(response.responseHead.statusCode,
200);
const bucketMD = JSON.parse(body.bucket);
const objectMD = JSON.parse(body.obj);
const expectedObjectMD = { key: objectName };
assert.deepStrictEqual(bucketMD.name,
bucketInfo.name);
assert.deepStrictEqual(objectMD, expectedObjectMD);
return next(err);
});
},
next => _deleteObjects([objectName], next),
], done);
});
});


@@ -0,0 +1,318 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const async = require('async');
const RedisClient = require('../../../lib/metrics/RedisClient').default;
const StatsModel = require('../../../lib/metrics/StatsModel').default;
// setup redis client
const config = {
host: '127.0.0.1',
port: 6379,
enableOfflineQueue: true,
};
const fakeLogger = {
trace: () => {},
error: () => {},
};
const redisClient = new RedisClient(config, fakeLogger);
// setup stats model
const STATS_INTERVAL = 300; // 5 minutes
const STATS_EXPIRY = 86400; // 24 hours
const statsModel = new StatsModel(redisClient, STATS_INTERVAL, STATS_EXPIRY);
function setExpectedStats(expected) {
return expected.concat(
Array((STATS_EXPIRY / STATS_INTERVAL) - expected.length).fill(0));
}
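// With a 300s interval over a 86400s window there are 288 sample buckets,
// so e.g. setExpectedStats([9]) is [9] followed by 287 zeroes: the helper
// pads a short expectation out to the full array that getStats returns.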
// Since many methods were overridden, these tests validate the changes
// made to the original methods
describe('StatsModel class', () => {
const id = 'arsenal-test';
const id2 = 'test-2';
const id3 = 'test-3';
afterEach(() => redisClient.clear(() => {}));
it('should convert a 2d array columns into rows and vice versa using _zip',
() => {
const arrays = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
];
const res = statsModel._zip(arrays);
const expected = [
[1, 4, 7],
[2, 5, 8],
[3, 6, 9],
];
assert.deepStrictEqual(res, expected);
});
it('_zip should return an empty array if given an invalid array', () => {
const arrays = [];
const res = statsModel._zip(arrays);
assert.deepStrictEqual(res, []);
});
it('_getCount should return an array of all valid integer values',
() => {
const res = statsModel._getCount([
[null, '1'],
[null, '2'],
[null, null],
]);
assert.deepStrictEqual(res, setExpectedStats([1, 2, 0]));
});
it('should correctly record a new request with the default increment of one',
done => {
async.series([
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 2], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should record new requests by increments of a defined amount', done => {
function noop() {}
async.series([
next => {
statsModel.reportNewRequest(id, 9);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([9]));
next();
});
},
next => {
statsModel.reportNewRequest(id);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([10]));
next();
});
},
next => {
statsModel.reportNewRequest(id, noop);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([11]));
next();
});
},
], done);
});
it('should correctly record a 500 on the server', done => {
statsModel.report500(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
done();
});
});
it('should respond back with total requests as an array', done => {
async.series([
next => {
statsModel.reportNewRequest(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.report500(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([1]),
'500s': setExpectedStats([1]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should not crash on empty results', done => {
async.series([
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should return a zero-filled array if no ids are passed to getAllStats',
done => {
statsModel.getAllStats(fakeLogger, [], (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([]));
assert.deepStrictEqual(res['500s'], setExpectedStats([]));
done();
});
});
it('should get accurately reported data for a given id from getAllStats',
done => {
statsModel.reportNewRequest(id, 9);
statsModel.reportNewRequest(id2, 2);
statsModel.reportNewRequest(id3, 3);
statsModel.report500(id);
async.series([
next => {
statsModel.getAllStats(fakeLogger, [id], (err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 9);
assert.equal(res['500s'][0], 1);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, [id, id2, id3],
(err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 14);
assert.deepStrictEqual(res.requests,
setExpectedStats([14]));
next();
});
},
], done);
});
it('should normalize to the nearest hour using normalizeTimestampByHour',
() => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = new Date(statsModel.normalizeTimestampByHour(date));
assert.strictEqual(date.getHours(), newDate.getHours());
assert.strictEqual(newDate.getMinutes(), 0);
assert.strictEqual(newDate.getSeconds(), 0);
assert.strictEqual(newDate.getMilliseconds(), 0);
});
it('should get previous hour using _getDatePreviousHour', () => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = statsModel._getDatePreviousHour(new Date(date));
const millisecondsInOneHour = 3600000;
assert.strictEqual(date - newDate, millisecondsInOneHour);
});
it('should get an array of hourly timestamps using getSortedSetHours',
() => {
const epoch = 1536882476501;
const millisecondsInOneHour = 3600000;
const expected = [];
let dateInMilliseconds = statsModel.normalizeTimestampByHour(
new Date(epoch));
for (let i = 0; i < 24; i++) {
expected.push(dateInMilliseconds);
dateInMilliseconds -= millisecondsInOneHour;
}
const res = statsModel.getSortedSetHours(epoch);
assert.deepStrictEqual(res, expected);
});
it('should apply TTL on a new sorted set using addToSortedSet', done => {
const key = 'a-test-key';
const score = 100;
const value = 'a-value';
const now = Date.now();
const nearestHour = statsModel.normalizeTimestampByHour(new Date(now));
statsModel.addToSortedSet(key, score, value, (err, res) => {
assert.ifError(err);
// check both a "zadd" and "expire" occurred
assert.equal(res, 1);
redisClient.ttl(key, (err, res) => {
assert.ifError(err);
// assert this new set has a ttl applied
assert(res > 0);
const adjustmentSecs = now - nearestHour;
const msInADay = 24 * 60 * 60 * 1000;
const msInAnHour = 60 * 60 * 1000;
const upperLimitSecs =
Math.ceil((msInADay - adjustmentSecs) / 1000);
const lowerLimitSecs =
Math.floor((msInADay - adjustmentSecs - msInAnHour) / 1000);
// assert new ttl is between 23 and 24 hours adjusted by time
// elapsed since normalized hourly time
assert(res >= lowerLimitSecs);
assert(res <= upperLimitSecs);
done();
});
});
});
});

Some files were not shown because too many files have changed in this diff.