Compare commits


1012 Commits

Author SHA1 Message Date
Vitaliy Filippov 19855115ae Use TS? 2024-08-06 19:56:20 +03:00
Vitaliy Filippov 329d8ef32c Add Vitastor support 2024-08-05 02:23:54 +03:00
Vitaliy Filippov f0ded4ea4f Use swc to transpile during installation 2024-08-04 00:00:10 +03:00
Vitaliy Filippov 3eea263384 Use ^ dependencies, suppress aws-sdk maintenance mode message 2024-08-04 00:00:01 +03:00
Vitaliy Filippov c26d4f7d70 Fix readUInt with length 8 2024-08-04 00:00:01 +03:00
Vitaliy Filippov 63137e7a7b Change git dependency URLs 2024-08-04 00:00:01 +03:00
Vitaliy Filippov fdb23b1cd2 Remove yarn lock 2024-08-04 00:00:01 +03:00
Vitaliy Filippov 4120eac127 Make sproxydclient and hdclient dependencies optional 2024-08-04 00:00:01 +03:00
Maha Benzekri d9bbd6cf3e
bump project version
Issue : https://scality.atlassian.net/browse/ARSN-426
2024-07-31 11:22:01 +02:00
Maha Benzekri 65e89d286d
ensure callback is only called once on AwsClient
Issue : https://scality.atlassian.net/browse/ARSN-426
2024-07-31 11:21:56 +02:00
Maha Benzekri dcbc5ca98f
ensure callback is only called once on MultipleBackendGateway
Issue : https://scality.atlassian.net/browse/ARSN-426
2024-07-31 11:21:44 +02:00
Maha Benzekri 817bb836ec
ARSN-420: bump arsenal version 2024-07-15 15:20:08 +02:00
Maha Benzekri e3e4b2aea7
ARSN-420: putObjectNoVar function update with hack
We agreed on introducing the same “hack” as in the internalDelete function,
so the MD is written twice in the oplog: one "deleted: true" copy of the previous MD,
followed by the expected update with the new metadata
2024-07-15 15:19:06 +02:00
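To illustrate the double-write above, here is a minimal sketch; the shapes and names are illustrative, not Arsenal's actual metadata types:

```ts
// Hypothetical sketch of the oplog double-write: emit a tombstone copy of the
// previous MD, then the actual update, so oplog consumers observe the
// replacement as a delete followed by a put.
type MD = Record<string, unknown>;

function putObjectNoVarOplogEntries(prevMD: MD | null, newMD: MD): MD[] {
    const entries: MD[] = [];
    if (prevMD) {
        entries.push({ ...prevMD, deleted: true }); // "deleted: true" copy of the previous MD
    }
    entries.push(newMD); // expected update with the new metadata
    return entries;
}
```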
Francois Ferrand 9cd72221e8
Bump arsenal 8.1.132
Issue: ARSN-421
2024-07-10 18:45:22 +02:00
Francois Ferrand bdcd4685ad
gha: bump codecov v4
and use codecov token.

Issue: ARSN-421
2024-07-10 18:45:22 +02:00
Francois Ferrand b2b6c47ba7
Introduce objectGetArchiveInfo verb
This may be used to allow access to more details about archived objects.

Issue: ARSN-421
2024-07-10 18:29:53 +02:00
Jonathan Gramain da173d53b4 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-425-listingLatestCrashWithUndefined' into w/8.1/bugfix/ARSN-425-listingLatestCrashWithUndefined 2024-07-08 11:28:59 -07:00
Jonathan Gramain 7eb2701f21 Merge remote-tracking branch 'origin/bugfix/ARSN-425-listingLatestCrashWithUndefined' into w/7.70/bugfix/ARSN-425-listingLatestCrashWithUndefined 2024-07-08 11:03:50 -07:00
Jonathan Gramain 6ec3c8e10d ARSN-425 bump arsenal version 2024-07-08 10:59:25 -07:00
Jonathan Gramain 7aaf277db2 bf: ARSN-425 listing crash if key contains "undefined"
Fix a crash in DelimiterMaster listing without a delimiter, when a key
contains the string "undefined".

Note: a similar fix was done in ARSN-330 for DelimiterVersions. I
ported the existing unit test there to the development/7.10 branch to
enhance regression testing, even though this bug on DelimiterVersions
only existed on 7.70.
2024-07-08 10:56:48 -07:00
Francois Ferrand 67421f8c76
Merge branch 'w/7.70/improvement/ARSN-415' into w/8.1/improvement/ARSN-415 2024-05-10 14:28:11 +02:00
Francois Ferrand bf2260b1ae
Merge branch 'improvement/ARSN-415' into w/7.70/improvement/ARSN-415 2024-05-10 14:27:00 +02:00
Francois Ferrand 11e0e1b489
Bump gha actions
- checkout@v4
- codeql@v2
- dependency-review@v4
- setup-node@v4
- artifacts@v4

Issue: ARSN-415
2024-05-10 14:26:29 +02:00
Anurag Mittal f13ec2cf4c
Merge remote-tracking branch 'origin/bugfix/ARSN-412-add-support-for-exists-condition' into w/8.1/bugfix/ARSN-412-add-support-for-exists-condition 2024-05-03 13:37:07 +02:00
Anurag Mittal e369c7e6d2
ARSN-412: bump-package.json-to-v7.70.31 2024-05-03 13:34:46 +02:00
Anurag Mittal c5c1db4568
ARSN-412-test-relevant-errors 2024-05-03 13:34:16 +02:00
Anurag Mittal 58f4d3cb3a
VAULT-412-add-unit-test-for-conditions 2024-05-03 13:34:16 +02:00
Anurag Mittal b049f39e2a
ARSN-412: add support for exists pre-condition 2024-05-03 13:34:16 +02:00
williamlardier 30eaaf15eb ARSN-406: bump project version 2024-05-02 09:01:13 +02:00
williamlardier 9d16fb0a34 ARSN-406: create the QuotaExceeded error 2024-05-02 09:01:06 +02:00
williamlardier cdc612f379 ARSN-406: add quota numbers in report 2024-05-02 09:00:51 +02:00
williamlardier 61dd65b2c4 ARSN-406: add request context options for quota evaluation 2024-05-02 09:00:00 +02:00
bert-e 2c0696322e Merge branch 'improvement/ARSN-410-quotas-for-bucket-apis' into q/8.1 2024-04-30 16:08:07 +00:00
Maha Benzekri c464a70b90
ARSN-410: bump project version 2024-04-30 17:19:42 +02:00
Maha Benzekri af07bb3df4
ARSN-410: adding api methods in actionMonitoringMapS3 2024-04-30 17:19:20 +02:00
Maha Benzekri 1858654f34
ARSN-410: new no such quota error 2024-04-30 17:18:54 +02:00
Maha Benzekri 0475c8520a
ARSN-410: update routes for bucket get/put/delete quota 2024-04-30 17:18:12 +02:00
Maha Benzekri 31a4de5372
ARSN-410: add getbucketQuota in metaDataWrapper 2024-04-30 17:17:46 +02:00
Maha Benzekri 0c53d13439
ARSN-410: update bucketInfo test 2024-04-30 17:17:18 +02:00
Maha Benzekri cad8b14df1
ARSN-410: update bucketInfo and md 2024-04-30 17:16:50 +02:00
Nicolas Humbert fe29bacc79 Merge remote-tracking branch 'origin/bugfix/ARSN-413/null' into w/8.1/bugfix/ARSN-413/null 2024-04-30 10:26:58 +02:00
Nicolas Humbert a86cff4631 ARSN-413 bump package version 2024-04-26 19:37:11 +02:00
Kerkesni f13a5d79ea bugfix: ARSN-278 handle getting versionId when object is versioning suspended
When replicating a versioning-suspended object, we need to specify 'null'
as the encoded versionId, since the versionId contained within the object's
metadata is strictly internal.

In the replication processor we use getVersionId() when putting/deleting a tag.
It's used by the mongoClient to fetch the object from MongoDB; here again we
need to specify 'null' to get the versioning-suspended object (cloudserver already
knows how to handle a 'null' versionId and transforms it to undefined before giving
it to the mongoClient)

(cherry picked from commit d1cd7e8dba)
2024-04-26 17:20:36 +02:00
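As a hedged illustration of the rule described above (names and shapes are illustrative, not the replication processor's actual code):

```ts
// Sketch: choose the encoded versionId used to address an object during
// replication tag updates. For a versioning-suspended ("null") object, the
// internal versionId must not leak: the literal 'null' is sent instead.
interface ObjectMDLike { isNull?: boolean; versionId?: string }

function getEncodedVersionId(md: ObjectMDLike): string | undefined {
    if (md.isNull) {
        return 'null'; // cloudserver maps 'null' to undefined for the mongoClient
    }
    return md.versionId;
}
```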
Maha Benzekri ca8f570f15
ARSN-404: project bump 2024-04-05 11:35:52 +02:00
Maha Benzekri a4bca10faf
ARSN-404: adding permission in BP and IAM action Map 2024-04-05 11:35:52 +02:00
Jonathan Gramain c2ab4a2052 ARSN-402 [8.1] typescript fixes 2024-03-13 09:10:25 -07:00
Jonathan Gramain fd0aa314eb Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-402-batchDeleteRequestLogger' into w/8.1/bugfix/ARSN-402-batchDeleteRequestLogger 2024-03-13 09:10:21 -07:00
Jonathan Gramain a643a3e6cc Merge remote-tracking branch 'origin/bugfix/ARSN-402-batchDeleteRequestLogger' into w/7.70/bugfix/ARSN-402-batchDeleteRequestLogger 2024-03-13 09:08:05 -07:00
Jonathan Gramain e9d815cc9d ARSN-402 bump arsenal version 2024-03-13 08:40:02 -07:00
Jonathan Gramain c86d24fc8f bf: ARSN-402 sanitize use of log object in DataWrapper.delete()
Don't assume that we can safely call `end()` on the passed log object
if there is no callback (separation of concerns). Additionally, an
error object was passed where `end()` expects a string as a message,
causing implicit conversion.

Since errors are already logged, there is no need to bind the
`callback` object to `log.end`: there is no strong reason to log the
elapsed time there, and the only use I can see where we don't pass a
callback in Cloudserver is to support deletion of old metadata with a
string as the location array. IMHO it is not worth the complexity of
adding it there, as the rest of the API doesn't log elapsed time
anyway, except for `batchDelete`.
2024-03-13 08:39:35 -07:00
Jonathan Gramain 3b6d3838f5 bf: ARSN-402 use local RequestLogger in batchDelete
Create a local RequestLogger in batchDelete(): this allows tracking
the elapsed time of the batch delete sub-request, and avoids being
forced to create a new request logger before calling the function (due
to the call to `log.end()`), which was error-prone and hard to
maintain.
2024-03-13 08:39:35 -07:00
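A minimal sketch of the idea, assuming the usual werelogs Logger/RequestLogger API as commonly used in Arsenal:

```ts
import { Logger } from 'werelogs';

const logger = new Logger('BatchDelete');

// Derive a request-scoped logger locally: the sub-request can then safely
// call end() to log its own elapsed time, without consuming a logger the
// caller may still need.
function batchDelete(serializedUids: string, cb: (err?: Error) => void): void {
    const log = logger.newRequestLoggerFromSerializedUids(serializedUids);
    // ... perform the batch delete sub-request ...
    log.end('batch delete sub-request complete');
    cb();
}
```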
Jonathan Gramain fcdfa889be ARSN-402 bump werelogs dependency
+ typescript fixes to be compatible with the latest werelogs
2024-03-13 08:39:35 -07:00
Mickael Bourgois 5b8fcf0313
ARSN-401: Bump version 2024-03-08 14:11:30 +01:00
Mickael Bourgois bdfde26fe4
Merge remote-tracking branch 'origin/improvement/ARSN-401-cluster-rpc-primary' into w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-03-08 14:11:06 +01:00
Mickael Bourgois e53613783a
Merge remote-tracking branch 'origin/development/8.1' into w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-03-08 14:10:12 +01:00
Mickael Bourgois 69dbbb143a
Merge branch 'development/7.70' into improvement/ARSN-401-cluster-rpc-primary 2024-03-08 14:08:52 +01:00
Mickael Bourgois 403c4e5040
ARSN-401: Bump version 2024-03-08 14:07:24 +01:00
Nicolas Humbert a1dc2bd84d Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-403/bump' into w/8.1/bugfix/ARSN-403/bump 2024-03-06 16:40:02 +01:00
Nicolas Humbert 01409d690c Merge remote-tracking branch 'origin/bugfix/ARSN-403/bump' into w/7.70/bugfix/ARSN-403/bump 2024-03-06 16:31:42 +01:00
Nicolas Humbert 9ee40f343b ARSN-403 bump package 2024-03-06 16:07:08 +01:00
bert-e 77ed018b4f Merge branch 'w/7.70/bugfix/ARSN-403/fix-put-metadata-2' into tmp/octopus/w/8.1/bugfix/ARSN-403/fix-put-metadata-2 2024-03-05 12:41:44 +00:00
bert-e f77700236f Merge branch 'bugfix/ARSN-403/fix-put-metadata-2' into tmp/octopus/w/7.70/bugfix/ARSN-403/fix-put-metadata-2 2024-03-05 12:41:44 +00:00
Nicolas Humbert 43ff16b28a ARSN-403 fix tests 2024-03-05 13:41:27 +01:00
bert-e 05c628728d Merge branch 'w/7.70/bugfix/ARSN-403/fix-put-metadata-2' into tmp/octopus/w/8.1/bugfix/ARSN-403/fix-put-metadata-2 2024-03-04 13:23:08 +00:00
Nicolas Humbert 2a807dc4ef Merge remote-tracking branch 'origin/bugfix/ARSN-403/fix-put-metadata-2' into w/7.70/bugfix/ARSN-403/fix-put-metadata-2 2024-03-04 14:21:11 +01:00
Nicolas Humbert 1f8b0a4032 ARSN-403 Set nullVersionId to master when replacing a null version. 2024-03-04 11:51:33 +01:00
bert-e 0dd7fe9875 Merge branch 'improvement/ARSN-401-cluster-rpc-primary' into tmp/octopus/w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-02-29 08:58:13 +00:00
Mickael Bourgois f7a6af8d9a
ARSN-401: Test clusterRPC fix error response code
In case a regular error without code is thrown
2024-02-29 09:57:30 +01:00
Mickael Bourgois e6d0eff1a8
Merge remote-tracking branch 'origin/improvement/ARSN-401-cluster-rpc-primary' into w/8.1/improvement/ARSN-401-cluster-rpc-primary 2024-02-28 01:52:02 +01:00
Mickael Bourgois 9d558351e7
ARSN-401: Test new RPC communication 2024-02-27 21:05:28 +01:00
Mickael Bourgois 68150da72e
ARSN-401: add errorCode in cluster RPC for scuba 2024-02-27 21:04:57 +01:00
Mickael Bourgois 2b2c4bc50e
ARSN-401: Bump werelogs for types 2024-02-26 18:46:20 +01:00
Mickael Bourgois 3068086a97
ARSN-401: Fix werelogs config in cluster RPC
Also note that some arsenal modules have a side effect when imported,
as they reconfigure the werelogs logLevel,
e.g. lib/storage/data/external/GCP/GcpUtils.js
2024-02-26 18:18:35 +01:00
Mickael Bourgois 0af7eb5530
ARSN-401: Add PRIMARY communication in cluster RPC 2024-02-26 18:17:34 +01:00
bert-e 7e372b7bd5 Merge branches 'w/8.1/improvement/ARSN-400-scuba-admin' and 'q/2224/7.70/improvement/ARSN-400-scuba-admin' into tmp/octopus/q/8.1 2024-02-26 13:59:56 +00:00
bert-e a121810552 Merge branches 'w/7.70/improvement/ARSN-400-scuba-admin' and 'q/2224/7.10/improvement/ARSN-400-scuba-admin' into tmp/octopus/q/7.70 2024-02-26 13:59:54 +00:00
bert-e 9bf1bcc483 Merge branch 'improvement/ARSN-400-scuba-admin' into q/7.10 2024-02-26 13:59:54 +00:00
Nicolas Humbert 06402c6c94 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-392/bump' into w/8.1/bugfix/ARSN-392/bump 2024-02-21 10:11:29 +01:00
Nicolas Humbert a6f3c82827 Merge remote-tracking branch 'origin/bugfix/ARSN-392/bump' into w/7.70/bugfix/ARSN-392/bump 2024-02-21 10:01:01 +01:00
Nicolas Humbert f1891851b3 ARSN-392 version bump 2024-02-21 09:54:30 +01:00
bert-e a1eed4fefb Merge branch 'bugfix/ARSN-392/null7.70' into tmp/octopus/w/8.1/bugfix/ARSN-392/null7.70 2024-02-20 14:22:16 +00:00
Nicolas Humbert 68204448a1 ARSN-392 Fix processVersionSpecificPut
- For backward compatibility (if isNull is undefined), add the nullVersionId field to the master update. The nullVersionId is needed for listing, retrieving, and deleting null versions.

- For the new null key implementation (if isNull is defined): add the isNull2 field and set it to true to specify that the new version is null AND has been put with a Cloudserver handling null keys (i.e., supporting S3C-7352).

- Manage scenarios in which a version is marked with the isNull attribute set to true, but without a version ID. This happens after BackbeatClient.putMetadata() is applied to a standalone null master.
2024-02-20 15:18:44 +01:00
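A hedged sketch of the update rules above; the field names come from the commit message, while the surrounding shapes are illustrative:

```ts
type VersionMD = { versionId?: string; isNull?: boolean };
type MasterUpdate = VersionMD & { isNull2?: boolean; nullVersionId?: string };

function buildMasterUpdate(put: VersionMD, prevNullVersionId?: string): MasterUpdate {
    const update: MasterUpdate = { ...put };
    if (put.isNull === undefined) {
        // backward compatibility: carry nullVersionId so the null version
        // can still be listed, retrieved, and deleted
        update.nullVersionId = prevNullVersionId;
    } else if (put.isNull) {
        // null-key aware Cloudserver (S3C-7352): tag the version as isNull2
        update.isNull2 = true;
    }
    return update;
}
```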
Nicolas Humbert 40e271f7e2 ARSN-392 Import the V0 processVersionSpecificPut from Metadata
This logic is used by CRR replication feature to BackbeatClient.putMetadata on top of a null version
2024-02-20 15:18:05 +01:00
bert-e d8f7f18f5a Merge branches 'w/8.1/bugfix/ARSN-392/null' and 'q/2215/7.70/bugfix/ARSN-392/null' into tmp/octopus/q/8.1 2024-02-20 14:02:12 +00:00
bert-e 5f4d7afefb Merge branch 'bugfix/ARSN-392/null' into q/7.10 2024-02-20 14:02:11 +00:00
bert-e 2482fdfafc Merge branches 'w/7.70/bugfix/ARSN-392/null' and 'q/2215/7.10/bugfix/ARSN-392/null' into tmp/octopus/q/7.70 2024-02-20 14:02:11 +00:00
bert-e e151b3fff1 Merge branch 'w/7.70/bugfix/ARSN-392/null' into tmp/octopus/w/8.1/bugfix/ARSN-392/null 2024-02-20 13:54:33 +00:00
Nicolas Humbert b8bbdbbd81 Merge remote-tracking branch 'origin/bugfix/ARSN-392/null' into w/7.70/bugfix/ARSN-392/null 2024-02-20 14:49:31 +01:00
Nicolas Humbert 46258bca74 ARSN-392 Fix processVersionSpecificPut
- Add the nullVersionId field into the master update. The nullVersionId is needed for listing, retrieving, and deleting null versions.

- Manage scenarios in which a version is marked with the isNull attribute set to true, but without a version ID.
This happens after BackbeatClient.putMetadata() is applied to a standalone null master.
2024-02-19 11:42:17 +01:00
williamlardier b6bc11881a Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend' into w/8.1/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-19 09:26:47 +01:00
williamlardier 648257612b Merge remote-tracking branch 'origin/development/8.1' into w/8.1/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-19 09:26:06 +01:00
williamlardier 7423fac674 Merge remote-tracking branch 'origin/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend' into w/7.70/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-19 09:25:05 +01:00
williamlardier 9647043a02 ARSN-396: bump project 2024-02-19 09:24:27 +01:00
williamlardier f9e1f91791 Merge remote-tracking branch 'origin/development/7.70' into w/7.70/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-19 09:23:29 +01:00
williamlardier 9c5bc2bfe0 ARSN-396: bump project 2024-02-19 09:22:23 +01:00
Jonathan Gramain 1a0a981271 Merge remote-tracking branch 'origin/bugfix/ARSN-398-doNotRefreshGapBuildingIfDisabled' into w/8.1/bugfix/ARSN-398-doNotRefreshGapBuildingIfDisabled 2024-02-16 10:04:07 -08:00
bert-e a45b2eb6a4 Merge branch 'w/7.70/improvement/ARSN-400-scuba-admin' into tmp/octopus/w/8.1/improvement/ARSN-400-scuba-admin 2024-02-16 10:29:54 +00:00
bert-e b00378d46d Merge branch 'improvement/ARSN-400-scuba-admin' into tmp/octopus/w/7.70/improvement/ARSN-400-scuba-admin 2024-02-16 10:29:53 +00:00
Mickael Bourgois 2c3bfb16ef
ARSN-400: Add scuba admin actions 2024-02-16 11:18:05 +01:00
Jonathan Gramain c72d8be223 ARSN-398 bump arsenal version 2024-02-15 11:23:53 -08:00
Jonathan Gramain f63cb3c762 bf: ARSN-398 DelimiterMaster: fix when gap building is disabled
- Fix the situation where gap building is disabled by
  `_saveBuildingGap()` but we attempted to reset the building gap state
  anyway.

- Introduce a new state 'Expired' that can be differentiated from
  'Disabled': it makes `getGapBuildingValidityPeriodMs()` return 0
  instead of 'null' to hint the listing backend that it should trigger
  a new listing.
2024-02-15 11:21:25 -08:00
bert-e 15fd621c5c Merge branches 'w/8.1/feature/ARSN-397-gapCacheClear' and 'q/2222/7.70/feature/ARSN-397-gapCacheClear' into tmp/octopus/q/8.1 2024-02-15 19:07:32 +00:00
bert-e effbf63dd4 Merge branch 'feature/ARSN-397-gapCacheClear' into q/7.70 2024-02-15 19:07:32 +00:00
bert-e 285fe2f63b Merge branches 'w/8.1/bugfix/ARSN-394-GapCacheInvalidateStagingGaps' and 'q/2218/7.70/bugfix/ARSN-394-GapCacheInvalidateStagingGaps' into tmp/octopus/q/8.1 2024-02-15 19:07:20 +00:00
bert-e 1d8ebe6a9c Merge branch 'bugfix/ARSN-394-GapCacheInvalidateStagingGaps' into q/7.70 2024-02-15 19:07:20 +00:00
bert-e 00555597e0 Merge branch 'feature/ARSN-397-gapCacheClear' into tmp/octopus/w/8.1/feature/ARSN-397-gapCacheClear 2024-02-15 18:59:42 +00:00
bert-e bddc2ccd01 Merge branch 'bugfix/ARSN-394-GapCacheInvalidateStagingGaps' into tmp/octopus/w/8.1/bugfix/ARSN-394-GapCacheInvalidateStagingGaps 2024-02-15 18:59:33 +00:00
Jonathan Gramain 7908654b51 ft: ARSN-397 GapCache.clear()
Add a clear() method to clear exposed and staging gaps. Invalidating
updates are retained, so gaps inserted after the call to clear() can still be invalidated.
2024-02-14 11:36:28 -08:00
Jonathan Gramain 0d7cf8d40a Merge remote-tracking branch 'origin/feature/ARSN-389-optimizeListingWithGapCache' into w/8.1/feature/ARSN-389-optimizeListingWithGapCache 2024-02-14 10:24:17 -08:00
Jonathan Gramain c4c75e976c ARSN-389 DelimiterMaster: v0 format gap skipping
Implement logic in DelimiterMaster to improve efficiency of listings
of buckets in V0 format that have a lot of current delete markers.

A GapCache instance can be attached to a DelimiterMaster instance,
which enables the following:

- Lookups in the cache to be able to restart listing directly beyond
  the cached gaps. It is done by returning FILTER_SKIP code when
  listing inside a gap, which hints the caller (RepdServer) that it is
  allowed to restart a new listing from a specific later key.

- Building gaps and caching them when listing inside a series of current
  delete markers. This allows future listings to benefit from the gap
  information and skip over them.

An important caveat is that there is a limited time in which gaps can
be built from the current listing: it is a trade-off to guarantee the
validity of cached gaps when concurrent operations may invalidate
them. This time is set in the GapCache instance as `exposureDelayMs`,
and is the time during which concurrent operations are kept in memory
to potentially invalidate future gap creations. Because listings use a
snapshot of the database, the entries they return reflect the state at
the time the listing started. For this reason, in order to be allowed
to consistently build new gaps, it is necessary to limit the running
time of listings, and to periodically start new listings (based on
time or number of listed keys), resuming from where the previous
listing stopped, instead of continuing the current listing.
2024-02-14 10:18:02 -08:00
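A condensed sketch of the lookup side, under assumed names (the real DelimiterMaster/GapCache interfaces differ):

```ts
const FILTER_ACCEPT = 1;
const FILTER_SKIP = 0;

interface Gap { firstKey: string; lastKey: string }
interface GapCacheLike { lookupGap(key: string): Gap | null }

// When the listed key falls inside a cached gap, return FILTER_SKIP to hint
// the backend (RepdServer) that it may restart the listing past the gap.
function filterKey(cache: GapCacheLike, key: string): number {
    const gap = cache.lookupGap(key);
    if (gap && key >= gap.firstKey && key <= gap.lastKey) {
        return FILTER_SKIP; // skipping() would then return gap.lastKey
    }
    return FILTER_ACCEPT;
}
```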
Jonathan Gramain 1266a14253 impr: ARSN-389 change contract of skipping() API
Instead of returning a "prefix" for the listing task to skip over,
directly return the key on which to skip and continue the listing.

It is both more natural and necessary for implementing skipping over
cached "gaps" of deleted objects.

Note that it could even be more powerful to return the type of query
param to apply for the next listing ('gt' or 'gte'), but it would be
more complex to implement with little practical benefit, so instead we
add a null byte at the end of the returned key to skip to, whenever we
want a 'gt' behavior from the returned 'gte' key.

Also in this commit: clarify the API contract and always return
FILTER_ACCEPT when not allowed to skip over low-level listing
contents. A good chunk of the history of listing bugs and workarounds
comes from this confusion.
2024-02-14 10:18:02 -08:00
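The null-byte trick can be sketched in a few lines (an assumed helper, not the actual skipping() implementation):

```ts
// Listing backends resume with a 'gte' bound on the returned key; appending
// a null byte turns that into effective 'gt' semantics on the original key.
function skipTo(key: string, inclusive: boolean): string {
    return inclusive ? key : `${key}\u0000`;
}

// skipTo('foo/', false) resumes strictly after 'foo/', i.e. gte 'foo/\0'
```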
williamlardier 851c72bd0f ARSN-396: consider action and isImplicit flags in multipleBackend
The new flags are set when IAM returns detailed information about
the actions, whether they are allowed or denied, with the
isImplicit flag. The mergePolicy must be updated to support the
new fields, and must not merge policies that are for different
actions.

Note that this function will consider that any Allow takes
precedence, so this behavior is not changed.
2024-02-14 12:35:22 +01:00
bert-e 722b6ae699 Merge branch 'w/7.70/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend' into tmp/octopus/w/8.1/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-14 11:13:29 +00:00
bert-e 29925a15ad Merge branch 'bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend' into tmp/octopus/w/7.70/bugfix/ARSN-396-standardize-actionMapBP-and-chainbackend 2024-02-14 11:13:28 +00:00
williamlardier 6b64f50450 ARSN-396: use request context action map for the bucket policies
The S3 Bucket Policies checks must support and evaluate the same
actions as the ones sent to the IAM checks.
Today, we only check a subset of it, so we missed the Versioned
APIs.
2024-02-14 12:02:45 +01:00
Jonathan Gramain 8dc3ba7ca6 bf: ARSN-394 GapCache: invalidate staging gaps
In the GapCache._removeOverlappingGapsBeforeExpose() helper, remove
the gaps from the *staging* set that overlap with any of the staging
or frozen updates, in addition to removing the gaps from the frozen
set.

Without this extra invalidation, it's still possible to have gaps
created within the exposure delay that miss some invalidation,
resulting in stale gaps in the cache.

Modify an existing unit test to cover this case by adding extra wait
time to ensure `_removeOverlappingGapsBeforeExpose()` is called once
after the invalidating update but before the `setGap()` call.
2024-02-13 10:37:40 -08:00
bert-e 3c2283b062 Merge branch 'bugfix/ARSN-393-infiniteLoopInCoalesceGapChain' into tmp/octopus/w/8.1/bugfix/ARSN-393-infiniteLoopInCoalesceGapChain 2024-02-13 18:15:57 +00:00
Jonathan Gramain a6a76acede bf: ARSN-393 infinite loop in GapSet._coalesceGapChain()
The `GapSet._coalesceGapChain()` helper could loop infinitely when
encountering a single-key gap (typically an unchained single gap).
2024-02-12 12:00:04 -08:00
Jonathan Gramain 6a116734a9 ARSN-388 [fixup 8.1] merge fix: add missing files 2024-02-09 10:10:43 -08:00
Jonathan Gramain 9325ea4996 Merge remote-tracking branch 'origin/feature/ARSN-391-gapCache' into w/8.1/feature/ARSN-391-gapCache 2024-02-09 10:00:08 -08:00
Jonathan Gramain 33ba89f0cf Merge remote-tracking branch 'origin/feature/ARSN-388-gapSet' into w/8.1/feature/ARSN-388-gapSet 2024-02-09 09:45:36 -08:00
Jonathan Gramain c67331d350 ft: ARSN-391 GapCache: gap caching and invalidation
Introduce a new helper class GapCache that sits on top of a set of
GapSet instances and delays exposure of gaps by a specific time to
guarantee atomicity wrt. invalidation from overlapping PUT/DELETE
operations.

The way it is implemented is the following:

- three update sets are used, each containing a GapSet instance and a
  series of key update batches: `staging`, `frozen`, and `exposed`

- `staging` receives the new gaps from `setGap()` calls and the
  updates from `removeOverlappingGaps()`

- `lookupGap()` only returns gaps present in `exposed`

- every `exposureDelayMs` milliseconds, the following happens:

  - the `frozen` gaps get invalidated by all key updates buffered in
    either `staging` or `frozen` update sets

  - the remainder of the `frozen` gaps is merged into `exposed` (via
    internal calls to `exposed.setGap()`)

  - the `staging` update set becomes the new `frozen` update set (both
    the gaps and the key updates)

  - a new `staging` update set is instantiated, empty

This guarantees that any gap set via `setGap()` is only exposed after
a minimum of `exposureDelayMs`, and a maximum of twice that time (plus
extra needed processing time). Also, keys passed to
`removeOverlappingGaps()` are kept in memory for at least `exposureDelayMs`
so they can invalidate new gaps that are created in this time frame.

This, combined with the guarantee that setGap() is never called after
`exposureDelayMs` has passed since the listing process started (from a
DB snapshot), ensures that all gaps not yet exposed have been
invalidated by any overlapping PUT/DELETE operation, hence exposed
gaps are still valid at the time they are exposed. They may still be
invalidated thereafter by future calls to removeOverlappingGaps().

The number of gaps that can be cached is bounded by the 'maxGaps'
attribute. The current strategy consists of simply not adding new gaps
when this limit is reached, solely relying on removeOverlappingGaps()
to make room for new gaps. In the future we could consider
implementing an eviction mechanism to remove less used gaps and/or
with smaller weights, but today the cost vs. benefit of doing this is
unclear.
2024-02-09 09:34:37 -08:00
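A condensed, hedged sketch of the three-set rotation described above (shapes are illustrative; the real GapCache stores gaps in GapSet instances):

```ts
type Gap = [firstKey: string, lastKey: string];
type UpdateSet = { gaps: Gap[]; updates: string[] };

class GapCacheSketch {
    private staging: UpdateSet = { gaps: [], updates: [] };
    private frozen: UpdateSet = { gaps: [], updates: [] };
    private exposed: Gap[] = [];

    constructor(exposureDelayMs: number) {
        setInterval(() => this.rotate(), exposureDelayMs);
    }
    setGap(first: string, last: string): void { this.staging.gaps.push([first, last]); }
    removeOverlappingGaps(keys: string[]): void { this.staging.updates.push(...keys); }
    lookupGap(key: string): Gap | null {
        // only gaps in the exposed set are ever returned
        return this.exposed.find(([f, l]) => f <= key && key <= l) ?? null;
    }
    private rotate(): void {
        // invalidate frozen gaps overlapping any key update buffered in
        // either the staging or the frozen update set
        const updates = [...this.staging.updates, ...this.frozen.updates];
        const valid = this.frozen.gaps.filter(
            ([f, l]) => !updates.some(k => f <= k && k <= l));
        this.exposed.push(...valid);              // merge remainder into exposed
        this.frozen = this.staging;               // staging becomes the new frozen set
        this.staging = { gaps: [], updates: [] }; // fresh empty staging set
    }
}
```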
Jonathan Gramain 6d6f1860ef ft: ARSN-388 implement GapSet (caching of listing gaps)
The GapSet class is intended for caching listing "gaps", which are
contiguous series of current delete markers in buckets, although the
semantics can allow for other uses in the future.

The end goal is to increase the performance of listings on V0 buckets
when a lot of delete markers are present, as a temporary solution
until buckets are migrated to V1 format.

This data structure is intended to be used by a GapCache instance,
which implements specific caching semantics (to ensure consistency
wrt. DB updates for example).
2024-02-09 09:32:49 -08:00
Nicolas Humbert cbe6a5e2d6 ARSN-392 Import the V0 processVersionSpecificPut from Metadata
This logic is used by CRR replication feature to BackbeatClient.putMetadata on top of a null version
2024-02-07 16:19:41 +01:00
Mickael Bourgois be1557d972
ARSN-390: Bump version 2024-02-05 20:03:24 +01:00
Mickael Bourgois a03463061c
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-390-scuba-arn' into w/8.1/improvement/ARSN-390-scuba-arn 2024-02-05 20:03:10 +01:00
Mickael Bourgois 8ad0ea73a7
ARSN-390: Bump version 2024-02-05 17:45:22 +01:00
Mickael Bourgois a94040d13b
Merge remote-tracking branch 'origin/improvement/ARSN-390-scuba-arn' into w/7.70/improvement/ARSN-390-scuba-arn 2024-02-05 17:45:06 +01:00
Mickael Bourgois f265ed6122
ARSN-390: Bump version 2024-02-05 14:07:31 +01:00
Mickael Bourgois 7301c706fd
ARSN-390: Apply suggestion from code review 2024-02-05 14:07:31 +01:00
Mickael Bourgois bfc8dee559
ARSN-390: Add scuba arn for policy
Relates to SCUBA-76 and SCUBA-77
2024-01-26 16:33:32 +01:00
Frédéric Meinnel 5a5ef7c572 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-386/fix-generate-v4-headers-for-put-with-body-requests' into w/8.1/bugfix/ARSN-386/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 13:15:43 +01:00
Frédéric Meinnel 918c2c5473 Merge remote-tracking branch 'origin/bugfix/ARSN-386/fix-generate-v4-headers-for-put-with-body-requests' into w/7.70/bugfix/ARSN-386/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 12:25:28 +01:00
Frédéric Meinnel 29f39ab480 ARSN-386: version bump 2024-01-19 11:07:20 +01:00
Frédéric Meinnel b7ac7f4616 ARSN-385: Fix generateV4Headers for HTTP PUT with body 2024-01-19 11:07:20 +01:00
Frédéric Meinnel f8ce90f9c3 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-385/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.1/bugfix/ARSN-385/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-16 17:58:09 +01:00
Frédéric Meinnel 5734d11cf1 Merge remote-tracking branch 'origin/bugfix/ARSN-385/fully-align-with-aws-on-lifecycle-configuration-dates' into w/7.70/bugfix/ARSN-385/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-16 17:47:02 +01:00
Frédéric Meinnel 4da59769d2 ARSN-385: Version bump 2024-01-16 17:40:34 +01:00
Frédéric Meinnel 60573991ee ARSN-385: Lifecycle configuration dates aligned with XML spec and ISO-8601 2024-01-12 18:45:24 +01:00
Jonathan Gramain 6f58f9dd68 Merge remote-tracking branch 'origin/improvement/ARSN-381-cluster-rpc-helpers' into w/8.1/improvement/ARSN-381-cluster-rpc-helpers 2024-01-11 16:34:37 -08:00
Jonathan Gramain 3b9c93be68 ARSN-381 bump arsenal version 2024-01-11 16:26:33 -08:00
Jonathan Gramain 081af3e795 ARSN-381 RPC command system between cluster workers
When using the cluster module, new processes are forked and are
dispatched workloads, usually HTTP requests. The ClusterRPC module
implements an RPC system to send commands to all cluster worker
processes at once from any particular worker, and retrieve their
individual command results, like a distributed map operation.

The existing cluster IPC channel is setup from the primary to each
worker, but not between workers, so there has to be a hop by the
primary.

How a command is treated:

- a worker sends a command message to the primary

- the primary then forwards that command to each existing worker
  (including the requestor)

- each worker then executes the command and returns a result or an
  error

- the primary gathers all workers results into an array

- finally, the primary dispatches the results array to the original
  requesting worker callback

The original use of this feature is in Metadata DBD (bucketd) to
implement a global cache refresh across worker processes.
2024-01-11 16:26:33 -08:00
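A rough sketch of the primary hop using Node's cluster module; the message shapes and names are illustrative, not the ClusterRPC module's actual API:

```ts
import cluster from 'cluster';

type RpcMsg =
    | { type: 'rpc'; id: number; command: string }
    | { type: 'rpc-result'; id: number; result: unknown };

if (cluster.isPrimary) {
    const pending = new Map<number, { from: number; results: unknown[]; waiting: number }>();
    cluster.on('message', (worker, msg: RpcMsg) => {
        if (msg.type === 'rpc') {
            // fan the command out to every worker, including the requestor
            const workers = Object.values(cluster.workers ?? {});
            pending.set(msg.id, { from: worker.id, results: [], waiting: workers.length });
            workers.forEach(w => w?.send(msg));
        } else {
            const p = pending.get(msg.id);
            if (!p) return;
            p.results.push(msg.result);
            if (--p.waiting === 0) {
                // all workers answered: dispatch the results array to the requestor
                cluster.workers?.[p.from]?.send({ type: 'rpc-results', id: msg.id, results: p.results });
                pending.delete(msg.id);
            }
        }
    });
}
```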
bert-e 042f541a45 Merge branches 'w/8.1/bugfix/ARSN-384-redirect-error-body' and 'q/2207/7.70/bugfix/ARSN-384-redirect-error-body' into tmp/octopus/q/8.1 2024-01-10 10:23:22 +00:00
bert-e 63bf2cb5b1 Merge branch 'bugfix/ARSN-384-redirect-error-body' into q/7.10 2024-01-10 10:23:21 +00:00
bert-e 39f42d9cb4 Merge branches 'w/7.70/bugfix/ARSN-384-redirect-error-body' and 'q/2207/7.10/bugfix/ARSN-384-redirect-error-body' into tmp/octopus/q/7.70 2024-01-10 10:23:21 +00:00
Mickael Bourgois 02f126f040
ARSN-384: fix after merge 8.1 param name 2024-01-10 11:15:38 +01:00
bert-e 1477a70e47 Merge branch 'w/7.70/bugfix/ARSN-384-redirect-error-body' into tmp/octopus/w/8.1/bugfix/ARSN-384-redirect-error-body 2024-01-10 09:51:16 +00:00
Mickael Bourgois 7233ec2635
Merge remote-tracking branch 'origin/bugfix/ARSN-384-redirect-error-body' into w/7.70/bugfix/ARSN-384-redirect-error-body 2024-01-10 10:50:15 +01:00
Mickael Bourgois c4b44016bc
ARSN-384: bump version 2024-01-10 10:46:26 +01:00
Mickael Bourgois a78a84faa7
ARSN-384: update error check 2024-01-10 10:46:26 +01:00
Mickael Bourgois c3ff6526a1
ARSN-384: ignore 302 statusMessage override
Keep Found instead of Moved Temporarily
And apply code review suggestion
2024-01-10 10:46:26 +01:00
Frédéric Meinnel 59d47a3e21 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight' into w/8.1/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight 2024-01-09 10:35:12 +01:00
Frédéric Meinnel 6b61347c29 Merge remote-tracking branch 'origin/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight' into w/8.1/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight 2024-01-08 18:22:57 +01:00
Mickael Bourgois 4bf29524eb
ARSN-384: test redirect on error 2024-01-08 17:49:22 +01:00
Mickael Bourgois 9aa001c4d1
ARSN-384: implement a redirect with error and body 2024-01-08 17:49:22 +01:00
Frédéric Meinnel aea4663ff2 Merge remote-tracking branch 'origin/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight' into w/7.70/bugfix/ARSN-383-lifecycle-configuration-dates-must-be-set-to-midnight 2024-01-08 15:47:01 +01:00
Frédéric Meinnel 5012e9209c ARSN-383: Version bump 2024-01-08 15:28:06 +01:00
Frédéric Meinnel 1568ad59c6 ARSN-383: Dates must now be set to midnight for lifecycle configurations. 2024-01-08 15:27:23 +01:00
bert-e c2f6b45116 Merge branch 'w/7.70/bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/8.1/bugfix/ARSN-382-redirect-root-empty 2024-01-03 08:52:09 +00:00
bert-e a0322b131c Merge branch 'bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/7.70/bugfix/ARSN-382-redirect-root-empty 2024-01-03 08:52:08 +00:00
Mickael Bourgois b5487e3c94
ARSN-382: add unit tests for redirect request 2024-01-03 09:51:20 +01:00
bert-e 993b9e6093 Merge branch 'w/7.70/bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/8.1/bugfix/ARSN-382-redirect-root-empty 2024-01-02 18:09:07 +00:00
bert-e ddd6c87831 Merge branch 'bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/7.70/bugfix/ARSN-382-redirect-root-empty 2024-01-02 18:09:06 +00:00
Mickael Bourgois f2974cbd07
ARSN-382: update redirect location condition
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2024-01-02 19:08:59 +01:00
bert-e 7440794d93 Merge branch 'w/7.70/bugfix/ARSN-382-redirect-root-empty' into tmp/octopus/w/8.1/bugfix/ARSN-382-redirect-root-empty 2024-01-02 10:53:55 +00:00
Mickael Bourgois 1efab676bc
Merge remote-tracking branch 'origin/bugfix/ARSN-382-redirect-root-empty' into w/7.70/bugfix/ARSN-382-redirect-root-empty
# Conflicts:
#	package.json
2024-01-02 11:53:05 +01:00
Mickael Bourgois a167e1d5fa
ARSN-382: bump version 2024-01-02 11:17:55 +01:00
Mickael Bourgois c7e153917a
ARSN-382: fix empty location when redirect to /
If an object has a redirect to /, it is sliced out
and the function receives an empty string as redirectKey.
Therefore, if redirectLocation consists of the single character /,
the Location header would be empty.
2024-01-02 10:52:50 +01:00
bert-e 087369b37d Merge branches 'w/8.1/improvement/ARSN-363-retention-day-condition' and 'q/2191/7.70/improvement/ARSN-363-retention-day-condition' into tmp/octopus/q/8.1 2023-12-26 10:55:59 +00:00
bert-e 2d2030dfe4 Merge branches 'w/7.70/improvement/ARSN-363-retention-day-condition' and 'q/2191/7.10/improvement/ARSN-363-retention-day-condition' into tmp/octopus/q/7.70 2023-12-26 10:55:58 +00:00
bert-e 45cc4aa79e Merge branch 'improvement/ARSN-363-retention-day-condition' into q/7.10 2023-12-26 10:55:58 +00:00
Will Toozs da80e12dab
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-363-retention-day-condition' into w/8.1/improvement/ARSN-363-retention-day-condition 2023-12-26 11:49:28 +01:00
Will Toozs a7cf94d0fe
Merge remote-tracking branch 'origin/improvement/ARSN-363-retention-day-condition' into w/7.70/improvement/ARSN-363-retention-day-condition 2023-12-26 11:47:28 +01:00
Jonathan Gramain 2a82095d03 ARSN-379 [8.1] bump arsenal version 2023-12-22 12:41:17 -08:00
Jonathan Gramain 44b3d25459 ARSN-379 [8.1] adapt skipping delete markers in DelimiterMaster
With the MongoDB implementation there may be delete markers in the
masters prefix to go through.

Replace the original implementation for this by a new implementation
compatible with the latest DelimiterMaster changes.

Note: changed the returned value from FILTER_SKIP to FILTER_ACCEPT:
this is the correct logic as there is no range to skip, only the key
shouldn't be added to the results.
2023-12-22 12:41:01 -08:00
Jonathan Gramain f1d6e30fb6 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-379-cherry-pick-ARSN-284-and-ARSN-293' into w/8.1/bugfix/ARSN-379-cherry-pick-ARSN-284-and-ARSN-293 2023-12-22 12:40:18 -08:00
Jonathan Gramain 9186643caa ARSN-379 [7.70] bump arsenal version 2023-12-22 12:35:57 -08:00
Jonathan Gramain 485a76ceb9 ARSN-379 [7.70] import FilterState and FilterReturnValue types from Delimiter 2023-12-22 12:35:44 -08:00
Jonathan Gramain 00109a2c44 ARSN-379 [7.70] adapt `DelimiterCurrent` to changes in `Delimiter`/`DelimiterMaster`
The internals of `DelimiterMaster` have changed with the S3C-4682
implementation, which requires changes in the `DelimiterCurrent` class
that inherits from it.

Removed the unit test passing a key with a different prefix, because
the prefix check was removed in `DelimiterMaster` as no such key can
be passed by construction of the listing parameters.
2023-12-22 12:35:44 -08:00
Jonathan Gramain aed1247825 Merge remote-tracking branch 'origin/bugfix/ARSN-379-cherry-pick-ARSN-284-and-ARSN-293' into w/7.70/bugfix/ARSN-379-cherry-pick-ARSN-284-and-ARSN-293 2023-12-22 12:35:34 -08:00
Jonathan Gramain 0507c04ce9 ARSN-284 bump arsenal version 2023-12-22 12:13:09 -08:00
Will Toozs 62736abba4
ARSN-363: update package version 2023-12-21 17:24:59 +01:00
Will Toozs 97118f09c4
ARSN-363: update test 2023-12-21 17:24:46 +01:00
Will Toozs 5a84a8c0ad
ARSN-363: add object retention days logic to structures 2023-12-21 17:24:34 +01:00
bert-e 37234efd14 Merge branch 'improvement/ARSN-380-delimiterVersionsInheritFromExtension' into tmp/octopus/w/8.1/improvement/ARSN-380-delimiterVersionsInheritFromExtension 2023-12-20 20:01:59 +00:00
Jonathan Gramain 2799381ef2 ARSN-380 rf: DelimiterVersions class inherits from Extension
Small refactor of DelimiterVersions class to inherit from the base
class Extension rather than Delimiter. Copy the missing fields and
methods from `Delimiter`.

This prepares for merging ARSN-379 which would otherwise cause a lot
of incompatibilities due to changes in the interface of
`DelimiterVersions` from S3C-8242.

Other minor tweaks:

- reset `nextVersionIdMarker` when skipping a common prefix

- rename `this.Contents` to `this.Versions` as we don't need to keep
  compatibility with `Delimiter`, and as it is the name used in the
  final result
2023-12-20 11:57:57 -08:00
Jonathan Gramain a3f13e5387 ARSN-284 fix and refactor Delimiter + DelimiterMaster
Large refactor of Delimiter and DelimiterMaster classes to typescript,
that fixes most known issues with the previous implementation.

The new implementation uses explicit states to manage various
conditions, instead of relying on a bunch of internal variable values
and maintaining their state. It allows a more robust code flow and
fixes issues related to prefix skipping that were hard to fix by
keeping the overall logic of the previous implementation.

This refactor brings the following bug fixes and enhancements:

- prefixes with delete markers and non-deleted objects are
  now always included in CommonPrefixes (S3C-7248)

- no more duplication of internal range listings when doing skip-scan
  over prefixes (discovered when analyzing regressions for S3C-4682)

- the skip-scan mechanism for prefixes and versions is no
  longer disturbed by delete markers and PHD keys (S3C-2930)

- NextMarker is now always set to a valid, listed or listable key
  (that may still be hidden under a CommonPrefix); there is no more
  manipulation of the next marker to avoid corner cases with keys ending
  with a prefix (S3C-4682 and S3C-7274)

- deleting a delete marker immediately allows the new current version
  to be visible in the listing (S3C-7272)

- Expecting lower CPU usage overall, as the number of checks to do in
  each state is reduced (may help to reduce the load and reduce impact
  of cases such as S3C-3946)

- Uses TypeScript to allow more sanity checks

This bugfix and refactor work has been re-integrated in the code by
cherry-picking the following commits:

- f62c3d22 ARSN-252 - listing bug in DelimiterMaster
- 87b060f2 ARSN-269 - listing bug in versioned bucket edge cases.
- 4f0a8468 ARSN-284 [cleanup] remove unused test dependency
- 7b648962 ARSN-284 [rf] delimiterVersions.addCommonPrefix()
- 4d7eaee0 ARSN-284 fix and refactor Delimiter + DelimiterMaster
- 1c07618b ARSN-284 [doc] add state charts
- fbb62ef1 bugfix: ARSN-293 DelimiterMaster: default to vFormat=v0
- 6e5d8d14 bugfix: ARSN-294 use CommonPrefix for NextMarker
2023-12-18 18:13:21 -08:00
Jonathan Gramain f4e83086d6 Merge remote-tracking branch 'origin/bugfix/ARSN-377-v1NullKeyDeleteMarkerNotInCommonPrefixes' into w/8.1/bugfix/ARSN-377-v1NullKeyDeleteMarkerNotInCommonPrefixes 2023-12-14 14:54:24 -08:00
Jonathan Gramain d08a267965 ARSN-377 bump arsenal version 2023-12-14 14:52:11 -08:00
Jonathan Gramain 063a2fb8fb ARSN-377 fix DelimiterNonCurrent and add a unit test 2023-12-14 14:51:48 -08:00
Jonathan Gramain 1bc3360daf ARSN-377 correctly handle null keys with common prefix
When encountering a null key, check for its common prefix before
including it in either the Versions array or CommonPrefixes array,
instead of always including it in the Versions array.

This commit refactors how `DelimiterVersions` works with null keys
slightly: the null key is now inserted at its correct ordered position
by the top-level `filter()` method, and the state machine handlers
only have to deal with sorted versions. Previously, the individual
handlers had to deal with the null key positioning themselves,
resulting in more complex state management.
2023-12-14 14:12:26 -08:00
Jonathan Gramain 206f14bdf5 ARSN-377 improve versioned listing test
Add version IDs to delete marker metadata
2023-12-14 14:12:26 -08:00
Maha Benzekri 74ff1691a0
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-378-BP-authorization' into w/8.1/improvement/ARSN-378-BP-authorization 2023-12-14 11:58:47 +01:00
Maha Benzekri 5ffae72693
Merge remote-tracking branch 'origin/improvement/ARSN-378-BP-authorization' into w/7.70/improvement/ARSN-378-BP-authorization 2023-12-14 11:57:20 +01:00
Maha Benzekri 477a574500
ARSN-378: bump ARSN version 2023-12-14 11:55:54 +01:00
bert-e 2a4ea38301 Merge branch 'w/7.70/improvement/ARSN-378-BP-authorization' into tmp/octopus/w/8.1/improvement/ARSN-378-BP-authorization 2023-12-14 10:55:37 +00:00
bert-e df4c22154e Merge branch 'improvement/ARSN-378-BP-authorization' into tmp/octopus/w/7.70/improvement/ARSN-378-BP-authorization 2023-12-14 10:55:36 +00:00
Maha Benzekri 3642ac03b2
ARSN-378: adding missing authorizations to actionMapBP 2023-12-14 11:52:39 +01:00
Francois Ferrand d800179f86
Release arsenal 8.1.115
Issue: ARSN-374
2023-12-01 17:28:59 +01:00
Francois Ferrand c1c45a4af9
gha: upgrade actions
Issue: ARSN-374
2023-12-01 17:27:41 +01:00
Francois Ferrand da536ed037
ObjectMD: Add transition time
Store transition time when marking the object as ‘transition in
progress’. This is used to compute metrics on the duration of transition.

Issue: ARSN-374
2023-12-01 17:27:41 +01:00
Nicolas Humbert 06901104e8 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-376/probe' into w/8.1/bugfix/ARSN-376/probe 2023-12-01 13:38:36 +01:00
Nicolas Humbert a99a6d9d97 Merge remote-tracking branch 'origin/bugfix/ARSN-376/probe' into w/7.70/bugfix/ARSN-376/probe 2023-12-01 11:36:09 +01:00
Nicolas Humbert 06244059a8 bump version 2023-11-30 14:48:07 +01:00
Nicolas Humbert 079f631711 ARSN-376 Probe response logic should be handled in the handler
Currently, the probe response logic is distributed between Backbeat probe handlers and Arsenal's onRequest method.

This scattered approach causes confusion for developers and results in bugs.

The solution is to centralize the probe response logic exclusively within the Backbeat probe handlers.
2023-11-30 14:39:42 +01:00
Benoit A. 863f45d256
ARSN-373 bump hdclient to 1.1.7 2023-11-20 16:52:41 +01:00
KillianG 4b642cf8b4
Add custom listing parser to MongoDB listObject
Add a test to check that the location param is absent

Issue: ARSN-372
2023-11-17 17:45:10 +01:00
KillianG 2537f8aa9a
Exclude location field from search query in MongoReadStream.
Issue: ARSN-372
2023-11-13 11:07:43 +01:00
Maha Benzekri 7866a1d06f
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-362-implicitDeny' into w/8.1/improvement/ARSN-362-implicitDeny 2023-10-30 16:55:21 +01:00
Maha Benzekri 29ef2ef265
fixup 2023-10-30 16:51:41 +01:00
Maha Benzekri 1509f1bdfe
fix 2023-10-30 16:47:32 +01:00
Maha Benzekri 13d349d211
fix 2023-10-30 16:40:00 +01:00
Maha Benzekri 34a32c967d
Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-362-implicitDeny' into w/8.1/improvement/ARSN-362-implicitDeny 2023-10-30 16:38:08 +01:00
Maha Benzekri 90ab985271
Merge remote-tracking branch 'origin/improvement/ARSN-362-implicitDeny' into w/7.70/improvement/ARSN-362-implicitDeny 2023-10-30 16:35:32 +01:00
Maha Benzekri fbf5562a11
bump arsenal version 2023-10-30 16:08:14 +01:00
bert-e d79ed1b9c8 Merge branch 'w/7.70/improvement/ARSN-362-implicitDeny' into tmp/octopus/w/8.1/improvement/ARSN-362-implicitDeny 2023-10-30 15:01:06 +00:00
bert-e c34ad0dc31 Merge branch 'improvement/ARSN-362-implicitDeny' into tmp/octopus/w/7.70/improvement/ARSN-362-implicitDeny 2023-10-30 15:01:06 +00:00
Maha Benzekri df5ff0f400
ARSN-362: fixups on implicit deny policy tests
As the evaluateAllPolicies function uses the result of the
standardEvaluateAllPolicies function, the redundant tests are removed.
The test that was kept only shows that we use the result.verdict
in the old flow evaluation.
2023-10-30 14:30:28 +01:00
Maha Benzekri 777783171a
ARSN-362: change new function name for clarity 2023-10-30 09:36:56 +01:00
Will Toozs 39988e52e2
ARSN-362: add implicit deny logic to policy eval tests 2023-10-27 17:23:36 +02:00
Will Toozs 79c82a4c3d
ARSN-362: add implicit deny logic to policy evaluation 2023-10-27 17:22:20 +02:00
williamlardier 17b5bbc233 ARSN-370: bump project version 2023-10-06 09:14:13 +02:00
williamlardier 4aa8b5cc6e ARSN-370: handle error cases 2023-10-06 09:13:46 +02:00
williamlardier 5deed6c2e1 ARSN-370: fix memory leak
The MongoDBReadStreams are not properly destroyed in both the
Bucket V1 and V0 cases. In the V1 case, only the piped stream,
the Transform one, is cleaned. In the V0 case, we directly call
the callback without properly cleaning the stream. This leaves the
MongoDB cursors open in both cases, which in turn affects the
mongos memory consumption.
2023-10-06 09:13:46 +02:00
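The principle of the fix, as a minimal sketch with assumed stream names:

```ts
import { Readable, Transform } from 'stream';

// Always destroy the source read stream, not only the Transform it pipes
// into: destroying the source is what closes the underlying MongoDB cursor.
function cleanupStreams(source: Readable, piped?: Transform): void {
    if (piped) {
        piped.destroy();
    }
    source.destroy();
}
```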
Nicolas Humbert af34571771 Merge remote-tracking branch 'origin/bugfix/ARSN-369/skip' into w/8.1/bugfix/ARSN-369/skip 2023-10-05 11:49:01 +02:00
Nicolas Humbert 79b83a9067 ARSN-369 orphan delete marker list interruption skips processed key
In the event of a listing interruption due to reaching the maximum scanned entries, the next “orphan delete marker“ listing skips the currently processed key.
2023-10-05 09:39:45 +02:00
Nicolas Humbert 5fd675a316 Merge remote-tracking branch 'origin/improvement/ARSN-366/listing-scanned-limit' into w/8.1/improvement/ARSN-366/listing-scanned-limit 2023-09-27 17:22:45 +02:00
Nicolas Humbert d84cc974d3 ARSN-366 Limit lifecycle listing on scanned entries 2023-09-27 17:19:03 +02:00
Maha Benzekri dcf0f902ff
Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-367-principal-user-arn-on-policy' into w/8.1/bugfix/ARSN-367-principal-user-arn-on-policy 2023-09-25 12:20:06 +02:00
Maha Benzekri 0177fbe98f
Merge remote-tracking branch 'origin/bugfix/ARSN-367-principal-user-arn-on-policy' into w/7.70/bugfix/ARSN-367-principal-user-arn-on-policy 2023-09-25 12:17:43 +02:00
Maha Benzekri f49cea3914
ARSN-367- bump ARSN version 2023-09-25 12:05:46 +02:00
Maha Benzekri 73c6f41fa3
ARSN-367: principal change on schema and test add
The maximum length should be 2048 characters;
with 31 characters in the fixed-length prefix,
this explains the 2017 max limit put in the schema
2023-09-15 10:27:47 +02:00
bert-e 5b66f8d089 Merge branch 'w/7.70/bugfix/ARSN-365-id-on-resource-policy' into tmp/octopus/w/8.1/bugfix/ARSN-365-id-on-resource-policy 2023-09-13 06:27:36 +00:00
bert-e b61d178b18 Merge branch 'bugfix/ARSN-365-id-on-resource-policy' into tmp/octopus/w/7.70/bugfix/ARSN-365-id-on-resource-policy 2023-09-13 06:27:35 +00:00
Maha Benzekri 9ea39c6ed9
ARSN-365:Id added on policy schema and validator
Signed-off-by: Maha Benzekri <maha.benzekri@scality.com>
2023-09-12 21:01:45 +02:00
Florent Monjalet e51b06cfea ARSN-364: bump arsenal to 8.1.109 2023-08-31 18:46:36 +02:00
Florent Monjalet f2bc701f8c ARSN-364: bump sproxydclient to 8.0.10 (for SPRXCLT-12) 2023-08-31 18:46:06 +02:00
Nicolas Humbert 4d6b03ba47 ARSN-360 bump package version 2023-08-11 13:31:22 -04:00
Nicolas Humbert f03f049683 ARSN-360 Test enable V0 bucket format for Artesca lifecycle listing 2023-08-11 12:37:25 -04:00
Nicolas Humbert d7b51de024 ARSN-360 Enable V0 bucket format for Artesca lifecycle listing 2023-08-11 08:30:55 -04:00
Nicolas Humbert cf51adf1c7 Merge remote-tracking branch 'origin/bugfix/ARSN-359/max-keys' into w/8.1/bugfix/ARSN-359/max-keys 2023-08-08 19:59:22 -04:00
Nicolas Humbert 8a7c1be2d1 ARSN-359 bump arsenal version 2023-08-08 19:50:42 -04:00
Nicolas Humbert c049df0a97 ARSN-359 Fix NextMarker calculation in listLifecycleCurrent
Please note that there are no missing entries in the listing and no extra resources used, since the next listing will do the fetching anyway. The issue lies in how we determine the NextMarker, which has to be compatible with the current logic merged in Artesca.

When using the listLifecycleCurrent function, we need to calculate the NextMarker correctly. Currently, if the maximum number of keys (max-keys) is reached, the function continues fetching more entries, which is unnecessary and should be done by the next listing.

For instance, if max-keys is set to 1, the first entry (key0) is eligible, the next two entries (key1 and key2) are not, and the fourth entry (key3) is eligible, then the listing should stop at key0 and the NextMarker should be key0; instead, the listing kept fetching until key3 and returned key2 as the NextMarker.
2023-08-08 19:50:12 -04:00
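The corrected stopping rule from the example can be sketched as follows (illustrative, not the listLifecycleCurrent implementation):

```ts
function listCurrentSketch(
    entries: string[],
    isEligible: (key: string) => boolean,
    maxKeys: number,
): { contents: string[]; nextMarker: string | null } {
    const contents: string[] = [];
    for (const key of entries) {
        if (isEligible(key)) {
            contents.push(key);
            if (contents.length === maxKeys) {
                // stop right here: NextMarker is key0 in the example above,
                // not key2 after needlessly scanning up to key3
                return { contents, nextMarker: key };
            }
        }
    }
    return { contents, nextMarker: null };
}
```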
Nicolas Humbert 2b2667e29a Merge remote-tracking branch 'origin/improvement/ARSN-358/bump' into w/8.1/improvement/ARSN-358/bump 2023-08-08 13:16:11 -04:00
Nicolas Humbert 8eb4a29c36 ARSN-358 bump version 2023-08-08 13:12:22 -04:00
bert-e 862317703e Merge branch 'improvement/ARSN-356/list-orphan-delete-marker-v0' into tmp/octopus/w/8.1/improvement/ARSN-356/list-orphan-delete-marker-v0 2023-08-04 21:25:02 +00:00
Nicolas Humbert e69a97f240 add comment about this.start 2023-08-04 17:24:07 -04:00
Nicolas Humbert 81e838000f ARSN-356 List lifecycle orphan delete markers supports V0 2023-08-04 17:24:03 -04:00
bert-e 547ce816e0 Merge branch 'improvement/ARSN-355/list-non-current-v0' into tmp/octopus/w/8.1/improvement/ARSN-355/list-non-current-v0 2023-08-04 17:03:23 +00:00
Nicolas Humbert 8256d6debf ARSN-355 List lifecycle non-current versions supports V0 2023-08-04 13:02:35 -04:00
bert-e 15d5e93a2d Merge branch 'improvement/ARSN-354/list-current-v0' into tmp/octopus/w/8.1/improvement/ARSN-354/list-current-v0 2023-08-01 15:56:22 +00:00
Nicolas Humbert 69c1698eb7 ARSN-354 List lifecycle current versions supports V0 bucket format 2023-08-01 11:53:37 -04:00
bert-e d11bcb56e9 Merge branch 'improvement/ARSN-352/list-current' into tmp/octopus/w/8.1/improvement/ARSN-352/list-current 2023-08-01 14:16:19 +00:00
Nicolas Humbert c2cd90925f Adapt delimiterCurrent for S3C Metadata 2023-08-01 10:09:26 -04:00
bert-e 0ed35c3d86 Merge branch 'q/2151/7.70/improvement/ARSN-351/backport' into tmp/normal/q/8.1 2023-07-21 16:40:33 +00:00
bert-e b1723594eb Merge branch 'improvement/ARSN-351/backport' into q/7.70 2023-07-21 16:40:31 +00:00
Nicolas Humbert c0218821ff Merge remote-tracking branch 'origin/improvement/ARSN-351/backport' into w/8.1/improvement/ARSN-351/backport 2023-07-21 12:30:02 -04:00
Nicolas Humbert 49e32758fb ARSN-351 cleanup MongoDB tests 2023-07-21 08:29:16 -04:00
Nicolas Humbert e13d0f5ed8 ARSN-351 support listLifecycleObject in BucketFileInterface 2023-07-21 08:29:16 -04:00
Nicolas Humbert 0d5907956f ARSN-351 export DelimiterNonCurrent and DelimiterOrphanDeleteMarker for Metadata 2023-07-21 08:29:16 -04:00
Nicolas Humbert f0c5d60ce9 ARSN-351 export DelimiterCurrent for Metadata 2023-07-21 08:29:16 -04:00
Nicolas Humbert 8c2f4cf357 ARSN-351 support listLifecycleObject in BucketClientInterface 2023-07-21 08:29:16 -04:00
Nicolas Humbert f3f1da9bb3 ARSN-350 Missing Null Version in Lifecycle List of Non-Current Versions
Note: We only support the v1 bucket format for "list lifecycle" in Artesca.

We made the assumption that the first version key stored the current/latest version, which is true in most cases except for "null" versions. In the case of a "null" version, the current version is stored in the master key alone, rather than being stored in both the master key and a new version key. Here's an example of the key structure:

Mkey0: Represents the null version ID.
VKey0<versionID>: Represents a non-current version.

Additionally, we assumed that the versions for a given key were ordered by creation date, from newest to oldest. However, in Ring S3C, for non-current null versions, the metadata version ID is not part of the metadata key id. Therefore, the non-current null version is listed before the current version that has a version ID. Here's an example of the key ordering:

Mkey0: Master version
Vkey0: "null" non-current version
VKey0<versionID>: Current version

The listing was using only versions, but because those assumptions are incorrect, we now use both the master and the versions for each given key to ensure that we return the correct non-current versions.

(cherry picked from commit 0a4d6f862f)
2023-07-21 08:29:16 -04:00
Nicolas Humbert 036b75842e ARSN-328 Exclude keys based on their dataStoreName
(cherry picked from commit e216c9dd20)
2023-07-21 08:29:16 -04:00
Nicolas Humbert 7ac5774635 ARSN-312 Add logic to list orphan delete markers for Lifecycle
DelimiterOrphan is used for listing orphan delete markers. The Metadata call returns the versions (V prefix). The MD response is then processed to only return the delete markers with zero noncurrent versions before a defined date: beforeDate.

(cherry picked from commit c9a444969b)
2023-07-17 09:06:23 -04:00
Nicolas Humbert f3b928fce0 ARSN-311 Add logic to list non-current versions for Lifecycle
DelimiterNonCurrent is used for listing non-current versions. The Metadata call returns the versions (V prefix). The MD response is then processed to only return the non-current versions that became non-current before a defined date: beforeDate.

(cherry picked from commit 5d018860ec)
2023-07-17 09:06:23 -04:00
Nicolas Humbert 7173a357d9 ARSN-326 Lifecycle listings should handle null version
(cherry picked from commit 4be0a06c4a)
2023-07-17 09:06:23 -04:00
Nicolas Humbert 7c4f461196 bump version 2023-07-14 09:20:58 -04:00
Nicolas Humbert 0a4d6f862f ARSN-350 Missing Null Version in Lifecycle List of Non-Current Versions
Note: We only support the v1 bucket format for "list lifecycle" in Artesca.

We made the assumption that the first version key stored the current/latest version, which is true in most cases except for "null" versions. In the case of a "null" version, the current version is stored in the master key alone, rather than being stored in both the master key and a new version key. Here's an example of the key structure:

Mkey0: Represents the null version ID.
VKey0<versionID>: Represents a non-current version.

Additionally, we assumed that the versions for a given key were ordered by creation date, from newest to oldest. However, in Ring S3C, for non-current null versions, the metadata version ID is not part of the metadata key id. Therefore, the non-current null version is listed before the current version that has a version ID. Here's an example of the key ordering:

Mkey0: Master version
Vkey0: "null" non-current version
VKey0<versionID>: Current version

The listing was using only versions, but because those assumptions are incorrect, we now use both the master and the versions for each given key to ensure that we return the correct non-current versions.
2023-07-14 09:20:36 -04:00
bert-e 8716fee67d Merge branch 'q/2134/7.70/improvement/ARSN-345-optimize-multiobjectdelete-api-and-batching' into tmp/normal/q/8.1 2023-07-12 11:36:29 +00:00
bert-e 2938bb0c88 Merge branch 'improvement/ARSN-345-optimize-multiobjectdelete-api-and-batching' into q/7.70 2023-07-12 11:36:28 +00:00
williamlardier 05c93446ab
Merge remote-tracking branch 'origin/improvement/ARSN-345-optimize-multiobjectdelete-api-and-batching' into w/8.1/improvement/ARSN-345-optimize-multiobjectdelete-api-and-batching 2023-07-12 13:26:01 +02:00
williamlardier 8d758327dd
ARSN-345: bump package version 2023-07-12 13:19:38 +02:00
williamlardier be63c09624
ARSN-345: update tests and logic 2023-07-12 13:19:01 +02:00
Nicolas Humbert 4615875462 ARSN-310 Add logic to list current/master versions for Lifecycle
DelimiterCurrent is used for listing current versions. The Metadata call returns the masters (M prefix) younger than a defined date: beforeDate. No extra filtering action is needed on the Metadata call response.

(cherry picked from commit ecd600ac4b)
2023-06-23 08:11:54 -04:00
Rahul Padigela bdb59a0e63 Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-349-update-node-fcntl' into w/8.1/improvement/ARSN-349-update-node-fcntl 2023-06-20 16:34:38 -07:00
bert-e a89d1d8d75 Merge branch 'improvement/ARSN-349-update-node-fcntl' into tmp/octopus/w/7.70/improvement/ARSN-349-update-node-fcntl 2023-06-20 23:12:07 +00:00
Rahul Padigela 89e5f7dffe improvement: ARSN-349 bump node-fcntl 2023-06-20 16:05:12 -07:00
williamlardier 57e84980c8
ARSN-345: optimize InternalDeleteObject with direct deletion support 2023-06-15 13:43:27 +02:00
williamlardier 51bfd41bea
ARSN-345: optimize MultiDeleteObject with batching support 2023-06-15 13:43:27 +02:00
Nicolas Humbert 96cbaeb821 Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-347/socketio' into w/8.1/bugfix/ARSN-347/socketio 2023-06-08 11:46:23 -04:00
Nicolas Humbert cb01346d07 Merge remote-tracking branch 'origin/bugfix/ARSN-347/socketio' into w/7.70/bugfix/ARSN-347/socketio 2023-06-08 11:44:15 -04:00
Nicolas Humbert 3f24336b83 bump arsenal version 2023-06-08 11:39:11 -04:00
Nicolas Humbert 1e66518a79 ARSN-347 socket.io client is disconnected when sending a big payload
The file backend test fails when migrating the socket.io client from version 2.x to 4.x due to a change in the default value of maxHttpBufferSize. In the newer version, the default value has been reduced from 100MB to 1MB, causing the failure when attempting to initiate, put parts, and complete an MPU (Multipart Upload) with 10,000 parts.
2023-06-08 11:38:59 -04:00
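For reference, the 2.x behavior can be restored on a socket.io 4.x server by raising maxHttpBufferSize explicitly (a real 4.x option; the values are the defaults mentioned above):

    import { createServer } from 'http';
    import { Server } from 'socket.io';

    const httpServer = createServer();
    const io = new Server(httpServer, {
        // 4.x defaults to 1 MB; 1e8 restores the 2.x default of 100 MB,
        // enough for an MPU completion payload with 10,000 parts.
        maxHttpBufferSize: 1e8,
    });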
bert-e 15b68fa9fa Merge branch 'improvement/ARSN-344/bump' into q/8.1 2023-06-07 14:06:37 +00:00
Nicolas Humbert 51703a65f5 ARSN-344 bump version 2023-06-07 08:58:42 -04:00
bert-e 09aaa2d5ee Merge branch 'improvement/ARSN-339/time-progression-factor' into q/8.1 2023-06-07 12:10:45 +00:00
Nicolas Humbert ad39d90b6f ARSN-339 Introduce the time-progression-factor flag
The "time-progression-factor" variable serves as a testing-specific feature that accelerates the progression of time within a system.
By reducing the significance of each day, it enables the swift execution of specific actions, such as expiration, transition, and object locking, which are typically associated with longer timeframes.

This capability allows for efficient testing and evaluation of outcomes, optimizing the observation of processes that would normally take days or even years.
It's important to note that this variable is intended exclusively for testing purposes and is not employed in live production environments, where real-time progression is crucial for accurate results.
2023-06-05 17:17:45 -04:00
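A hypothetical sketch of how such a factor can be applied (names are illustrative, not the actual implementation):

    const ONE_DAY_MS = 24 * 60 * 60 * 1000;

    // With timeProgressionFactor = 86400, one "day" elapses in one second,
    // so day-based lifecycle rules can be observed within a test run.
    function scaledDays(days: number, timeProgressionFactor = 1): number {
        return (days * ONE_DAY_MS) / timeProgressionFactor;
    }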
Jonathan Gramain 20e9fe4adb Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-340-bump-socket-io' into w/8.1/bugfix/ARSN-340-bump-socket-io 2023-05-30 16:06:31 -07:00
bert-e e9c67f7f67 Merge branch 'bugfix/ARSN-340-bump-socket-io' into tmp/octopus/w/7.70/bugfix/ARSN-340-bump-socket-io 2023-05-30 22:49:34 +00:00
Jonathan Gramain af3fd17ec2 bf: ARSN-340 bump socket.io dep to 4.6.1
4.6.1 is the latest version to date of the nodejs socket.io module. It
fixes a bunch of CVEs related to socket.io and xmlhttprequest modules
for the open-source metadata storage.
2023-05-30 15:42:24 -07:00
bert-e 536d474f57 Merge branches 'development/8.1' and 'w/7.70/improvement/ARSN-335-implement-ghas' into tmp/octopus/w/8.1/improvement/ARSN-335-implement-ghas 2023-05-25 17:52:46 +00:00
bert-e 55e68cfa17 Merge branch 'w/7.10/improvement/ARSN-335-implement-ghas' into tmp/octopus/w/7.70/improvement/ARSN-335-implement-ghas 2023-05-25 17:52:45 +00:00
bert-e 67c98fd81b Merge branch 'improvement/ARSN-335-implement-ghas' into tmp/octopus/w/7.10/improvement/ARSN-335-implement-ghas 2023-05-25 17:52:45 +00:00
williamlardier 5cd70d7cf1 ARSN-267: fix failing unit test
NodeJS 16.17.0 introduced a change in the error handling of TLS sockets:
the connection is closed before the response is sent, so handling the
ECONNRESET error in the affected test will unblock it, until this is
fixed by NodeJS, if appropriate.

(cherry picked from commit a237e38c51)
2023-05-25 17:50:00 +00:00
KillianG 25be9014c9
Bump version 8.1.101 2023-05-25 10:00:14 +00:00
KillianG ed42f24580
Add comment to explain
Issue: ARSN-337
2023-05-25 09:00:14 +00:00
KillianG ce076cb3df
Add test to check master versions are skipped in v1 as well
Issue: ARSN-337
2023-05-23 13:30:57 +00:00
KillianG 4bc3de52ff
Filter delete markers from version-suspended buckets
Issue: ARSN-337
2023-05-22 16:40:15 +00:00
bert-e beb5f69be3 Merge branch 'w/7.70/improvement/ARSN-335-implement-ghas' into tmp/octopus/w/8.1/improvement/ARSN-335-implement-ghas 2023-05-19 15:59:38 +00:00
bert-e 5f3540a0d5 Merge branch 'w/7.10/improvement/ARSN-335-implement-ghas' into tmp/octopus/w/7.70/improvement/ARSN-335-implement-ghas 2023-05-19 15:59:38 +00:00
bert-e 654d628d39 Merge branch 'improvement/ARSN-335-implement-ghas' into tmp/octopus/w/7.10/improvement/ARSN-335-implement-ghas 2023-05-19 15:59:37 +00:00
gaspardmoindrot e8a409e337 [ARSN-335] Implement GHAS 2023-05-16 21:21:49 +00:00
Alexander Chan 4093bf2b04 bump version 2023-04-16 19:12:15 -07:00
Alexander Chan d0bb6d5b0c ARSN-334: add mongodb listing of in-progress indexing jobs 2023-04-16 19:12:15 -07:00
bert-e 3f7229eebe Merge branch 'improvement/ARSN-309/addMongoIndexObjectTransforms' into q/8.1 2023-04-15 00:33:49 +00:00
bert-e 7eb9d52da5 Merge branch 'improvement/ARSN-328/excludedDataStoreName' into q/8.1 2023-04-14 21:50:25 +00:00
Nicolas Humbert e216c9dd20 ARSN-328 Exclude keys based on their dataStoreName 2023-04-14 14:42:13 -07:00
williamlardier 0c1afe535b
ARSN-333: bump to 8.1.97 2023-04-14 20:05:57 +02:00
williamlardier 73335ae6ec
ARSN-333: fix callback for adminDb command in sharded mode with mongo driver 2023-04-14 20:05:33 +02:00
Alexander Chan 99c514e8f2 bump version 2023-04-13 15:14:47 -07:00
Alexander Chan cfd9fdcfc4 bump eslint dependency 2023-04-13 15:14:30 -07:00
Alexander Chan d809dac5e3 ARSN-309: add mongodb index object helper methods 2023-04-13 15:14:01 -07:00
williamlardier 53dac8d233
ARSN-329: bump arsenal to 8.1.96 2023-04-13 16:29:37 +02:00
williamlardier 6d5ef07eee
ARSN-329: update latest changes 2023-04-13 16:29:36 +02:00
williamlardier 272166e406
ARSN-329: update tests 2023-04-13 15:44:43 +02:00
williamlardier 3af05e672b
ARSN-329: switch to promises as callbacks are deprecated 2023-04-13 15:44:42 +02:00
williamlardier 8b0c90cb2f
ARSN-329: bump mongodb driver 2023-04-13 15:44:39 +02:00
Alexander Chan dfc9b761e2 bump version 2023-04-12 14:00:31 -07:00
Alexander Chan 04f1eb7f04 ARSN-332: bump sproxydclient dependency 2023-04-12 14:00:31 -07:00
bert-e c204b90847 Merge branch 'feature/ARSN-324-add-s3-lifecycle-expiration-to-existing-object-delete-function' into q/8.1 2023-04-11 13:48:33 +00:00
bert-e 78d6e7fd72 Merge branch 'feature/ARSN-309/supportMongoIndexing' into q/8.1 2023-04-10 17:57:52 +00:00
Alexander Chan 7768fa8d35 ARSN-309: support indexing for mongo 2023-04-07 09:51:49 -07:00
KillianG 4d9a9adc48
Bump arsenal 8.1.94
Issue: ARSN-324
2023-04-07 12:35:50 +00:00
KillianG c4804e52ee
Add unit test for internal delete object function with custom origin OP
Issue: ARSN-324
2023-04-07 12:34:10 +00:00
KillianG 671cf3a679
Add an argument to the internal delete object function for calls made from lifecycle expiration, to avoid raising an objectremoved:delete event
Issue: ARSN-324
2023-04-07 12:34:10 +00:00
Jonathan Gramain 9a5e27f97b Merge remote-tracking branch 'origin/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined' into w/8.1/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined 2023-04-05 15:41:59 -07:00
Jonathan Gramain d744a709d2 ARSN-330 bump arsenal version 2023-04-05 15:40:53 -07:00
Jonathan Gramain a9d003c6f8 Merge remote-tracking branch 'origin/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined' into w/8.1/bugfix/ARSN-330-delimiterVersionsWithKeyContainingUndefined 2023-04-05 15:37:11 -07:00
Jonathan Gramain 99e04bd6fa bf: ARSN-330 fix DelimiterVersions exception when key contains "undefined"
Fix a crash when a listed key contains the string "undefined": the
`key.indexOf` method was used without first checking whether a
delimiter was set, so an undefined delimiter was converted to the
string "undefined", which could be found in a key containing that
string, causing an exception thereafter.
2023-04-05 15:35:35 -07:00
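The root cause is a JavaScript coercion rule: String.prototype.indexOf converts a non-string search value to a string, so an unset delimiter matches the literal text "undefined". A small demonstration, with the shape of the guard:

    const key = 'photos/undefined/2023.jpg';
    // indexOf coerces undefined to the string "undefined": returns 7, not -1.
    key.indexOf(undefined as unknown as string);

    // The fix: only call indexOf when a delimiter is actually set.
    function delimiterIndex(key: string, delimiter?: string): number {
        return delimiter !== undefined ? key.indexOf(delimiter) : -1;
    }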
Jonathan Gramain d3bdddeba3 Merge remote-tracking branch 'origin/improvement/ARSN-320-newObjectMDIsNull2' into w/8.1/improvement/ARSN-320-newObjectMDIsNull2 2023-04-04 09:31:14 -07:00
bert-e 3252f7de03 Merge branch 'feature/ARSN-317-bucketFileNullKeySupport' into tmp/octopus/w/8.1/feature/ARSN-317-bucketFileNullKeySupport 2023-04-04 16:11:03 +00:00
Jonathan Gramain c4cc5a2c3d ARSN-320 bump arsenal version to 7.70.4 2023-04-04 09:10:19 -07:00
Jonathan Gramain fedd0190cc impr: ARSN-320 add "isNull2" attribute to ObjectMD
This new attribute will be set alongside the "isNull" attribute
whenever a Cloudserver supporting null keys sets "isNull" on a master
key.

The purpose of this attribute is to allow Cloudserver to optimize by
not having to check and delete a null versioned key when the null
master has "isNull2" set, as it is guaranteed not to exist.

We need to introduce a new attribute to keep backward compatibility;
the naming is a bit unfortunate, but it has the benefit of being short
and not too specific to a particular optimization, just stating that
it is a "new" null master.
2023-04-04 09:10:19 -07:00
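A hypothetical sketch of the optimization this enables (field access is simplified; not the actual ObjectMD API):

    // When "isNull2" accompanies "isNull" on the master, the null
    // versioned key is guaranteed not to exist, so the check-and-delete
    // step can be skipped entirely.
    function mustCheckNullVersionedKey(
        master: { isNull?: boolean; isNull2?: boolean },
    ): boolean {
        return Boolean(master.isNull) && !master.isNull2;
    }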
Jonathan Gramain 56fd4ad734 ft: ARSN-317 null key support in BucketFile backend
Support null keys in BucketFile backend - null keys are the new way to
store null versions, where a single database key with a specific empty
version ID is used instead of referencing the null version via
"nullVersionId" in object metadata.

Add relevant unit tests to check the new behavior (those were copied
and mechanically adapted from the Metadata repository).
2023-04-04 09:09:05 -07:00
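A sketch of the null-key layout, assuming the usual v0 versioned-key format of key, separator, then version ID (the separator character and the empty-version-ID convention are stated here as assumptions for illustration):

    const VID_SEP = '\u0000';

    function formatVersionKey(key: string, versionId: string): string {
        return `${key}${VID_SEP}${versionId}`;
    }

    // A null key is a versioned key whose version ID is empty:
    const nullKey = formatVersionKey('my-object', ''); // 'my-object\u0000'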
Jonathan Gramain ebe6b65fcf ARSN-317 [rf] cleanup logging
Use "logger.addDefaultFields()" to set bucket, key and options to the
logs, which cleans up log calls.

Log repair errors with `log.error` unless it's ObjNotFound
2023-04-04 09:08:48 -07:00
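A sketch of the pattern, assuming a werelogs-style RequestLogger (the surrounding declarations are illustrative):

    declare const log: {
        addDefaultFields(fields: Record<string, unknown>): void;
        error(msg: string, fields?: Record<string, unknown>): void;
    };
    const bucket = 'my-bucket';
    const key = 'my-object';

    // Attach the fields once; every later log call includes them.
    log.addDefaultFields({ bucket, key });
    log.error('repair failed'); // bucket and key are logged automatically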
Nicolas Humbert 7994bf7b96 ARSN-327 Bump Arsenal 8.1.92 2023-04-03 14:38:45 -04:00
Nicolas Humbert 4be0a06c4a ARSN-326 Lifecycle listings should handle null version 2023-04-03 08:39:08 -04:00
bert-e da7dbdc51f Merge branch 'improvement/ARSN-325-bump-sproxydclient' into q/8.1 2023-03-29 11:56:40 +00:00
Will Toozs 2103ef1237
ARSN-325: bump project version 2023-03-29 13:39:55 +02:00
Will Toozs dbc1c54246
ARSN-325: bump sproxydclient 2023-03-29 13:17:19 +02:00
bert-e 6c22f8404d Merge branch 'feature/ARSN-318-bucketFileListVersionKeys' into tmp/octopus/w/8.1/feature/ARSN-318-bucketFileListVersionKeys 2023-03-28 22:58:53 +00:00
KillianG 00e03f0592
bump 8.1.90
Issue: ARSN-323
2023-03-24 16:05:23 +00:00
KillianG d453758b7d
add s3:lifecycleexpiration to the list of supported notification events
Issue: ARSN-322
2023-03-24 15:50:42 +00:00
KillianG a964dc99c3
Add s3:LifecycleTransition event to the list of supportedNotificationEvents
Issue: ARSN-321
2023-03-24 10:09:02 +00:00
Jonathan Gramain 3a4da1d7c0 ARSN-318 port listVersionKeys() helper for BucketFile backend
Port the listVersionKeys() helper from the Metadata backend to the
BucketFile backend, as a first step towards supporting null keys in
BucketFile.
2023-03-23 10:57:41 -07:00
williamlardier 5074e6c0a4
ARSN-316: bump to 8.1.89 2023-03-21 13:33:32 +01:00
williamlardier bd05dd6918
ARSN-316: add tests for new mongodb routes 2023-03-21 13:32:18 +01:00
williamlardier fbda12ce3c
ARSN-316: individually update bucket capabilities 2023-03-21 13:32:15 +01:00
Nicolas Humbert b02934bb39 ARSN-319 bump arsenal 2023-03-16 13:03:27 -04:00
Nicolas Humbert c9a444969b ARSN-312 Add logic to list orphan delete markers for Lifecycle
DelimiterOrphan used for listing orphan delete markers. The Metadata call returns the versions (V prefix). The MD response is then processed to only return the delete markers with zero noncurrent versions before a defined date: beforeDate.
2023-03-16 12:06:27 -04:00
Nicolas Humbert 5d018860ec ARSN-311 Add logic to list non-current versions for Lifecycle
DelimiterNonCurrent used for listing non-current versions. The Metadata call returns the versions (V prefix). The MD response is then processed to only return the non-current versions that became non-current before a defined date: beforeDate.
2023-03-16 10:03:04 -04:00
bert-e 5838e02096 Merge branch 'feature/ARSN-310/listLifecycleCurrent' into q/8.1 2023-03-16 13:15:55 +00:00
Nicolas Humbert ecd600ac4b ARSN-310 Add logic to list current/master versions for Lifecycle
DelimiterCurrent used for listing current versions. The Metadata call returns the masters (M prefix) younger than a defined date: beforeDate. No extra filtering action is needed on the Metadata call response.
2023-03-16 08:40:14 -04:00
Naren ab0324da05 impr: ARSN-315 bump version to 8.1.87 2023-03-14 17:05:46 -07:00
Naren 2b353b33af Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-315-bump-version-7-10-46' into w/8.1/improvement/ARSN-315-bump-version-7-10-46 2023-03-14 17:02:30 -07:00
Naren 5377b20ceb impr: ARSN-315 bump version to 7.70.3 2023-03-14 16:52:08 -07:00
Naren 21b329b301 Merge remote-tracking branch 'origin/improvement/ARSN-315-bump-version-7-10-46' into w/7.70/improvement/ARSN-315-bump-version-7-10-46 2023-03-14 16:49:03 -07:00
Naren bd76402586 impr: ARSN-315 bump version 7.10.46 2023-03-14 16:25:06 -07:00
bert-e fd57f47be1 Merge branch 'w/7.70/improvement/ARSN-315-disable-default-metrics-collection' into tmp/octopus/w/8.1/improvement/ARSN-315-disable-default-metrics-collection 2023-03-14 23:13:08 +00:00
bert-e 94edf8be70 Merge branch 'improvement/ARSN-315-disable-default-metrics-collection' into tmp/octopus/w/7.70/improvement/ARSN-315-disable-default-metrics-collection 2023-03-14 23:13:08 +00:00
Naren 1d104345fd impr: ARSN-315 expose collecting default metrics as fn
Collecting default metrics should not happen by default; it should be invoked only when needed, as the default behavior causes build errors when multiple components use Arsenal.
2023-03-14 16:08:44 -07:00
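A sketch of the change, wrapping prom-client's real collectDefaultMetrics export behind an explicit call (the wrapper name is illustrative):

    import { collectDefaultMetrics, register } from 'prom-client';

    // Nothing is collected at import time; each component opts in.
    export function enableDefaultMetrics(): void {
        collectDefaultMetrics({ register });
    }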
Jonathan Gramain 58e47e5015 ARSN-306 [8.1 only] skip PHDs in DelimiterVersions V1
Since Artesca uses PHD keys in V1 format, skip them during listing of
versions
2023-03-09 10:03:02 -08:00
Jonathan Gramain 4d782ecec6 Merge remote-tracking branch 'origin/improvement/ARSN-306-delimiterVersionsNullKeySupport' into w/8.1/improvement/ARSN-306-delimiterVersionsNullKeySupport 2023-03-09 10:02:54 -08:00
Jonathan Gramain 655a10ce52 ARSN-306 version bump 2023-03-09 09:57:25 -08:00
Jonathan Gramain 0c7f0e607d ARSN-306 [doc] add state chart for DelimiterVersions
And a markdown file with a summary of what the listing algo does
2023-03-09 09:56:28 -08:00
Jonathan Gramain caa5d53e9b impr: ARSN-306 support null keys in versions listing
Add support for null keys in versions listing:

- when they exist, output the null keys at the appropriate position in
  the Versions array

- handle KeyMarker/VersionIdMarker appropriately as if the null keys
  were real versions. This requires the listing to start at the very
  first version of the next key each time to see the null key, then
  potentially skip over the versions below VersionIdMarker using
  skip-scan optimization.
2023-03-09 09:56:28 -08:00
Jonathan Gramain 21da975187 ARSN-306 [refactor] DelimiterVersions state machine
Use a state machine for cleaner state management in DelimiterVersions
listing algo, with Typescript for enhanced type checking

Also, fix an inefficiency with listing params generated from the
KeyMarker parameter when there is a delimiter: it was listing more
keys than necessary when the KeyMarker equals a CommonPrefix.
2023-03-09 09:56:28 -08:00
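An illustrative shape of the state-machine approach (state names and transitions are made up for this sketch; the real DelimiterVersions states differ):

    enum State { NotSkipping, SkippingPrefix, SkippingVersions }

    function nextState(state: State, key: string, prefix?: string): State {
        // Transitions are explicit per state, instead of being encoded
        // in a set of interdependent boolean flags.
        switch (state) {
        case State.NotSkipping:
        case State.SkippingPrefix:
            return prefix !== undefined && key.startsWith(prefix)
                ? State.SkippingPrefix : State.NotSkipping;
        case State.SkippingVersions:
        default:
            return State.NotSkipping;
        }
    }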
bert-e e0df67a115 Merge branch 'bugfix/ARSN-314-missingDescribeInListObjectsTest' into q/8.1 2023-03-09 17:51:50 +00:00
Naren 7e18ae77e0 impr: ARSN-313 update healthprobe server tests 2023-03-08 19:30:41 -08:00
Naren 4750118f85 impr: ARSN-313 upgrade prom-client 2023-03-08 19:10:34 -08:00
Naren c273c8b823 Merge remote-tracking branch 'origin/w/7.70/improvement/ARSN-313-upgrade-prom-client' into w/8.1/improvement/ARSN-313-upgrade-prom-client 2023-03-08 19:01:16 -08:00
Jonathan Gramain d3b50fafa8 ARSN-314 [test fix] add missing describe() in listObject
Add a missing describe() block to avoid tests running in parallel for
v0 and v1. This usually led to v1 being used for all tests.
2023-03-08 18:38:49 -08:00
Naren 47e68a9b60 Merge remote-tracking branch 'origin/improvement/ARSN-313-upgrade-prom-client' into w/7.70/improvement/ARSN-313-upgrade-prom-client 2023-03-08 17:51:26 -08:00
Naren bd0a199ffa impr: ARSN-313 corrections in ZenkoMetrics
- retain metric config types
- set asPrometheus as async fn
2023-03-08 16:37:38 -08:00
Naren 4b1f69bcbb impr: ARSN-313 bump version to 7.10.45 2023-03-08 15:28:48 -08:00
Naren e3a6814e3f impr ARSN-313 upgrade prom-client 2023-03-08 15:27:30 -08:00
Alexander Chan bf4072151f Merge remote-tracking branch 'origin/w/7.70/bugfix/ARSN-308/addLifecycleUtilsNoncurrentVersionSupport' into w/8.1/bugfix/ARSN-308/addLifecycleUtilsNoncurrentVersionSupport 2023-03-01 05:26:10 -08:00
Alexander Chan f33cd69e45 Merge remote-tracking branch 'origin/bugfix/ARSN-308/addLifecycleUtilsNoncurrentVersionSupport' into w/7.70/bugfix/ARSN-308/addLifecycleUtilsNoncurrentVersionSupport 2023-03-01 04:55:37 -08:00
Alexander Chan acd13ff31b ARSN-308: update lifecycle utils to support noncurrent version
* update lifecycle utils to support noncurrent versions
* remove `console.log`
2023-03-01 04:45:19 -08:00
Alexander Chan bb3e5d078f version bump 2023-03-01 04:44:30 -08:00
Jonathan Gramain 22fa04b7e7 Merge remote-tracking branch 'origin/feature/ARSN-307-bumpVersion' into w/8.1/feature/ARSN-307-bumpVersion 2023-02-23 23:02:17 -08:00
Jonathan Gramain 10a94a0a96 ARSN-307 bump version to 7.70.0 2023-02-23 23:00:46 -08:00
bert-e 4d71a834d5 Merge branch 'w/7.70/feature/ARSN-298/addHeapDataStructure' into tmp/octopus/w/8.1/feature/ARSN-298/addHeapDataStructure 2023-02-24 02:19:16 +00:00
Alexander Chan 054f61d6c1 ARSN-298: add Min/Max heap data structure 2023-02-23 18:19:05 -08:00
Alexander Chan fa26a487f5 Merge remote-tracking branch 'origin/w/7.70/feature/ARSN-298/supportNewerNoncurrentVersions' into w/8.1/feature/ARSN-298/supportNewerNoncurrentVersions 2023-02-23 16:04:47 -08:00
Alexander Chan c1dd2e4946 bump version 2023-02-23 15:03:26 -08:00
Alexander Chan a714103b82 ARSN-298: support lifecycle NewerNoncurrentVersions
updates `LifecycleConfiguration` and `LifecycleRule` to support the
`NewerNoncurrentVersions` parameter for NoncurrentVersionExpirations
2023-02-23 15:00:57 -08:00
Jonathan Gramain 66740f5aba Merge remote-tracking branch 'origin/bugfix/ARSN-284-revert' into w/8.1/bugfix/ARSN-284-revert 2023-01-30 16:16:05 +01:00
Jonathan Gramain a3a83dd89c ARSN-284 bump arsenal version 2023-01-30 16:10:02 +01:00
williamlardier 8db8109391 ARSN-267: fix failing unit test
NodeJS 16.17.0 introduced a change in the error handling of TLS sockets:
the connection is closed before the response is sent, so handling the
ECONNRESET error in the affected test will unblock it, until this is
fixed by NodeJS, if appropriate.

(cherry picked from commit a237e38c51)
2023-01-30 16:10:02 +01:00
Jonathan Gramain d90af29019 Revert "ARSN-252 - listing bug in DelimiterMaster"
This reverts commit f62c3d22ed.
2023-01-30 16:07:06 +01:00
Jonathan Gramain 9d8d98fcc9 Revert "ARSN-269 - listing bug in versioned bucket edge cases."
This reverts commit 87b060f2ae.
2023-01-30 16:07:06 +01:00
Jonathan Gramain 01830d19a0 Revert "ARSN-284 [cleanup] remove unused test dependency"
This reverts commit 4f0a846814.
2023-01-30 16:07:05 +01:00
Jonathan Gramain 49cc018fa4 Revert "ARSN-284 [rf] delimiterVersions.addCommonPrefix()"
This reverts commit 7b64896234.
2023-01-30 16:07:05 +01:00
Jonathan Gramain dd87c869ca Revert "ARSN-284 fix and refactor Delimiter + DelimiterMaster"
This reverts commit 4d7eaee0cc.
2023-01-30 16:07:04 +01:00
Jonathan Gramain df44cffb96 Revert "ARSN-284 [doc] add state charts"
This reverts commit 1c07618b18.
2023-01-30 16:07:03 +01:00
Jonathan Gramain 164053d1e8 Revert "bugfix: ARSN-293 DelimiterMaster: default to vFormat=v0"
This reverts commit fbb62ef17c.
2023-01-30 16:07:03 +01:00
Jonathan Gramain af741c50fb Revert "bugfix: ARSN-294 use CommonPrefix for NextMarker"
This reverts commit 6e5d8d14af.
2023-01-30 16:07:02 +01:00
williamlardier 9c46703b89
ARSN-297: bump to 8.1.82 2023-01-23 16:49:28 +01:00
williamlardier 47672d60ce
ARSN-297: remove Version from request context 2023-01-23 16:46:32 +01:00
Jonathan Gramain 6d41d103e8 Merge remote-tracking branch 'origin/bugfix/ARSN-294-setNextMarkerToCommonPrefix' into w/8.1/bugfix/ARSN-294-setNextMarkerToCommonPrefix 2023-01-12 15:46:32 -08:00
Jonathan Gramain 34ccca9b07 ARSN-294 bump arsenal version 2023-01-12 15:28:28 -08:00
Jonathan Gramain 6e5d8d14af bugfix: ARSN-294 use CommonPrefix for NextMarker
Revert behavior introduced for S3C-7274 that changed NextMarker to an
object key instead of a common prefix; the ticket was invalid, as AWS
does use a CommonPrefix.

Add a unit test for a corner case with a marker inside a prefix that
was only caught in Cloudserver functional tests.
2023-01-12 15:27:50 -08:00
Jonathan Gramain 890ac08dcd Merge remote-tracking branch 'origin/bugfix/ARSN-293-delimiterMasterDefaultsToV0' into w/8.1/bugfix/ARSN-293-delimiterMasterDefaultsToV0 2023-01-08 19:20:46 -08:00
Jonathan Gramain 4cda9f6a6b ARSN-293 bump arsenal version 2023-01-08 19:17:19 -08:00
Jonathan Gramain fbb62ef17c bugfix: ARSN-293 DelimiterMaster: default to vFormat=v0
The BucketFile interface (open-source) does not pass an explicit
vFormat to the constructor of the listing algorithm. DelimiterMaster
does not interpret it correctly and uses vFormat=v1 logic in this
case, resulting in wrong listing results.

Fix it by checking against `this.vFormat` that was set with a default
value by the Delimiter class, instead of directly using the
constructor parameter `vFormat`.
2023-01-08 19:14:39 -08:00
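A simplified sketch of the bug and the fix (class and constant names are illustrative):

    const DEFAULT_VFORMAT = 'v0';

    class Delimiter {
        vFormat: string;
        constructor(vFormat?: string) {
            // The base class applies the default...
            this.vFormat = vFormat ?? DEFAULT_VFORMAT;
        }
    }

    class DelimiterMaster extends Delimiter {
        constructor(vFormat?: string) {
            super(vFormat);
            // ...so the subclass must test this.vFormat, not the raw
            // constructor parameter: `if (vFormat === 'v0')` is false
            // when BucketFile passes no vFormat at all.
            if (this.vFormat === DEFAULT_VFORMAT) {
                // set up v0-specific listing state here
            }
        }
    }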
Jonathan Gramain 4949b7cc35 ARSN-284 [8.1] adjust routesToMem listing test 2023-01-06 16:37:36 -08:00
Jonathan Gramain 2b6fee4e84 Merge remote-tracking branch 'origin/bugfix/ARSN-284-refactorDelimiter' into w/8.1/bugfix/ARSN-284-refactorDelimiter 2023-01-06 16:02:30 -08:00
Jonathan Gramain 8077186c3a ARSN-284 bump version 2023-01-06 15:59:00 -08:00
Jonathan Gramain 1c07618b18 ARSN-284 [doc] add state charts
Add new state charts in GraphViz format for Delimiter and DelimiterMaster
2023-01-06 15:57:51 -08:00
Jonathan Gramain 4d7eaee0cc ARSN-284 fix and refactor Delimiter + DelimiterMaster
Large refactor of Delimiter and DelimiterMaster classes to typescript,
that fixes most known issues with the previous implementation.

The new implementation uses explicit states to manage various
conditions, instead of relying on a bunch of internal variable values
and maintaining their state. It allows a more robust code flow and
fixes issues related to prefix skipping that were hard to fix by
keeping the overall logic of the previous implementation.

This refactor brings the following bug fixes and enhancements:

- prefixes with delete markers and non-deleted objects are
  now always included in CommonPrefixes (S3C-7248)

- no more duplication of internal range listings when doing skip-scan
  over prefixes (discovered when analyzing regressions for S3C-4682)

- the skip-scan mechanism for prefixes and versions is no longer
  disturbed by delete markers and PHD keys (S3C-2930)

- NextMarker is now always set to a valid, listed or listable key
  (that may still be hidden under a CommonPrefix), no more
  manipulation of next marker to avoid corner-cases with keys ending
  with a prefix (S3C-4682 and S3C-7274)

- deleting a delete marker immediately allows the new current version
  to be visible in the listing (S3C-7272)

- Expecting lower CPU usage overall, as the number of checks to do in
  each state is reduced (may help to reduce the load and reduce impact
  of cases such as S3C-3946)

- Uses typescript to allow more sanity checks
2023-01-06 15:57:19 -08:00
williamlardier c460338163
ARSN-291: bump arsenal to 8.1.78 2023-01-04 14:02:35 +01:00
williamlardier f17d52b602
ARSN-291: use separate function to get specific capability 2023-01-04 14:02:35 +01:00
williamlardier a6b234b7a8
ARSN-291: new bucket field for capabilities 2023-01-04 12:19:59 +01:00
williamlardier ff353bb4d6
ARSN-291: document new field for capabilities 2022-12-26 09:26:34 +01:00
williamlardier 0f9c9c2f18
ARSN-289: bump Arsenal to 8.1.77 2022-12-20 17:20:00 +01:00
williamlardier f6b2cf2c1a
ARSN-289: bump projects for better sockets handling 2022-12-20 17:19:41 +01:00
Kerkesni ecafbae36a
bugfix: ARSN-278 bump version 2022-12-19 15:52:11 +01:00
Kerkesni d1cd7e8dba
bugfix: ARSN-278 handle getting versionId when object is versioning suspended
When replicating a versioning-suspended object, we need to specify 'null'
as the encoded versionId, because the versionId contained within the
object's metadata is strictly internal

In the replication processor we use getVersionId() when putting/deleting a tag.
It's used by the mongoClient to fetch the object from MongoDB; here again we
need to specify 'null' to get the versioning-suspended object (cloudserver
already knows how to handle a 'null' versionId and transforms it to undefined
before giving it to the mongoClient)
2022-12-19 15:51:56 +01:00
Francois Ferrand 3da6719200
Release 8.1.75
Issue: ARSN-273
2022-12-16 15:51:07 +01:00
Francois Ferrand c0dd54ef51
Support alternate azure auth method
Issue: ARSN-273
2022-12-16 15:48:17 +01:00
Francois Ferrand 7910792390
Fix commit blocks list 2022-12-16 15:46:07 +01:00
Francois Ferrand a4f4c51290
Fix mpu block id
It must be base64-encoded in the new Azure API.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
Francois Ferrand 66c4bc52b5
AzureClient : Cleanup _errorWrapper
Make better use of async and simplify error handling.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
Francois Ferrand 81cd6652d6
Use new url parser in mongoclient
This fixes a warning in logs. Old parser is deprecated, and will be
removed at some point.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
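For reference, with the 3.x MongoDB Node.js driver the deprecation warning goes away by opting into the new parser (a real driver option):

    import { MongoClient } from 'mongodb';

    const client = new MongoClient('mongodb://localhost:27017', {
        useNewUrlParser: true, // silences the deprecated-parser warning
    });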
Francois Ferrand 2a07f67244
Fix yarn warning
Issue: ARSN-281
2022-12-16 15:46:07 +01:00
Francois Ferrand 1a634015ee
Upgrade azure sdk
There are a few caveats:
* The `proxy.certs` param is not used anymore (though looking at old SDK
code it may not have been supported in the first place)
* `azureStreamingOptions/options` parameters have not been updated. The
old options (`range` and `DateUnModifiedSince`) are still used and
supported, to avoid compatibility issues.

Issue: ARSN-281
2022-12-16 15:46:07 +01:00
williamlardier 7a88a54918
ARSN-277: bump project version 2022-12-14 17:18:19 +01:00
williamlardier b25e620750
ARSN-277: use JS version of httpagent 2022-12-14 17:18:19 +01:00
williamlardier 38ef89cc83
ARSN-277: standard private repos import 2022-12-14 10:03:32 +01:00
williamlardier 1a6c828bfc
ARSN-277: update jest configuration for typescript subdeps 2022-12-13 20:07:48 +01:00
williamlardier 3d769c6960
ARSN-277: ensure install dependencies step is stable 2022-12-13 20:07:48 +01:00
williamlardier 8a27920a85
ARSN-277: update logic according to changes 2022-12-13 20:07:47 +01:00
williamlardier 7642a22176
ARSN-277: bump projects and add httpagent 2022-12-13 20:07:43 +01:00
Jonathan Gramain 7b64896234 ARSN-284 [rf] delimiterVersions.addCommonPrefix()
Copy addCommonPrefix from Delimiter to DelimiterVersions to prepare for the overhaul of the Delimiter class, and make it use this.NextMarker directly
2022-12-09 14:22:40 -08:00
Jonathan Gramain 4f0a846814 ARSN-284 [cleanup] remove unused test dependency 2022-12-09 14:15:13 -08:00
bert-e 8f63687ef3 Merge branch 'feature/ARSN-280-abstract-update' into q/8.1 2022-11-18 15:11:34 +00:00
Kerkesni 26f45fa81a
feature: ARSN-280 bump version to 8.1.73 2022-11-18 16:00:59 +01:00
Kerkesni 76b59057f7
feature: ARSN-280 Set update event's type to delete
The update operation we do just before deleting an object, where
we set the deletion flag, will be used as the deletion event since,
contrary to the actual deletion event, it contains the object metadata.
2022-11-18 16:00:59 +01:00
Kerkesni ae0da3d605
feature: ARSN-279 support S3:ObjectRestore event notifications 2022-11-15 17:21:19 +01:00
bert-e 7c1bd453ee Merge branch 'feature/ARSN-235-update-object-before-deleting-it' into q/7.10 2022-11-14 09:20:17 +00:00
bert-e 162d9ec46b Merge branch 'q/1944/7.10/feature/ARSN-235-update-object-before-deleting-it' into tmp/normal/q/8.1 2022-11-14 09:20:17 +00:00
Kerkesni ccd6462015
feature: ARSN-235 bump version to 8.1.72 2022-11-14 10:10:49 +01:00
Kerkesni 665c77570c
feature: ARSN-235 fix ObjectMD unit tests 2022-11-13 22:16:59 +01:00
Kerkesni 27307b397c
feature: ARSN-235 unskip unit tests in 8.x 2022-11-13 22:16:59 +01:00
Kerkesni 414eada32b
feature: ARSN-235 add functional tests 2022-11-13 22:16:59 +01:00
Kerkesni fdf0c6fe99
feature: ARSN-235 add isPHD flag to ObjectMD model
The "isPHD" flag serves showing that a master object is in a temporary
invalid state that gets repaired asynchronously after a certain period
of time. The repair either updates the metadata or deletes the master
object.

This invalid state happens when deleting the last version of an object.
Previously the "isPHD" flag was set directly inside the object metadata
without going through the ObjectMD model, which is not ideal.
2022-11-13 22:16:58 +01:00
Kerkesni 8cc0be7da2
feature: ARSN-235 add deletion flag to ObjectMD model
The deletion flag indicates that an object is in the process of
being deleted; the object's metadata is updated with the deletion flag
set to true before deleting it, to keep a trace of the latest metadata
inside the oplog, as normal mongo delete events don't contain any metadata.
2022-11-13 22:16:58 +01:00
bert-e 65231633a7 Merge branch 'feature/ARSN-235-update-object-before-deleting-it' into tmp/octopus/w/8.1/feature/ARSN-235-update-object-before-deleting-it 2022-11-13 21:16:18 +00:00
Kerkesni 9a975723c1
feature: ARSN-235 document oplog 2022-11-13 22:04:29 +01:00
Kerkesni ef024ddef3
feature: ARSN-235 fix unit tests 2022-11-13 22:04:29 +01:00
Kerkesni b61138a348
feature: ARSN-235 ignore objects flagged for deletion when listing objects 2022-11-13 22:04:28 +01:00
Kerkesni d852eef08e
feature: ARSN-235 ignore objects flagged for deletion when getting object 2022-11-13 22:04:28 +01:00
Kerkesni fd63b857f3
feature: ARSN-235 update object before deletion
Object deletion no longer directly deletes the object: it first
updates its metadata by setting the deletion flag and originOp, then
proceeds to delete the object.

This is done to keep a trace of the latest object metadata before deletion
in the oplog, as oplog delete events don't hold that information. This
information is needed for both Cold Storage and Bucket Notification.

We also add all the object metadata to the placeholder (PHD) master,
which wasn't previously the case; again, this is done to keep the metadata
in the oplog, as a PHD might get directly deleted in the repair phase.
2022-11-13 22:04:28 +01:00
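A hypothetical sketch of the two-step flow (helper names and the originOp value are illustrative, not Arsenal's actual internals):

    declare function putObjectMD(
        bucket: string, key: string, md: object): Promise<void>;
    declare function removeObjectMD(
        bucket: string, key: string): Promise<void>;

    async function deleteWithOplogTrace(
        bucket: string, key: string, md: Record<string, unknown>,
    ): Promise<void> {
        // 1. Rewrite the metadata with the deletion flag so the oplog
        //    records the full metadata of what is about to be deleted.
        await putObjectMD(bucket, key,
            { ...md, deleted: true, originOp: 's3:ObjectRemoved:Delete' });
        // 2. Only then delete; the raw delete event carries no metadata.
        await removeObjectMD(bucket, key);
    }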
Alexander Chan 92c567414a bump version to 8.1.71 2022-11-07 16:26:38 -08:00
Alexander Chan ec55e39175 ARSN-276: putObjectVerCase3 - add check for v1 format and versioned updates
An erroneous master entry is created when performing a previous-version
update in a v1 format bucket.

added fix:
* check whether the update targets a previous version
* check if the master entry exists
* if the master entry doesn't exist and the operation is an update to a
  previous version, skip the upsert
2022-11-07 16:22:50 -08:00
Jonathan Gramain c343820cae Merge remote-tracking branch 'origin/bugfix/ARSN-274-fixBucketPolicyActionMap' into w/8.1/bugfix/ARSN-274-fixBucketPolicyActionMap 2022-11-01 18:34:44 -07:00
Jonathan Gramain 0f9da6a44e ARSN-274 bump version to 7.10.38 2022-11-01 18:20:58 -07:00
Jonathan Gramain 53a42f7411 bugfix: ARSN-274 move `objectHead` action in shared map
Move the `objectHead` action in the shared action map so that bucket
policies can use it and grant HEAD request access when 's3:GetObject'
permission is present.

Note: relevant tests will be added in Cloudserver, see CLDSRV-291
2022-11-01 18:18:51 -07:00
Jonathan Gramain 9c2bed8034 cleanup: ARSN-274 remove duplicate notification actions 2022-11-01 15:24:37 -07:00
williamlardier 8307a1513e
ARSN-272: bump version 2022-10-03 09:34:51 +02:00
williamlardier 706c2425fe
ARSN-272: support array of arrays for req context 2022-10-03 09:34:47 +02:00
williamlardier 8618d77de9
Merge remote-tracking branch 'origin/improvement/ARSN-270-use-standard-permission-names' into w/8.1/improvement/ARSN-270-use-standard-permission-names 2022-09-27 09:18:08 +02:00
williamlardier 9d614a4ab3
ARSN-270: bump project version 2022-09-27 09:15:28 +02:00
williamlardier 7763685cb0
ARSN-270: change bad permission names 2022-09-27 09:14:53 +02:00
Artem Bakalov 8abe746222 Merge remote-tracking branch 'origin/improvement/ARSN-271-bump-version' into w/8.1/improvement/ARSN-271-bump-version 2022-09-26 20:04:36 -07:00
Artem Bakalov 4c6712741b v7.10.36 2022-09-26 19:43:43 -07:00
bert-e e74cca6795 Merge branch 'bugfix/ARSN-269-listing-bug-versioned-bucket-edge-case' into tmp/octopus/w/8.1/bugfix/ARSN-269-listing-bug-versioned-bucket-edge-case 2022-09-23 23:58:08 +00:00
Artem Bakalov 87b060f2ae ARSN-269 - listing bug in versioned bucket edge cases.
Simplifies testing that was used in ARSN-262. Adds a function allowDelimiterRangeSkip
to determine when a nextContinueMarker range can be skipped when .skipping is called.
This function uses a new state variable prefixKeySeen and the nextContinueMarker to determine
if a range of the form prefix/ can be skipped. An additional check is added when processing
delete markers of the form prefix/foo/(bar) so that the prefix/foo/ range can still be skipped
as an optimization.
2022-09-22 20:03:47 -07:00
bert-e 1427abecb7 Merge branches 'q/1982/7.10/bugfix/ARSN-252-listing-bug-versioned-bucket' and 'w/8.1/bugfix/ARSN-252-listing-bug-versioned-bucket' into tmp/octopus/q/8.1 2022-09-16 10:30:20 +00:00
bert-e 9dc357ab8d Merge branch 'bugfix/ARSN-252-listing-bug-versioned-bucket' into q/7.10 2022-09-16 10:30:19 +00:00
bert-e 4771ce3067 Merge branch 'bugfix/ARSN-252-listing-bug-versioned-bucket' into tmp/octopus/w/8.1/bugfix/ARSN-252-listing-bug-versioned-bucket 2022-09-16 02:26:12 +00:00
Artem Bakalov f62c3d22ed ARSN-252 - listing bug in DelimiterMaster
DelimiterMaster.filter is used to determine when a key range can be skipped in Metadata:RepdServer to optimize listing performance.
When a bucket is created with vFormat=v0, and subsequently a listing is done with a prefix, DelimiterMaster.filter was incorrectly
determining that a range could be skipped if a key was listed such that key == prefix. This case is now correctly handled in filterV0.
2022-09-15 19:05:29 -07:00
williamlardier 4e8a907d99
Merge remote-tracking branch 'origin/improvement/ARSN-267-support-updaterole-action' into w/8.1/improvement/ARSN-267-support-updaterole-action 2022-09-07 13:30:51 +02:00
williamlardier a237e38c51
ARSN-267: fix failing unit test
NodeJS 16.17.0 introduced a change in the error handling of TLS sockets:
the connection is closed before the response is sent, so handling the
ECONNRESET error in the affected test will unblock it, until this is
fixed by NodeJS, if appropriate.
2022-09-07 13:22:30 +02:00
williamlardier 4388cb7790
ARSN-267: bump project version 2022-09-06 10:43:42 +02:00
williamlardier 095a2012cb
ARSN-267: support UpdateRole action 2022-09-06 10:43:30 +02:00
Killian Gardahaut 6f42b3e64c Merge remote-tracking branch 'origin/improvement/ARSN-266-change-bucketownedbyyou-error-message' into w/8.1/improvement/ARSN-266-change-bucketownedbyyou-error-message 2022-08-24 13:27:00 +00:00
Killian Gardahaut 264e0c1aad ARSN-266: change the create bucket 'owned by you' error message 2022-08-24 13:17:29 +00:00
Jonathan Gramain 237872a5a3 Merge remote-tracking branch 'origin/feature/ARSN-265-release-7.10.33' into w/8.1/feature/ARSN-265-release-7.10.33 2022-08-17 16:29:30 -07:00
Jonathan Gramain 0130355e1a ARSN-265 release 7.10.33 2022-08-17 16:26:52 -07:00
bert-e 390fd97edf Merge branch 'bugfix/ARSN-263/cb' into q/8.1 2022-08-17 22:50:41 +00:00
Nicolas Humbert 1c9e4eb93d bump version 2022-08-17 18:43:20 -04:00
bert-e af50ef47d7 Merge branch 'bugfix/ARSN-255-revampEvaluatePolicyForTagConditions' into q/7.10 2022-08-17 22:01:22 +00:00
bert-e a4f163f466 Merge branches 'w/8.1/bugfix/ARSN-255-revampEvaluatePolicyForTagConditions' and 'q/1989/7.10/bugfix/ARSN-255-revampEvaluatePolicyForTagConditions' into tmp/octopus/q/8.1 2022-08-17 22:01:22 +00:00
Nicolas Humbert 4d0cc9bc12 ARSN-263 retrieveData callback should only be called once 2022-08-17 12:41:33 -04:00
bert-e 657f969d05 Merge branch 'bugfix/ARSN-262-fixRequestContextConstructor' into tmp/octopus/w/8.1/bugfix/ARSN-262-fixRequestContextConstructor 2022-08-12 01:24:07 +00:00
Jonathan Gramain 4f2b1ca960 bugfix: ARSN-262 fixes/tests in RequestContext
- remove "postXml" field, as it was a left-over from prototyping

- handle fields related to tag conditions: requestObjTags,
  existingObjTag, needTagEval, those were missing from constructor
  params

- fix a typo in serialization: requersterInfo -> requesterInfo

- new unit tests for RequestContext
  constructor/serialize/deserialize/getters
2022-08-11 18:19:38 -07:00
bert-e b43cf22b2c Merge branch 'bugfix/ARSN-255-revampEvaluatePolicyForTagConditions' into tmp/octopus/w/8.1/bugfix/ARSN-255-revampEvaluatePolicyForTagConditions 2022-08-10 22:04:06 +00:00
Killian Gardahaut 46c44ccaa6 Merge remote-tracking branch 'origin/improvement/ARSN-261-bump-7-10-32' into w/8.1/improvement/ARSN-261-bump-7-10-32 2022-08-10 08:38:02 +00:00
Killian Gardahaut f45f65596b ARSN-261: bump 7.10.32 2022-08-10 08:36:22 +00:00
bert-e 90c63168c1 Merge branches 'w/8.1/improvement/ARSN-257-bump-7-10-31' and 'q/1980/7.10/improvement/ARSN-257-bump-7-10-31' into tmp/octopus/q/8.1 2022-08-10 08:17:10 +00:00
bert-e 10402ae78d Merge branch 'improvement/ARSN-257-bump-7-10-31' into q/7.10 2022-08-10 08:17:10 +00:00
Jonathan Gramain 5cd1df8601 bugfix: ARSN-255 revamp evaluatePolicy logic for tag conditions
Rethink the logic of tag condition evaluation, so that the
"evaluateAllPolicies" function appropriately returns the verdict:
Allow or Deny or NeedTagConditionEval, the latter being when tag
values (request and/or object tags) are needed to settle the verdict
to Allow or Deny, in which case, Cloudserver knows it has to resend
the request to Vault along with tag info.
2022-08-09 18:43:58 -07:00
Jonathan Gramain ee38856f29 ARSN-255 [cleanup] better exports in evaluator.ts
Turn 'const' function objects into actual functions.
2022-08-09 18:29:16 -07:00
Jonathan Gramain fe5f868f43 Merge remote-tracking branch 'origin/improvement/ARSN-260-findConditionKeyInefficiency' into w/8.1/improvement/ARSN-260-findConditionKeyInefficiency 2022-08-09 18:00:46 -07:00
Jonathan Gramain dc229bb8aa improvement: ARSN-260 improve efficiency of findConditionKey
Instead of pre-creating a Map with all supported condition keys before
returning the wanted one, use a switch/case construct to directly
return the attribute from the request context.
2022-08-09 17:54:58 -07:00
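A before/after sketch (the two condition keys shown, aws:SourceIp and aws:SecurityToken, are just examples; the real function covers many more):

    type Ctx = { sourceIp: string; securityToken?: string };

    // Before: a full Map is built on every call, then read once.
    function findConditionKeySlow(key: string, ctx: Ctx): unknown {
        const map = new Map<string, unknown>([
            ['aws:SourceIp', ctx.sourceIp],
            ['aws:SecurityToken', ctx.securityToken],
            // ...dozens of other supported condition keys
        ]);
        return map.get(key);
    }

    // After: return the attribute directly.
    function findConditionKey(key: string, ctx: Ctx): unknown {
        switch (key) {
        case 'aws:SourceIp': return ctx.sourceIp;
        case 'aws:SecurityToken': return ctx.securityToken;
        default: return undefined;
        }
    }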
Killian Gardahaut c0ee81eb7a Merge remote-tracking branch 'origin/improvement/ARSN-257-bump-7-10-31' into w/8.1/improvement/ARSN-257-bump-7-10-31 2022-08-09 15:35:13 +00:00
Killian Gardahaut a6a48e812f ARSN-257: bump 7.10.31 2022-08-09 15:32:33 +00:00
bert-e 604a0170f1 Merge branches 'w/8.1/feature/ARSN-256-supportTaggingAndAclEvents' and 'q/1978/7.10/feature/ARSN-256-supportTaggingAndAclEvents' into tmp/octopus/q/8.1 2022-08-08 19:41:51 +00:00
bert-e 5a8372437b Merge branch 'feature/ARSN-256-supportTaggingAndAclEvents' into q/7.10 2022-08-08 19:41:50 +00:00
Killian Gardahaut 9d8f4793c9 Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-08 13:53:39 +00:00
Killian Gardahaut 69d33a3341 ARSN-253: Speed up the AWS URI encode function 2022-08-08 13:49:18 +00:00
Killian Gardahaut c4ead93bd9 ARSN-253: Speed up the AWS URI encode function 2022-08-05 10:05:41 +00:00
Jonathan Gramain 981c9c1a23 Merge remote-tracking branch 'origin/feature/ARSN-256-supportTaggingAndAclEvents' into w/8.1/feature/ARSN-256-supportTaggingAndAclEvents 2022-08-04 17:00:45 -07:00
Jonathan Gramain 71de409ee9 feature: ARSN-256 support tagging and ACL events
Add the tagging and ACL-related events that can be set in bucket
notification to the list of supported event types

Reference: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-event-types-and-destinations.html#supported-notification-event-types
2022-08-04 16:57:23 -07:00
KillianG 806f988334
Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-03 10:13:53 +02:00
KillianG 976a05c3e5
Merge branch 'w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars' of github.com:scality/arsenal into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-03 10:03:35 +02:00
KillianG 46c24c5cc3
fixup! bugfix/ARSN-253: adding test and better handling of all the possible cases 2022-08-03 10:01:28 +02:00
Killian Gardahaut c5004cb521 Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-02 12:42:30 +00:00
KillianG bc9cfb0b6d
ARSN-254: Fix constness problem 2022-08-02 13:14:56 +02:00
KillianG 4b6e342ff8
Merge remote-tracking branch 'origin/bugfix/ARSN-253-issue-with-special-unicode-chars' into w/8.1/bugfix/ARSN-253-issue-with-special-unicode-chars 2022-08-02 13:09:01 +02:00
Killian Gardahaut d48d4d0c18 bugfix/ARSN-253: adding test and better handling of all the possible cases 2022-08-02 08:43:54 +00:00
Killian Gardahaut 5a32c8eca0 bugfix/ARSN-253:
fixing the problem with unicode special chars by URI-encoding them
The problem was that our URI encode function was not working properly for special chars
2022-08-01 12:55:35 +00:00
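A sketch of the general approach, not necessarily the exact fix: encodeURIComponent already percent-encodes the UTF-8 bytes of multi-byte characters, and AWS-style (SigV4) encoding additionally escapes !'()*:

    function awsUriEncode(str: string): string {
        return encodeURIComponent(str).replace(/[!'()*]/g,
            c => `%${c.charCodeAt(0).toString(16).toUpperCase()}`);
    }

    awsUriEncode("été/★"); // '%C3%A9t%C3%A9%2F%E2%98%85'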
Kerkesni 480f5a4427
bugfix: ARSN-251 bump arsenal to 8.1.64 2022-07-22 15:15:22 +02:00
bert-e 852ae9bd0f Merge branch 'bugfix/ARSN-251-fix-azure-mpuUtils-import' into tmp/octopus/w/8.1/bugfix/ARSN-251-fix-azure-mpuUtils-import 2022-07-22 13:13:02 +00:00
Kerkesni 6c132bca90
bugfix: ARSN-251 fix azure mpuUtils import 2022-07-22 15:07:20 +02:00
Taylor McKinnon 3d77540c47 Merge remote-tracking branch 'origin/bugfix/ARSN-250/fix_getByteRangeFromSpec_edgecase' into w/8.1/bugfix/ARSN-250/fix_getByteRangeFromSpec_edgecase 2022-07-21 11:45:24 -07:00
Taylor McKinnon 3882ecf1a0 bf(ARSN-250): Fix getByteRangeFromSpec when range is 0-0 2022-07-21 11:42:16 -07:00
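'bytes=0-0' validly requests exactly the first byte of an object. A plausible shape of the bug (an assumption; the actual code may differ) is a truthiness check that rejects the legitimate value 0:

    function getByteRangeFromSpec(
        spec: { start?: number; end?: number },
    ): [number, number] | undefined {
        // Buggy form: `if (!spec.start || !spec.end)` also rejects 0.
        if (spec.start === undefined || spec.end === undefined) {
            return undefined;
        }
        return [spec.start, spec.end];
    }

    getByteRangeFromSpec({ start: 0, end: 0 }); // [0, 0]: one byte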
Taylor McKinnon 4f0506cf31 Merge remote-tracking branch 'origin/improvement/ARSN-248/release_7_10_28' into w/8.1/improvement/ARSN-248/release_7_10_28 2022-07-20 14:18:01 -07:00
Taylor McKinnon acf38cc010 impr(ARSN-248): Release 7.10.28 2022-07-20 14:11:56 -07:00
Nicolas Humbert d92a91f076 bump package version 2022-07-19 08:52:56 +02:00
Nicolas Humbert 28779db602 bugfix/ARSN-247 data.delete 404 errors not handled properly 2022-07-19 08:40:02 +02:00
Alexander Chan 8db16c5532 ARSN-246: fix non-current transition rule comparison
fix an issue in which a non-current transition rule is compared to a
transition object
2022-07-12 16:55:26 -07:00
Jordi Bertran de Balanda 33439ec215 Merge remote-tracking branch 'origin/improvement/ARSN-245-release-7.10.27' into w/8.1/improvement/ARSN-245-release-7.10.27 2022-07-12 16:12:19 +02:00
Jordi Bertran de Balanda 785b824b69 ARSN-245 - release 7.10.27 2022-07-11 18:17:45 +02:00
bert-e 9873c0f112 Merge branch 'bugfix/ARSN-244-missing-ismasterkey-export' into tmp/octopus/w/8.1/bugfix/ARSN-244-missing-ismasterkey-export 2022-07-11 16:05:28 +00:00
Jordi Bertran de Balanda 63212e2db3 ARSN-244 - export isMasterKey in versioning 2022-07-11 16:59:29 +02:00
Nicolas Humbert 725a492c2c ARSN-243 bump 8.1.60 2022-07-11 11:51:26 +02:00
Nicolas Humbert e446e3e132 ARSN-242 Fix non-current version transition 2022-07-09 11:46:19 +02:00
bert-e 25c6b34a1e Merge branch 'improvement/ARSN-240/transition' into q/8.1 2022-07-08 17:54:09 +00:00
Jordi Bertran de Balanda 721d7ede93 Merge remote-tracking branch 'origin/improvement/ARSN-241-release-arsenal-7.10.26' into w/8.1/improvement/ARSN-241-release-arsenal-7.10.26 2022-07-08 15:13:10 +02:00
Jordi Bertran de Balanda 3179d1c620 ARSN-241 - release arsenal 7.10.26 2022-07-08 15:07:38 +02:00
Nicolas Humbert fbbba32d69 Introduce x-amz-scal-transition-in-progress object md 2022-07-08 12:47:30 +02:00
Jordi Bertran de Balanda 56c1ba5c21 ARSN-239 - release arsenal 8.1.59 2022-07-08 11:02:52 +02:00
Will Toozs 73431094a3
Merge remote-tracking branch 'origin/bugfix/ARSN-238' into w/8.1/bugfix/ARSN-238 2022-07-08 09:58:02 +02:00
Will Toozs aed1d8419b
ARSN-238: add documentation on listing process 2022-07-08 09:49:32 +02:00
Will Toozs c3cb0aa514
ARSN-238: ignore phd keys with no versions 2022-07-08 09:49:32 +02:00
bert-e 5919d20fa4 Merge branch 'w/8.1/improvement/ARSN-234' into tmp/octopus/q/8.1 2022-07-06 17:18:25 +00:00
Nicolas Humbert 56665069c1 ARSN-237 bump to 8.1.58 2022-07-05 20:14:07 +02:00
Nicolas Humbert 61fe54bd73 ARSN-236 Put bucket replication to dmf is not supported 2022-07-05 15:42:52 +02:00
Francois Ferrand e227d9d5ca
Merge remote-tracking branch 'origin/improvement/ARSN-234' into w/8.1/improvement/ARSN-234 2022-07-01 18:24:06 +02:00
Francois Ferrand a206b5f95e
Remove check with empty bucket name
This test is not relevant, since a bucket cannot have an empty name,
and there is now a check in the AWS SDK which rejects the request directly.

Issue: ARSN-234
2022-07-01 18:18:05 +02:00
Francois Ferrand 9b8f9f8afd
Bump aws-sdk to 2.1005+
Use same spec as other packages (utapi, vault...), and allow automatic
version bump (dependabot).

Issue: ARSN-234
2022-06-30 15:13:09 +02:00
Francois Ferrand cdcc44d272
Merge remote-tracking branch 'origin/improvement/ARSN-233' into w/8.1/improvement/ARSN-233 2022-06-29 12:02:25 +02:00
Francois Ferrand 066be20a9d
Bump azure-storage to 2.10.7
Issue: ARSN-233
2022-06-29 11:45:14 +02:00
Xin LI 5acef6895f Merge remote-tracking branch 'origin/improvement/ARSN-225-add-User-Tag-actions' into w/8.1/improvement/ARSN-225-add-User-Tag-actions 2022-06-20 18:22:20 +02:00
Xin LI 6e3386f693 improvement: ARSN-225- correct UntagUser action name 2022-06-20 12:17:49 +02:00
Xin LI 2c630848ee improvement: ARSN-225-bump version 2022-06-17 12:19:20 +02:00
williamlardier f7d360fe0b
ARSN-227: bump package version and improve tags validation 2022-06-16 19:18:53 +02:00
williamlardier 0a61b43252
ARSN-227: refining type and validation 2022-06-16 19:18:52 +02:00
williamlardier c014e630be
ARSN-227: introduce BucketTag type and improve tag checking 2022-06-16 19:18:52 +02:00
williamlardier a747d5feda
ARSN-227: add unit tests for bucket tags 2022-06-16 19:18:51 +02:00
KillianG 765857071a
ARSN-227: update bucket info model 2022-06-16 19:18:51 +02:00
KillianG 91b39da7e5
ARSN-227: support bucket tags in Bucket Info 2022-06-16 19:18:50 +02:00
williamlardier 2cc6ebe9b4
ARSN-227: Add NoSuchTag error 2022-06-16 19:18:50 +02:00
Xin LI 5634e1bb1f improvement: ARSN-225-add User Tag actionMaps 2022-06-16 10:57:56 +02:00
williamlardier 7887d22d0d
ARSN-232: bump arsenal 2022-06-15 17:25:11 +02:00
williamlardier 2f142aea7f
ARSN-232: add missing permissions for Version 2022-06-15 17:24:51 +02:00
williamlardier 26a046c9b2
ARSN-224: bump package.json to 8.1.54 2022-06-10 14:15:02 +02:00
bert-e ab23d59daf Merge branch 'bugfix/ARSN-224-fix-models-imports' into tmp/octopus/w/8.1/bugfix/ARSN-224-fix-models-imports 2022-06-10 12:00:50 +00:00
williamlardier b744385584
ARSN-224: fix default value for the filter of bucket notif config 2022-06-10 14:00:34 +02:00
bert-e 6950df200a Merge branch 'bugfix/ARSN-224-fix-models-imports' into tmp/octopus/w/8.1/bugfix/ARSN-224-fix-models-imports 2022-06-10 10:20:14 +00:00
williamlardier d407cd702b
ARSN-224: fix missing default for models imports 2022-06-10 12:19:15 +02:00
williamlardier 3265d162a7
ARSN-223: bump package.json version 2022-06-10 11:21:31 +02:00
bert-e 67200d80ad Merge branch 'bugfix/ARSN-223-fix-wgm-default-import' into tmp/octopus/w/8.1/bugfix/ARSN-223-fix-wgm-default-import 2022-06-10 09:20:40 +00:00
williamlardier 20a071fba9
ARSN-223: fix file imports with default 2022-06-10 11:19:52 +02:00
bert-e aa2992cd9f Merge branches 'w/8.1/feature/ARSN-209-type-check-models' and 'q/1920/7.10/feature/ARSN-209-type-check-models' into tmp/octopus/q/8.1 2022-06-10 08:09:10 +00:00
bert-e f897dee3c5 Merge branch 'feature/ARSN-209-type-check-models' into q/7.10 2022-06-10 08:09:09 +00:00
williamlardier 0e2071ed3b
ARSN-221: bump package.json version to 8.1.52 2022-06-09 11:51:24 +02:00
williamlardier ad579b2bd2
Bump SproxydClient version in package.json
Integrates the Node16 bugfix of SproxydClient
in Artesca.
2022-06-09 11:49:16 +02:00
Guillaume Hivert 139da904a7 Merge remote-tracking branch 'origin/feature/ARSN-209-type-check-models' into w/8.1/feature/ARSN-209-type-check-models 2022-06-09 10:15:31 +02:00
Guillaume Hivert e8851b40c0 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-209-type-check-models 2022-06-09 10:15:21 +02:00
Guillaume Hivert 536f36df4e ARSN-209 Fix JSDoc as asked in PR 2022-06-09 10:04:02 +02:00
Naren cd9456b510 bf: ARSN-220 export isMasterKey in versioning module 2022-06-08 17:13:17 -07:00
Alexander Chan 15f07538d8 ARSN-218: enable lifecycle noncurrent version transition 2022-05-28 01:26:49 -07:00
Guillaume Hivert e95d07af12 Merge remote-tracking branch 'origin/feature/ARSN-184-type-check-s3routes' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-25 11:58:41 +02:00
Guillaume Hivert 571128efb1 Fix TODOs 2022-05-25 11:57:13 +02:00
Guillaume Hivert f1478cbc66 Fix TODOs 2022-05-25 11:56:45 +02:00
Guillaume Hivert b21f7f3440 Fix TODOs 2022-05-25 11:55:09 +02:00
Guillaume Hivert ca2d23710f Merge remote-tracking branch 'origin/feature/ARSN-184-type-check-s3routes' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-25 11:28:53 +02:00
Guillaume Hivert 310fd30266 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-25 11:28:44 +02:00
Guillaume Hivert 75c5c855d9 Merge remote-tracking branch 'origin/development/7.10' into HEAD 2022-05-25 11:27:11 +02:00
Guillaume Hivert 8743e9c3ac ARSN-209 Fix imports/exports in tests 2022-05-20 18:08:57 +02:00
bert-e b2af7c0aea Merge branch 'feature/ARSN-209-type-check-models' into tmp/octopus/w/8.1/feature/ARSN-209-type-check-models 2022-05-20 16:05:39 +00:00
Guillaume Hivert 43d466e2fe ARSN-209 Fix import due to rebase of development/7.10 2022-05-20 18:05:30 +02:00
bert-e 58c24376aa Merge branch 'feature/ARSN-209-type-check-models' into tmp/octopus/w/8.1/feature/ARSN-209-type-check-models 2022-05-20 16:02:41 +00:00
Guillaume Hivert efa8c8e611 ARSN-209 Fix linter error in tests 2022-05-20 18:02:32 +02:00
Guillaume Hivert 62c13c1eed ARSN-209 Fix everything in 8.1 2022-05-20 18:00:57 +02:00
Guillaume Hivert ee81fa5829 Merge remote-tracking branch 'origin/feature/ARSN-209-type-check-models' into w/8.1/feature/ARSN-209-type-check-models 2022-05-20 16:57:12 +02:00
Guillaume Hivert 820ad4f8af ARSN-209 Fix imports/exports of models 2022-05-20 16:23:24 +02:00
Guillaume Hivert 34eeecf6de ARSN-209 Type check BucketInfo 2022-05-20 16:23:24 +02:00
Guillaume Hivert 050f5ed002 ARSN-209 Type check NotificationConfiguration 2022-05-20 16:23:20 +02:00
Guillaume Hivert 2fba338639 ARSN-209 Type check LifecycleConfiguration 2022-05-20 16:20:55 +02:00
Guillaume Hivert 950ac8e19b ARSN-209 Type check ObjectMD 2022-05-20 16:20:55 +02:00
Guillaume Hivert 61929bb91a ARSN-209 Type check ReplicationConfiguration 2022-05-20 16:20:55 +02:00
Guillaume Hivert 9175148bd1 ARSN-209 Type check WebsiteConfiguration 2022-05-20 16:20:55 +02:00
Guillaume Hivert 5f08ea9310 ARSN-209 Type check ObjectMDLocation 2022-05-20 16:20:55 +02:00
Guillaume Hivert 707bf795a9 ARSN-209 Type check ObjectLockConfiguration 2022-05-20 16:20:55 +02:00
Guillaume Hivert fcf64798dc ARSN-209 Type check LifecycleRules 2022-05-20 16:20:55 +02:00
Guillaume Hivert 9b607be633 ARSN-209 Type check BucketPolicy 2022-05-20 16:20:55 +02:00
Guillaume Hivert 01a8992cec ARSN-209 Type check BackendInfo 2022-05-20 16:20:55 +02:00
Guillaume Hivert 301541223d ARSN-209 Type check ARN 2022-05-20 16:20:55 +02:00
Guillaume Hivert 4f58a4b2f3 ARSN-210 Restore correct constants in 8.2 to 7.10 backport from ARSN-128 2022-05-20 16:20:55 +02:00
Guillaume Hivert 6f3babd223 ARSN-209 Rename all models to .ts 2022-05-20 16:20:55 +02:00
bert-e d7df1df2b6 Merge branch 'bugfix/ARSN-212-remove-assert-in-decoder' into tmp/octopus/w/8.1/bugfix/ARSN-212-remove-assert-in-decoder 2022-05-20 00:56:02 +00:00
Artem Bakalov 3f26b432b7 ARSN-212 remove assert in decoder in favor of returning an error. 2022-05-19 16:27:05 -07:00
bert-e f59b1b5e07 Merge branches 'w/8.1/feature/ARSN-201-type-check-versioning' and 'q/1894/7.10/feature/ARSN-201-type-check-versioning' into tmp/octopus/q/8.1 2022-05-19 08:51:50 +00:00
bert-e b684bdbaa9 Merge branch 'feature/ARSN-201-type-check-versioning' into q/7.10 2022-05-19 08:51:50 +00:00
Guillaume Hivert a3418603d0 Merge remote-tracking branch 'origin/feature/ARSN-206-type-check-jsutil' into w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-18 11:35:20 +02:00
Guillaume Hivert 947ccd90d9 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-18 11:35:11 +02:00
Guillaume Hivert 23113616d9 Merge remote-tracking branch 'origin/development/7.10' into HEAD 2022-05-18 11:34:01 +02:00
Guillaume Hivert f460ffdb21 Merge remote-tracking branch 'origin/feature/ARSN-207-type-check-string-hash' into w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-18 11:24:56 +02:00
Guillaume Hivert dfa49c79c5 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-18 11:24:41 +02:00
Guillaume Hivert ba94dc7e86 Merge remote-tracking branch 'origin/development/7.10' into HEAD 2022-05-18 11:23:08 +02:00
Guillaume Hivert e582882883 Merge remote-tracking branch 'origin/feature/ARSN-208-type-check-db' into w/8.1/feature/ARSN-208-type-check-db 2022-05-18 11:11:30 +02:00
Guillaume Hivert dd61c1abbe Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-208-type-check-db 2022-05-18 11:10:56 +02:00
Guillaume Hivert 5e8f4f2a30 Merge remote-tracking branch 'origin/development/7.10' into HEAD 2022-05-18 11:09:53 +02:00
Guillaume Hivert a15f8a56e3 Merge remote-tracking branch 'origin/feature/ARSN-201-type-check-versioning' into w/8.1/feature/ARSN-201-type-check-versioning 2022-05-18 11:00:22 +02:00
Guillaume Hivert 43e82f7f33 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-201-type-check-versioning 2022-05-18 11:00:10 +02:00
Guillaume Hivert f54feec57f Merge remote-tracking branch 'origin/development/7.10' into HEAD 2022-05-18 10:59:05 +02:00
bert-e d7625ced17 Merge branches 'w/8.1/feature/ARSN-205-type-check-error-utils' and 'q/1901/7.10/feature/ARSN-205-type-check-error-utils' into tmp/octopus/q/8.1 2022-05-17 15:05:31 +00:00
bert-e bbe5f293f4 Merge branch 'feature/ARSN-205-type-check-error-utils' into q/7.10 2022-05-17 15:05:31 +00:00
Guillaume Hivert a2c1989a5d Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-205-type-check-error-utils 2022-05-17 16:58:11 +02:00
bert-e 8ad1cceeb8 Merge branch 'feature/ARSN-204-type-check-shuffle' into q/7.10 2022-05-17 08:19:19 +00:00
bert-e 24755c8472 Merge branches 'w/8.1/feature/ARSN-204-type-check-shuffle' and 'q/1899/7.10/feature/ARSN-204-type-check-shuffle' into tmp/octopus/q/8.1 2022-05-17 08:19:19 +00:00
bert-e bd970c65ea Merge branch 'bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted' into q/7.10 2022-05-13 14:29:55 +00:00
bert-e fb39a4095e Merge branches 'w/8.1/bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted' and 'q/1866/7.10/bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted' into tmp/octopus/q/8.1 2022-05-13 14:29:55 +00:00
bert-e 32dfba2f89 Merge branch 'bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted' into tmp/octopus/w/8.1/bugfix/ARSN-191-getting-wrong-notification-type-when-master-version-deleted 2022-05-13 14:06:42 +00:00
Kerkesni 43a8772529
bugfix: ARSN-191 fix wrong notification type when master version is deleted 2022-05-13 16:06:05 +02:00
Guillaume Hivert a2ca197bd8 Merge remote-tracking branch 'origin/feature/ARSN-208-type-check-db' into w/8.1/feature/ARSN-208-type-check-db 2022-05-13 15:18:10 +02:00
Guillaume Hivert fc05956983 ARSN-208 Type check DB 2022-05-13 15:16:14 +02:00
Xin LI 3ed46f2d16 improvement: ARSN-180 bump arsenal to 8.1.48 2022-05-13 14:48:51 +02:00
williamlardier 5c936c94ee
ARSN-177: better date check 2022-05-13 14:33:13 +02:00
Xin LI f87101eef6
improvement: ARSN-197 improve code structure 2022-05-13 14:00:38 +02:00
Xin LI 14f86282b6
improvement: ARSN-197 update jsdoc 2022-05-13 13:59:37 +02:00
Xin LI f9dba52d38
improvement: ARSN-197 add index 2022-05-13 13:59:36 +02:00
Yutaka Oishi 6714aed351
improvement: ARSN-197 implement object restore request xml parser 2022-05-13 13:59:36 +02:00
williamlardier 99f96dd377
ARSN-177: accept date as a valid date string after it is stored in the db 2022-05-13 13:59:36 +02:00
williamlardier ae08d89d7d
ARSN-177: set to undefined to clear MD 2022-05-13 13:59:35 +02:00
williamlardier c48e2948f0
ARSN-177: expose new model 2022-05-13 13:59:35 +02:00
williamlardier fc942febca
ARSN-177: better use of undefined and remove unused md field 2022-05-13 13:59:35 +02:00
williamlardier a4fe998c34
ARSN-177: complete unit tests 2022-05-13 13:59:34 +02:00
williamlardier 1460e94488
ARSN-177: return true in validator 2022-05-13 13:59:34 +02:00
williamlardier dcc7117d88
ARSN-177: add tests for new restore field 2022-05-13 13:59:33 +02:00
williamlardier 99cee367aa
ARSN-177: better isValid for class 2022-05-13 13:59:33 +02:00
williamlardier ad5a4c152d
ARSN-177: Introduce archive field in object metadata 2022-05-13 13:59:30 +02:00
bert-e b608c043f5 Merge branch 'feature/ARSN-207-type-check-string-hash' into tmp/octopus/w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-13 11:57:31 +00:00
Guillaume Hivert 8ec4a11a4b ARSN-207 Fix tests and export 2022-05-13 13:57:21 +02:00
bert-e 079c09e1ec Merge branch 'feature/ARSN-207-type-check-string-hash' into tmp/octopus/w/8.1/feature/ARSN-207-type-check-string-hash 2022-05-13 11:55:55 +00:00
Guillaume Hivert c9ff3cd60e ARSN-207 Type check stringHash 2022-05-13 13:55:33 +02:00
bert-e 75f07440ef Merge branch 'feature/ARSN-178-introduce-x-amz-restore-header' into q/8.1 2022-05-13 11:50:07 +00:00
bert-e 3a6bac1158 Merge branch 'feature/ARSN-206-type-check-jsutil' into tmp/octopus/w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-12 15:45:01 +00:00
Guillaume Hivert a15d4cd130 ARSN-206 Add proper index export 2022-05-12 17:44:52 +02:00
bert-e f2d119326a Merge branch 'feature/ARSN-206-type-check-jsutil' into tmp/octopus/w/8.1/feature/ARSN-206-type-check-jsutil 2022-05-12 15:44:27 +00:00
Guillaume Hivert 45ba80ec23 ARSN-206 Type check jsutil 2022-05-12 17:44:07 +02:00
Guillaume Hivert 2a019f3788 ARSN-204 Export errorUtils 2022-05-12 17:26:38 +02:00
bert-e 5e22900c0f Merge branch 'feature/ARSN-205-type-check-error-utils' into tmp/octopus/w/8.1/feature/ARSN-205-type-check-error-utils 2022-05-12 15:25:33 +00:00
Guillaume Hivert 32cff324d8 ARSN-205 Type check errorUtils 2022-05-12 17:24:59 +02:00
Guillaume Hivert e62ed598e8 Merge remote-tracking branch 'origin/feature/ARSN-204-type-check-shuffle' into w/8.1/feature/ARSN-204-type-check-shuffle 2022-05-12 17:20:51 +02:00
Guillaume Hivert cda5d7cfed ARSN-204 Refacto shuffle 2022-05-12 17:19:37 +02:00
bert-e a217ad58e8 Merge branches 'w/8.1/feature/ARSN-186-type-check-clustering' and 'q/1860/7.10/feature/ARSN-186-type-check-clustering' into tmp/octopus/q/8.1 2022-05-12 14:05:31 +00:00
bert-e e46b90cbad Merge branch 'feature/ARSN-186-type-check-clustering' into q/7.10 2022-05-12 14:05:30 +00:00
bert-e 10cf10daa4 Merge branch 'feature/ARSN-185-type-check-patches' into q/8.1 2022-05-12 14:01:57 +00:00
Guillaume Hivert 6ec2f99a91 Merge remote-tracking branch 'origin/development/8.1' into HEAD 2022-05-12 15:53:39 +02:00
bert-e dfd8f20bf2 Merge branch 'q/1858/7.10/feature/ARSN-183-type-check-stream' into tmp/normal/q/8.1 2022-05-12 13:52:32 +00:00
bert-e 435f9f7f3c Merge branch 'feature/ARSN-183-type-check-stream' into q/7.10 2022-05-12 13:52:31 +00:00
Guillaume Hivert fc17ab4299 ARSN-185 Add literal union 2022-05-12 15:51:42 +02:00
Guillaume Hivert 44f398b01f Merge remote-tracking branch 'origin/feature/ARSN-183-type-check-stream' into w/8.1/feature/ARSN-183-type-check-stream 2022-05-12 15:45:01 +02:00
Guillaume Hivert dc32d78b0f Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-183-type-check-stream 2022-05-12 15:43:56 +02:00
Guillaume Hivert 9f1ea09ee6 ARSN-183 Switch index.ts 2022-05-12 15:42:15 +02:00
Guillaume Hivert 073d752ad8 Merge remote-tracking branch 'origin/bugfix/ARSN-97-stop-ignoring-ts-errors-in-yarn-install' into w/8.1/bugfix/ARSN-97-stop-ignoring-ts-errors-in-yarn-install 2022-05-12 15:25:26 +02:00
Guillaume Hivert 37c325f033 ARSN-97 Stop ignoring build errors 2022-05-12 15:20:34 +02:00
bert-e 3454e934f5 Merge branch 'feature/ARSN-201-type-check-versioning' into tmp/octopus/w/8.1/feature/ARSN-201-type-check-versioning 2022-05-12 13:18:29 +00:00
Guillaume Hivert 76bffb2a23 ARSN-201 Fix tests 2022-05-12 15:16:23 +02:00
Guillaume Hivert bd498d414b ARSN-201 Export in index 2022-05-12 15:16:19 +02:00
Guillaume Hivert f98c65ffb4 ARSN-201 Type check VersioningRequestProcessor 2022-05-12 15:16:00 +02:00
Guillaume Hivert eae29c53dd ARSN-201 Type check constants 2022-05-12 15:15:52 +02:00
Guillaume Hivert 8d17b69eb8 ARSN-201 Type check WriteGatheringManager 2022-05-12 15:15:42 +02:00
Guillaume Hivert 938d64f48e ARSN-201 Type check WriteCache 2022-05-12 15:15:28 +02:00
Guillaume Hivert 485ca38867 ARSN-201 Type check VersionID 2022-05-12 15:14:48 +02:00
Guillaume Hivert 355c540510 ARSN-201 Type check Version 2022-05-12 15:14:42 +02:00
Jordi Bertran de Balanda 399fdaaed0 Merge remote-tracking branch 'origin/improvement/ARSN-203-release-7.10.24' into w/8.1/improvement/ARSN-203-release-7.10.24 2022-05-12 15:11:07 +02:00
Jordi Bertran de Balanda d97a218170 ARSN-203 - release 7.10.24 2022-05-12 15:09:45 +02:00
Jordi Bertran de Balanda 5084c8f971 Merge remote-tracking branch 'origin/bugfix/ARSN-199-bugfix-https-proxy-agent' into w/8.1/bugfix/ARSN-199-bugfix-https-proxy-agent 2022-05-12 11:50:33 +02:00
Jordi Bertran de Balanda 82c3330321 ARSN-199 - add https-proxy-agent dependency 2022-05-12 11:28:18 +02:00
williamlardier 3388de6fb6
ARSN-178: set to undefined to clear MD 2022-05-12 09:39:28 +02:00
Guillaume Hivert db70743439 ARSN-201 Rename all files to TS 2022-05-11 15:56:50 +02:00
Alexander Chan 86e9d4a356 ARSN-200: fix probe server readiness path 2022-05-10 14:26:05 -07:00
williamlardier a0010efbdd
ARSN-178: expose new model 2022-05-10 11:09:30 +02:00
Nicolas Humbert 8eb7efd58a ARSN-187 Introduce s3:PutObjectVersion action 2022-05-09 10:47:29 -07:00
williamlardier 25ae7e443b
ARSN-178: remove unused field in test 2022-05-09 16:45:14 +02:00
williamlardier 4afa1ed78d
ARSN-178: better use of undefined and remove unused md field 2022-05-09 16:45:14 +02:00
williamlardier 706dfddf5f
ARSN-178: complete unit tests 2022-05-09 16:45:13 +02:00
williamlardier 4cce306a12
ARSN-178: return true in validator 2022-05-09 16:45:13 +02:00
williamlardier f3bf6f2615
ARSN-178: better isValid for AmzRestore class 2022-05-09 16:45:13 +02:00
williamlardier bbe51b2e5e
ARSN-178: add tests for AmzRestore header 2022-05-09 16:45:12 +02:00
williamlardier 3cd06256d6
ARSN-178: add model in ObjectMD 2022-05-09 16:45:12 +02:00
Yutaka Oishi 6e42216549
ARSN-178: Add AmzRestore header and model 2022-05-09 16:45:11 +02:00
williamlardier e37712e94f
ARSN-195: bump arsenal 2022-05-09 16:28:22 +02:00
williamlardier ac30d29509
ARSN-195: add missing exports for 8.x 2022-05-09 16:25:52 +02:00
Xin LI 1f235d569d improvement: release 8.1.46 2022-05-09 15:32:39 +02:00
williamlardier 320713a764
Merge remote-tracking branch 'origin/bugfix/ARSN-195-fix-ts-migration-bugs' into w/8.1/bugfix/ARSN-195-fix-ts-migration-bugs 2022-05-09 14:59:31 +02:00
williamlardier 4594578919
ARSN-195: add unit test for getMetaHeaders 2022-05-09 14:57:52 +02:00
williamlardier bc0cb0a8fe
ARSN-195: fix arsenal bugs and missing default in require 2022-05-09 14:57:51 +02:00
williamlardier 9e0cee849c
ARSN-195: fix index for s3middleware 2022-05-09 14:57:48 +02:00
Artem Bakalov fbf686feab ARSN-194 disable short version id by default 2022-05-06 20:44:23 +00:00
Guillaume Hivert 4b795a245c ARSN-184 Fix tests 2022-05-06 16:03:36 +02:00
Guillaume Hivert 983d59d565 ARSN-184 Fix responseBody test 2022-05-06 15:58:04 +02:00
Guillaume Hivert fd7f0a1a91 ARSN-184 Fix merge 2022-05-06 15:41:42 +02:00
bert-e 459fd99316 Merge branches 'development/8.1' and 'feature/ARSN-184-type-check-s3routes' into tmp/octopus/w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-06 13:21:27 +00:00
Guillaume Hivert d6e4bca3ed ARSN-184 Remove useless signatures 2022-05-06 15:21:17 +02:00
Guillaume Hivert 235b2ac6d4 Merge remote-tracking branch 'origin/feature/ARSN-184-type-check-s3routes' into w/8.1/feature/ARSN-184-type-check-s3routes 2022-05-06 15:19:05 +02:00
bert-e f49006a64e Merge branch 'feature/ARSN-171-type-s3-middlewares' into q/7.10 2022-05-06 12:50:22 +00:00
bert-e 8025ce08fe Merge branches 'w/8.1/feature/ARSN-171-type-s3-middlewares' and 'q/1844/7.10/feature/ARSN-171-type-s3-middlewares' into tmp/octopus/q/8.1 2022-05-06 12:50:22 +00:00
Guillaume Hivert 75811ba553 ARSN-184 Exports 2022-05-06 14:45:44 +02:00
Guillaume Hivert 26de19b22b ARSN-184 Type check routeWebsite 2022-05-06 14:26:40 +02:00
Guillaume Hivert 72bdd130f0 ARSN-184 Type check routePUT 2022-05-06 14:26:40 +02:00
Guillaume Hivert 4131732b74 ARSN-184 Type check routePOST 2022-05-06 14:26:40 +02:00
Guillaume Hivert 7cecbe27be ARSN-184 Type check routeOPTIONS 2022-05-06 14:26:40 +02:00
Guillaume Hivert 3fab05071d ARSN-184 Type check routeHEAD 2022-05-06 14:26:40 +02:00
Guillaume Hivert a98f2cede5 ARSN-184 Type check routeGET 2022-05-06 14:26:40 +02:00
Guillaume Hivert 283a0863c2 ARSN-184 Type check routeDELETE 2022-05-06 14:26:40 +02:00
Guillaume Hivert 18b089fc2d ARSN-184 Type check routes 2022-05-06 14:26:40 +02:00
Guillaume Hivert 60139abb10 ARSN-184 Type check routesUtils 2022-05-06 14:26:40 +02:00
Guillaume Hivert 2cc1a9886f ARSN-184 WIP Routes 2022-05-06 14:26:40 +02:00
Guillaume Hivert 1c7122b7e4 ARSN-184 Type check routesUtils 2022-05-06 14:26:40 +02:00
Guillaume Hivert 4eba3ca6a0 ARSN-184 Type check routes 2022-05-06 14:26:40 +02:00
Guillaume Hivert 670d57a9db ARSN-184 Fix StatsClient 2022-05-06 14:26:40 +02:00
Guillaume Hivert 8784113544 ARSN-184 Move all .js to .ts files 2022-05-06 14:26:40 +02:00
bert-e bffb00266f Merge branch 'dependabot/npm_and_yarn/ajv-6.12.3' into q/8.1 2022-05-05 17:00:41 +00:00
bert-e a6cd3a67e0 Merge branch 'dependabot/npm_and_yarn/node-forge-1.3.0' into q/8.1 2022-05-05 17:00:37 +00:00
dependabot[bot] 18605a9546
Bump ajv from 6.12.2 to 6.12.3
Bumps [ajv](https://github.com/ajv-validator/ajv) from 6.12.2 to 6.12.3.
- [Release notes](https://github.com/ajv-validator/ajv/releases)
- [Commits](https://github.com/ajv-validator/ajv/compare/v6.12.2...v6.12.3)

---
updated-dependencies:
- dependency-name: ajv
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-05 15:44:27 +00:00
dependabot[bot] 74d7fe5e68
Bump node-forge from 0.7.6 to 1.3.0
Bumps [node-forge](https://github.com/digitalbazaar/forge) from 0.7.6 to 1.3.0.
- [Release notes](https://github.com/digitalbazaar/forge/releases)
- [Changelog](https://github.com/digitalbazaar/forge/blob/main/CHANGELOG.md)
- [Commits](https://github.com/digitalbazaar/forge/compare/0.7.6...v1.3.0)

---
updated-dependencies:
- dependency-name: node-forge
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-05 15:42:41 +00:00
dependabot[bot] e707cf4398
Bump async from 2.6.3 to 2.6.4
Bumps [async](https://github.com/caolan/async) from 2.6.3 to 2.6.4.
- [Release notes](https://github.com/caolan/async/releases)
- [Changelog](https://github.com/caolan/async/blob/v2.6.4/CHANGELOG.md)
- [Commits](https://github.com/caolan/async/compare/v2.6.3...v2.6.4)

---
updated-dependencies:
- dependency-name: async
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-05 14:56:17 +00:00
bert-e 47c34a4f5c Merge branch 'dependabot/npm_and_yarn/minimist-1.2.6' into q/8.1 2022-05-05 14:33:37 +00:00
bert-e 59f7e32037 Merge branch 'feature/ARSN-179-support-restore-object' into q/8.1 2022-05-05 10:29:37 +00:00
Jordi Bertran de Balanda fb286c6403 Merge remote-tracking branch 'origin/improvement/ARSN-190-release-error-fixes' into w/8.1/improvement/ARSN-190-release-error-fixes
Release 8.1.45
2022-05-05 12:09:39 +02:00
Jordi Bertran de Balanda c9f279ac9b ARSN-190 - release 7.10.23 2022-05-05 12:04:40 +02:00
williamlardier 7f93695300
ARSN-179: add s3 action map for RestoreObject 2022-05-05 10:10:52 +02:00
bert-e cecb5fc1b1 Merge branch 'w/8.1/bugfix/ARSN-188-fix-mongoclient-errors' into tmp/octopus/q/8.1 2022-05-05 08:07:35 +00:00
bert-e 75ba3733aa Merge branch 'bugfix/ARSN-182-error-while-listing-objects' into q/8.1 2022-05-05 06:20:16 +00:00
dependabot[bot] 7c6f5d34b8
Bump minimist from 1.2.5 to 1.2.6
Bumps [minimist](https://github.com/substack/minimist) from 1.2.5 to 1.2.6.
- [Release notes](https://github.com/substack/minimist/releases)
- [Commits](https://github.com/substack/minimist/compare/1.2.5...1.2.6)

---
updated-dependencies:
- dependency-name: minimist
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-05-04 18:04:18 +00:00
bert-e 7e3190a600 Merge branch 'bugfix/ARSN-188-fix-mongoclient-errors' into tmp/octopus/w/8.1/bugfix/ARSN-188-fix-mongoclient-errors 2022-05-04 12:41:33 +00:00
Jordi Bertran de Balanda e9c4a5ce99 ARSN-189 - fix probe exports 2022-05-04 11:54:13 +02:00
Jordi Bertran de Balanda 2622781a1d ARSN-188 - mop up stray address equality checks 2022-05-04 11:38:36 +02:00
Guillaume Hivert f378a85799 ARSN-185 Type Check patches/locationConstraints 2022-05-03 17:54:00 +02:00
bert-e 23ea19bcb3 Merge branch 'feature/ARSN-186-type-check-clustering' into tmp/octopus/w/8.1/feature/ARSN-186-type-check-clustering 2022-05-03 15:36:22 +00:00
Guillaume Hivert c6249cd2d5 ARSN-186 Type check Clustering 2022-05-03 17:35:46 +02:00
KillianG d2c1400cb6
ARSN-182: Return error from callback instead of internal error 2022-05-03 17:25:35 +02:00
Guillaume Hivert 97019d3b44 ARSN-186 Move Clustering.js to Clustering.ts 2022-05-03 17:16:26 +02:00
bert-e 6da31dfd18 Merge branch 'feature/ARSN-183-type-check-stream' into tmp/octopus/w/8.1/feature/ARSN-183-type-check-stream 2022-05-03 15:14:50 +00:00
Guillaume Hivert 75b4e6328e Type check stream 2022-05-03 17:11:34 +02:00
Guillaume Hivert eb9f936e78 Move readJSONStreamObject from .js to .ts 2022-05-03 16:59:10 +02:00
Yutaka Oishi ee1e65d778
ARSN-179: add route for RestoreObject API 2022-05-03 15:14:12 +02:00
williamlardier 3534927ccf
ARSN-179: add action map for RestoreObject API 2022-05-03 15:11:29 +02:00
Jordi Bertran de Balanda 0e3edb847e Merge remote-tracking branch 'origin/improvement/ARSN-181-release-after-error-backward-compat' into w/8.1/improvement/ARSN-181-release-after-error-backward-compat 2022-05-03 11:04:32 +02:00
Jordi Bertran de Balanda d1930c08e8 ARSN-181 - release 7.10.22 2022-05-03 10:53:40 +02:00
bert-e a9f9fe99a5 Merge branches 'w/8.1/feature/ARSN-175-fix-errors-backwards' and 'q/1846/7.10/feature/ARSN-175-fix-errors-backwards' into tmp/octopus/q/8.1 2022-05-02 17:33:37 +00:00
bert-e 3dd0fbfc80 Merge branch 'feature/ARSN-175-fix-errors-backwards' into q/7.10 2022-05-02 17:33:37 +00:00
Jordi Bertran de Balanda a587f78242 Merge remote-tracking branch 'origin/feature/ARSN-175-fix-errors-backwards' into w/8.1/feature/ARSN-175-fix-errors-backwards 2022-05-02 19:26:36 +02:00
Guillaume Hivert 2202ebac8a ARSN-175 Restores old behaviors of errors 2022-05-02 19:19:17 +02:00
Guillaume Hivert 40e5100cd8 ARSN-173 Fix BackendInfo 2022-04-29 18:08:19 +02:00
Guillaume Hivert 0851aa1406 Merge remote-tracking branch 'origin/feature/ARSN-171-type-s3-middlewares' into w/8.1/feature/ARSN-171-type-s3-middlewares 2022-04-29 17:47:22 +02:00
Guillaume Hivert 5c16601657 ARSN-171 Fix tests 2022-04-29 17:05:07 +02:00
Guillaume Hivert 3ff3330f1a ARSN-171 Type check s3middleware/validateConditionalHeaders 2022-04-29 17:05:07 +02:00
Guillaume Hivert 5b02d20e4d ARSN-171 Type check s3middleware/userMetadata 2022-04-29 17:05:07 +02:00
Guillaume Hivert 867da9a3d0 ARSN-171 Type check s3middleware/tagging 2022-04-29 17:05:07 +02:00
Guillaume Hivert c9f6d35fa4 ARSN-171 Type check s3middleware/processMpuParts 2022-04-29 17:05:07 +02:00
Guillaume Hivert c79a5c2ee3 ARSN-171 Type check s3middleware/objectRetention 2022-04-29 17:05:07 +02:00
Guillaume Hivert a400beb8b9 ARSN-171 Type check s3middleware/objectLegalHold 2022-04-29 17:05:07 +02:00
Guillaume Hivert 8ce0b07e63 ARSN-171 Backport constants to 7.10 2022-04-29 17:05:07 +02:00
Guillaume Hivert a0876d3df5 ARSN-171 Type prepareStream and refactor V4Transform to export type 2022-04-29 17:05:07 +02:00
Guillaume Hivert e829fa3d3f ARSN-171 Type objectUtils 2022-04-29 17:05:07 +02:00
Guillaume Hivert da25890556 ARSN-171 Type objectLegalHold 2022-04-29 17:05:07 +02:00
Guillaume Hivert 8df0f5863a ARSN-171 Type nullStream 2022-04-29 17:05:07 +02:00
Guillaume Hivert 2d66248303 ARSN-171 Add Types for xml2js 2022-04-29 17:05:07 +02:00
Guillaume Hivert 8221852eef ARSN-171 Type LifecycleUtils and LifecycleHelpers 2022-04-29 17:05:07 +02:00
Guillaume Hivert d50e1bfd6d ARSN-171 Type LifecycleDatetime 2022-04-29 17:05:07 +02:00
Guillaume Hivert 5f453789d4 ARSN-171 Type convertToXml 2022-04-29 17:05:07 +02:00
Guillaume Hivert 7658481128 ARSN-171 Type mpuUtils 2022-04-29 17:05:07 +02:00
Guillaume Hivert 593bb31ac3 ARSN-171 Type SubStreamInterface 2022-04-29 14:51:04 +02:00
Guillaume Hivert f5e89c9660 ARSN-171 Type ResultsCollector 2022-04-29 14:51:04 +02:00
Guillaume Hivert 62db2267fc ARSN-171 Type MD5Sum 2022-04-29 14:51:04 +02:00
Guillaume Hivert f6544f7a2e ARSN-171 Move all files from JS to TS 2022-04-29 14:51:04 +02:00
bert-e 3ce4effafb Merge branch 'bugfix/ARSN-172-fix-invalid-timestamp' into tmp/octopus/w/8.1/bugfix/ARSN-172-fix-invalid-timestamp 2022-04-29 12:15:13 +00:00
Kerkesni 5ec6acc061
bugfix: ARSN-172 fix invalid timestamp in the oplog entries 2022-04-29 14:11:05 +02:00
bert-e 6c7a1316ae Merge branch 'feature/ARSN-161-type-network' into q/7.10 2022-04-29 11:59:29 +00:00
bert-e b1897708e5 Merge branches 'w/8.1/feature/ARSN-161-type-network' and 'q/1839/7.10/feature/ARSN-161-type-network' into tmp/octopus/q/8.1 2022-04-29 11:59:29 +00:00
bert-e 019907e2ab Merge branch 'feature/ARSN-160-support-invalid-arguments-in-errors' into q/8.1 2022-04-29 07:59:59 +00:00
bert-e 73729c7bdb Merge branches 'development/8.1' and 'feature/ARSN-161-type-network' into tmp/octopus/w/8.1/feature/ARSN-161-type-network 2022-04-28 14:42:16 +00:00
Guillaume Hivert d6635097c7 ARSN-161 Remove useless type, fix some typo and add explicit parens 2022-04-28 16:42:04 +02:00
Kerkesni 3f5e553d8a
feature: ARSN-160 add support for invalid arguments in errors 2022-04-28 15:44:04 +02:00
bert-e efea69ff70 Merge branches 'w/8.1/feature/ARSN-159-type-policy-evaluator' and 'q/1832/7.10/feature/ARSN-159-type-policy-evaluator' into tmp/octopus/q/8.1 2022-04-28 08:34:32 +00:00
bert-e 187ba67cc8 Merge branch 'feature/ARSN-159-type-policy-evaluator' into q/7.10 2022-04-28 08:34:31 +00:00
Guillaume Hivert 8a2b62815b ARSN-159 Add RequesterInfo unknown fields 2022-04-28 10:26:12 +02:00
bert-e 0dbbb80bea Merge branches 'w/8.1/feature/ARSN-156/release-7.10.21' and 'q/1836/7.10/feature/ARSN-156/release-7.10.21' into tmp/octopus/q/8.1 2022-04-27 16:53:55 +00:00
bert-e c808873996 Merge branch 'feature/ARSN-156/release-7.10.21' into q/7.10 2022-04-27 16:53:55 +00:00
Guillaume Hivert 2eecda3079 ARSN-161 Fix rest/utils.ts 2022-04-27 18:08:33 +02:00
bert-e 011606e146 Merge branch 'feature/ARSN-161-type-network' into tmp/octopus/w/8.1/feature/ARSN-161-type-network 2022-04-27 16:01:22 +00:00
Guillaume Hivert a3378c3df5 ARSN-161 Fix ersatz wrong import 2022-04-27 18:01:13 +02:00
Guillaume Hivert 8271b3ba21 ARSN-161 Type HealthProbeServer and Utils.ts 2022-04-27 17:59:45 +02:00
Guillaume Hivert a1b980b95b Merge remote-tracking branch 'origin/feature/ARSN-161-type-network' into w/8.1/feature/ARSN-161-type-network 2022-04-27 17:47:03 +02:00
Guillaume Hivert e063eeeced ARSN-161 Fix tests and index.ts 2022-04-27 17:37:01 +02:00
Guillaume Hivert a5051cffba ARSN-161 Type network/kmip 2022-04-27 17:36:45 +02:00
Guillaume Hivert 24deac9f92 ARSN-161 Move .js to .ts files in network/kmip 2022-04-27 17:36:27 +02:00
Guillaume Hivert 3621c7bc77 ARSN-161 Type network/rest 2022-04-27 17:36:09 +02:00
Guillaume Hivert 57c2d4fcd8 ARSN-161 Migrate files from .js to .ts and add type-checking 2022-04-27 17:35:44 +02:00
bert-e 4c47264a78 Merge branches 'w/8.1/bugfix/ARSN-168-fix-flatten-errors' and 'q/1830/7.10/bugfix/ARSN-168-fix-flatten-errors' into tmp/octopus/q/8.1 2022-04-27 09:21:16 +00:00
bert-e 835ffe79c6 Merge branch 'bugfix/ARSN-168-fix-flatten-errors' into q/7.10 2022-04-27 09:21:16 +00:00
bert-e f69087814e Merge branch 'bugfix/ARSN-168-fix-flatten-errors' into tmp/octopus/w/8.1/bugfix/ARSN-168-fix-flatten-errors 2022-04-27 08:19:38 +00:00
Ronnie Smith cd432fa920
Merge remote-tracking branch 'origin/feature/ARSN-156/release-7.10.21' into w/8.1/feature/ARSN-156/release-7.10.21 2022-04-26 20:15:09 -07:00
Ronnie Smith 1ac27e8125
feature: release 7.10.21 2022-04-26 20:11:31 -07:00
Ronnie Smith af0ab673d7
Merge remote-tracking branch 'origin/feature/ARSN-156/backport-data-retrieval-style' into w/8.1/feature/ARSN-156/backport-data-retrieval-style 2022-04-26 16:40:39 -07:00
Ronnie Smith deb88ae03b
feature: ARSN-156 update route type checks 2022-04-26 16:14:29 -07:00
Ronnie Smith 334edbc17b
Merge remote-tracking branch 'origin/feature/ARSN-156/backport-data-retrieval-style' into w/8.1/feature/ARSN-156/backport-data-retrieval-style 2022-04-26 15:47:14 -07:00
Ronnie Smith a2777d929e
feature: ARSN-156 backport data retrieval style 2022-04-26 15:44:53 -07:00
bert-e 271b28e59b Merge branch 'feature/ARSN-159-type-policy-evaluator' into tmp/octopus/w/8.1/feature/ARSN-159-type-policy-evaluator 2022-04-26 15:16:42 +00:00
Guillaume Hivert 03c7b6ea3e ARSN-159 Type policyEvaluator 2022-04-26 17:04:39 +02:00
Guillaume Hivert 872034073e ARSN-159 Type requestUtils 2022-04-26 17:04:25 +02:00
Guillaume Hivert 3d39b61a46 ARSN-170 Type ipCheck 2022-04-26 17:04:18 +02:00
Guillaume Hivert c55c790a5d ARSN-159 Move everything to TS 2022-04-26 16:59:51 +02:00
Jordi Bertran de Balanda ccbc1ed10c ARSN-168 - make flatten/unflatten on ArsenalError 2022-04-26 16:03:33 +02:00
bert-e 7f641d2755 Merge branches 'q/1828/7.10/bugfix/ARSN-167/backbeat' and 'w/8.1/bugfix/ARSN-167/backbeat' into tmp/octopus/q/8.1 2022-04-26 13:13:02 +00:00
bert-e 348c80060e Merge branch 'bugfix/ARSN-167/backbeat' into q/7.10 2022-04-26 13:13:02 +00:00
bert-e df91750c5a Merge branch 'bugfix/ARSN-167/backbeat' into tmp/octopus/w/8.1/bugfix/ARSN-167/backbeat 2022-04-26 12:55:23 +00:00
bert-e b81d24c3ef Merge branch 'feature/ARSN-169/release-7.10.19' into q/7.10 2022-04-26 01:45:40 +00:00
bert-e 1f2caf6a01 Merge branches 'w/8.1/feature/ARSN-169/release-7.10.19' and 'q/1826/7.10/feature/ARSN-169/release-7.10.19' into tmp/octopus/q/8.1 2022-04-26 01:45:40 +00:00
Ronnie Smith 1333195dcd
Merge remote-tracking branch 'origin/feature/ARSN-169/release-7.10.19' into w/8.1/feature/ARSN-169/release-7.10.19 2022-04-25 18:38:40 -07:00
bert-e f822c7bad9 Merge branch 'q/1812/7.10/improvement/ARSN-157-short-IDs' into tmp/normal/q/8.1 2022-04-26 01:03:38 +00:00
bert-e b3ce76d7d8 Merge branch 'w/8.1/improvement/ARSN-157-short-IDs' into tmp/normal/q/8.1 2022-04-26 01:03:38 +00:00
bert-e c03c67d9fb Merge branch 'improvement/ARSN-157-short-IDs' into q/7.10 2022-04-26 01:03:37 +00:00
Ronnie Smith 0f72b7c188
feature: ARSN-169 update version 2022-04-25 18:00:19 -07:00
Artem Bakalov 18887d10b3 Merge remote-tracking branch 'origin/improvement/ARSN-157-short-IDs' into w/8.1/improvement/ARSN-157-short-IDs 2022-04-26 00:31:05 +00:00
Artem Bakalov 07fd3451ab ARSN-157 short-IDs 2022-04-26 00:23:33 +00:00
Ronnie Smith 223897bbff
Merge remote-tracking branch 'origin/feature/ARSN-164/rpc-error-and-other-updates' into w/8.1/feature/ARSN-164/rpc-error-and-other-updates 2022-04-25 16:47:46 -07:00
Ronnie Smith 473e241d5c
feature: ARSN-164 rpc error utils missing is
* added a few missing constants
* fix a few more err.is usages
2022-04-25 16:28:41 -07:00
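(For context, the `err.is` pattern referenced above looks roughly like the sketch below; the shape of the map is an assumption inferred from the surrounding tickets, not code from this commit.)

```javascript
// Assumed shape: ArsenalError exposes an `is` map of booleans keyed by
// error name, so call sites avoid comparing error messages as strings.
function isNoSuchKey(err) {
    return Boolean(err && err.is && err.is.NoSuchKey);
}
```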
bert-e e4d888c07b Merge branch 'improvement/ARSN-165/improveMongoDBClientInterfaceLogging' into q/8.1 2022-04-25 22:50:16 +00:00
bert-e dece118ba9 Merge branch 'q/1814/7.10/improvement/ARSN-162-add-getBucketTagging-error' into tmp/normal/q/8.1 2022-04-25 16:44:52 +00:00
bert-e ffe53ab72e Merge branch 'improvement/ARSN-162-add-getBucketTagging-error' into q/7.10 2022-04-25 16:44:51 +00:00
Will Toozs a077cc199f
ARSN-162: revert NoSuchTagSet error addition 2022-04-25 12:45:33 +02:00
bert-e b0cb6d9c0f Merge branch 'improvement/ARSN-162-add-getBucketTagging-error' into tmp/octopus/w/8.1/improvement/ARSN-162-add-getBucketTagging-error 2022-04-25 08:50:17 +00:00
Nicolas Humbert c13cff150f ARSN-167 Fix zenko metrics 2022-04-24 17:52:08 -04:00
Alexander Chan e0da963226 ARSN-165: getLatestVersion - skip error logs for NoSuchKey errors from getLatestVersion 2022-04-22 11:31:46 -07:00
bert-e 209f3bae44 Merge branches 'w/8.1/feature/ARSN-158-type-policy' and 'q/1810/7.10/feature/ARSN-158-type-policy' into tmp/octopus/q/8.1 2022-04-22 15:55:26 +00:00
bert-e e446f20223 Merge branch 'feature/ARSN-158-type-policy' into q/7.10 2022-04-22 15:55:25 +00:00
Guillaume Hivert e311f0d83d Fix StatsModel 2022-04-22 17:47:21 +02:00
Guillaume Hivert dab763884a Merge remote-tracking branch 'origin/feature/ARSN-147-type-metrics' into w/8.1/feature/ARSN-147-type-metrics 2022-04-22 17:42:21 +02:00
Guillaume Hivert 4f22e526ee Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-147-type-metrics 2022-04-22 17:41:44 +02:00
Guillaume Hivert dd0ca967c4 Merge remote-tracking branch 'origin/development/7.10' into HEAD 2022-04-22 17:39:14 +02:00
Guillaume Hivert 3951bb289c Merge remote-tracking branch 'origin/feature/ARSN-146-type-https' into w/8.1/feature/ARSN-146-type-https 2022-04-22 17:27:36 +02:00
Guillaume Hivert 7b0bb25358 ARSN-99 Export HTTPS 2022-04-22 17:26:07 +02:00
Guillaume Hivert b97de6505c Merge remote-tracking branch 'origin/feature/ARSN-146-type-https' into w/8.1/feature/ARSN-146-type-https 2022-04-22 17:24:35 +02:00
Guillaume Hivert a5ad298c3b Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-146-type-https 2022-04-22 17:23:45 +02:00
Guillaume Hivert 57ab049565 Merge remote-tracking branch 'origin/development/7.10' into HEAD 2022-04-22 17:20:56 +02:00
bert-e 6919af95f2 Merge branches 'w/8.1/feature/ARSN-99-type-check-auth-folder' and 'q/1797/7.10/feature/ARSN-99-type-check-auth-folder' into tmp/octopus/q/8.1 2022-04-22 13:56:53 +00:00
bert-e 6a5f0964ff Merge branch 'feature/ARSN-99-type-check-auth-folder' into q/7.10 2022-04-22 13:56:53 +00:00
Guillaume Hivert b94c13a115 ARSN-99 Update yarn 2022-04-22 14:45:39 +02:00
Guillaume Hivert 666da6b1aa Merge remote-tracking branch 'origin/feature/ARSN-99-type-check-auth-folder' into w/8.1/feature/ARSN-99-type-check-auth-folder 2022-04-22 14:43:47 +02:00
Guillaume Hivert 7192d4bc93 Merge remote-tracking branch 'origin/development/8.1' into w/8.1/feature/ARSN-99-type-check-auth-folder 2022-04-22 14:40:18 +02:00
Guillaume Hivert 66043e5cd0 ARSN-99 Fix tests 2022-04-22 12:09:32 +02:00
Guillaume Hivert bb2951be2c ARSN-99 Catch up changes in v4/streamingV4 2022-04-22 12:03:48 +02:00
Guillaume Hivert 0d68de5ec4 ARSN-99 Restore constants.emptyStringHash 2022-04-22 12:03:40 +02:00
Guillaume Hivert f4e43f2cc7 ARSN-99 Fix Naming Credentials and various function names 2022-04-22 12:03:33 +02:00
Guillaume Hivert b829b7662e ARSN-99 Migrate auth/v4/streamingV4 to TS 2022-04-22 12:01:15 +02:00
Will Toozs e4be1d8d35
ARSN-162: add getBucketTagging NoSuchTagSet error 2022-04-21 18:22:30 +02:00
bert-e 1523f6baa6 Merge branch 'feature/ARSN-158-type-policy' into tmp/octopus/w/8.1/feature/ARSN-158-type-policy 2022-04-20 14:04:48 +00:00
Guillaume Hivert 941d3ba73d ARSN-158 Fix linter 2022-04-20 15:48:39 +02:00
bert-e 9556d5cd61 Merge branch 'bugfix/ARSN-155-export-network-http-utils' into q/7.10 2022-04-20 13:17:43 +00:00
bert-e c517e4531a Merge branches 'w/8.1/bugfix/ARSN-155-export-network-http-utils' and 'q/1804/7.10/bugfix/ARSN-155-export-network-http-utils' into tmp/octopus/q/8.1 2022-04-20 13:17:43 +00:00
Guillaume Hivert 1fc6c2db86 ARSN-158 Fix test 2022-04-20 14:56:57 +02:00
Guillaume Hivert c5949b547d ARSN-158 Type policy 2022-04-20 14:46:19 +02:00
Kerkesni 7bcb81985a
feature: ARSN-152 add s3:ObjectAcl:put notification support 2022-04-20 14:11:52 +02:00
bert-e 68ac02ad54 Merge branch 'feature/ARSN-154-support-object-tagging-notifications' into q/8.1 2022-04-20 12:02:45 +00:00
Guillaume Hivert 0d479c82c5 Fix tests 2022-04-20 11:19:22 +02:00
Guillaume Hivert f958ed3204 Merge remote-tracking branch 'origin/feature/ARSN-147-type-metrics' into w/8.1/feature/ARSN-147-type-metrics 2022-04-20 11:12:17 +02:00
Guillaume Hivert 3fdd6b8e80 ARSN-147 Export from metrics folder 2022-04-20 10:44:56 +02:00
Guillaume Hivert 4193511d1b ARSN-147 Type ZenkoMetrics 2022-04-20 10:36:51 +02:00
Guillaume Hivert 3bf00b14b8 ARSN-147 Type StatsModel 2022-04-20 10:36:42 +02:00
Guillaume Hivert 7d4c22594f ARSN-147 Type StatsClients 2022-04-20 10:36:28 +02:00
Guillaume Hivert 6f588c00d7 ARSN-147 Type RedisClient 2022-04-20 10:36:12 +02:00
Guillaume Hivert 441630d57e ARSN-147 Convert files to TS 2022-04-20 10:35:48 +02:00
bert-e 7d80db5d7f Merge branch 'feature/ARSN-146-type-https' into tmp/octopus/w/8.1/feature/ARSN-146-type-https 2022-04-20 08:31:17 +00:00
Guillaume Hivert 3946a01871 ARSN-146 Type HTTPS 2022-04-20 10:29:28 +02:00
bert-e 34ef6d0434 Merge branch 'bugfix/ARSN-155-export-network-http-utils' into tmp/octopus/w/8.1/bugfix/ARSN-155-export-network-http-utils 2022-04-19 16:10:28 +00:00
Jordi Bertran de Balanda 6f36a85353 ARSN-155 - export utils for cloudserver 2022-04-19 18:09:45 +02:00
bert-e 0ce6a79961 Merge branch 'feature/ARSN-153/modify-errors-is' into tmp/octopus/w/8.1/feature/ARSN-153/modify-errors-is 2022-04-19 14:24:56 +00:00
Guillaume Hivert 5d4ed36096 Improve errors API 2022-04-19 16:22:04 +02:00
Kerkesni 7477b881ed
feature: ARSN-154 add support for s3:ObjectTagging notifications 2022-04-19 15:49:51 +02:00
Guillaume Hivert 3874d16f42 Merge remote-tracking branch 'origin/feature/ARSN-99-type-check-auth-folder' into w/8.1/feature/ARSN-99-type-check-auth-folder 2022-04-19 10:53:32 +02:00
Guillaume Hivert 282dc7afb3 ARSN-108 Fix ESLint complains 2022-04-15 15:57:40 +02:00
Guillaume Hivert 617ec1f500 ARSN-108 Fix test suites 2022-04-15 15:55:03 +02:00
Guillaume Hivert 37157118af ARSN-108 Type auth/auth 2022-04-15 15:44:24 +02:00
Guillaume Hivert 33bea4adb3 ARSN-108 Type auth/v4 2022-04-15 15:43:25 +02:00
Guillaume Hivert a0b62a9948 ARSN-103 Type auth/v2 2022-04-15 15:43:01 +02:00
Guillaume Hivert c7c2c7ffaa ARSN-108 Type auth/Vault 2022-04-15 15:42:36 +02:00
Guillaume Hivert 362b82326e ARSN-108 Type auth/AuthInfo 2022-04-15 15:42:19 +02:00
Guillaume Hivert 38d462c833 ARSN-108 Type auth/in_memory/Indexer 2022-04-15 15:42:19 +02:00
Guillaume Hivert 7b73e34f9f ARSN-108 Type auth/in_memory/validateAuthConfig 2022-04-15 15:42:19 +02:00
Guillaume Hivert d88ad57032 ARSN-108 Type auth/in_memory/Backend 2022-04-15 15:42:15 +02:00
Guillaume Hivert 800f79f125 ARSN-108 Type for auth/in_memory/vaultUtilities 2022-04-15 15:41:37 +02:00
Guillaume Hivert 522dfbc0db ARSN-98 Type auth/in_memory/AuthLoader 2022-04-15 15:41:37 +02:00
Guillaume Hivert 918ad4c7c2 ARSN-108 Type constants 2022-04-15 15:41:32 +02:00
Guillaume Hivert 2c8e611a15 ARSN-98 ARSN-108 ARSN-103 Add joi, eslint, simple-glob interface, @types/async and @types/utf8 to make TS compiler happy 2022-04-15 15:36:39 +02:00
Guillaume Hivert 0158fb0967 ARSN-108 Rename auth js files to ts files, and constants.js to constants.ts 2022-04-15 15:35:55 +02:00
Guillaume Hivert fac5605a18 Bump version to 8.1.41 2022-04-15 11:19:36 +02:00
Guillaume Hivert fd33b9271b Bump version to 7.10.18 2022-04-15 11:13:28 +02:00
bert-e 72057b1efc Merge branch 'q/1736/7.10/feature/ARSN-98-migrate-errors-to-typescript' into tmp/normal/q/8.1 2022-04-15 09:03:24 +00:00
bert-e 529840fa37 Merge branch 'w/8.1/feature/ARSN-98-migrate-errors-to-typescript' into tmp/normal/q/8.1 2022-04-15 09:03:24 +00:00
bert-e 0e7c47a7e9 Merge branch 'feature/ARSN-98-migrate-errors-to-typescript' into q/7.10 2022-04-15 09:03:21 +00:00
Guillaume Hivert 0a5f7c4ea9 Fix JS linter 2022-04-15 10:56:24 +02:00
bert-e 0e4ac99d9d Merge branch 'feature/ARSN-142-add-replication-failed-notification-support' into q/8.1 2022-04-14 22:21:20 +00:00
KillianG 218d21b819
Merge remote-tracking branch 'origin/improvement/ARSN-148-release-arsenal-7-10-17' into w/8.1/improvement/ARSN-148-release-arsenal-7-10-17 2022-04-14 19:23:54 +02:00
KillianG 0b51a6a3f0
ARSN-148: release arsenal 7 10 17 2022-04-14 18:58:40 +02:00
bert-e 9333323301 Merge branches 'w/8.1/improvement/ARSN-140-add-get-bucket-tagging-to-action-map' and 'q/1790/7.10/improvement/ARSN-140-add-get-bucket-tagging-to-action-map' into tmp/octopus/q/8.1 2022-04-14 16:49:41 +00:00
bert-e 67639f64d4 Merge branch 'improvement/ARSN-140-add-get-bucket-tagging-to-action-map' into q/7.10 2022-04-14 16:49:41 +00:00
bert-e e5929b9f91 Merge branch 'improvement/ARSN-140-add-get-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-140-add-get-bucket-tagging-to-action-map 2022-04-14 16:43:33 +00:00
bert-e 8998544c06 Merge branches 'w/8.1/improvement/ARSN-139-delete-bucket-tagging-to-action-map' and 'q/1789/7.10/improvement/ARSN-139-delete-bucket-tagging-to-action-map' into tmp/octopus/q/8.1 2022-04-14 16:37:07 +00:00
bert-e 36fd21a3cd Merge branch 'improvement/ARSN-139-delete-bucket-tagging-to-action-map' into q/7.10 2022-04-14 16:37:06 +00:00
KillianG df33583aea
fixup lint 2022-04-14 18:30:56 +02:00
KillianG 050d649db5
fix conflict 2022-04-14 18:27:21 +02:00
bert-e de81f65306 Merge branch 'improvement/ARSN-138-add-put-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-138-add-put-bucket-tagging-to-action-map 2022-04-14 11:58:09 +00:00
Killian Gardahaut 48fe6779bb
Update actionMaps.js 2022-04-14 13:58:03 +02:00
Killian Gardahaut 6acc199eca
Update lib/policyEvaluator/utils/actionMaps.js
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2022-04-14 10:51:18 +02:00
Killian Gardahaut 6eff4565dd
Update lib/policyEvaluator/utils/actionMaps.js
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2022-04-14 10:51:06 +02:00
KillianG 8cc333e7f7
ARSN-140: add get bucket tagging to action map 2022-04-14 09:54:49 +02:00
KillianG cbcaa97abb
ARSN-139: add delete bucket tagging 2022-04-14 09:36:18 +02:00
bert-e 5eaf67ac93 Merge branch 'improvement/ARSN-138-add-put-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-138-add-put-bucket-tagging-to-action-map 2022-04-14 07:33:59 +00:00
KillianG d18971bc6e
fixup tagging instead of tagset 2022-04-14 09:33:50 +02:00
bert-e 193a399ae2 Merge branch 'improvement/ARSN-138-add-put-bucket-tagging-to-action-map' into tmp/octopus/w/8.1/improvement/ARSN-138-add-put-bucket-tagging-to-action-map 2022-04-14 07:30:21 +00:00
KillianG f5bce507a5
ARSN-138: add put bucket tagging to action map 2022-04-14 09:27:37 +02:00
bert-e 4de18e5b26 Merge branch 'w/8.1/improvement/ARSN-144-release-7.10.16' into tmp/octopus/q/8.1 2022-04-13 16:36:57 +00:00
Jordi Bertran de Balanda c7e2743bf9 Merge remote-tracking branch 'origin/improvement/ARSN-144-release-7.10.16' into w/8.1/improvement/ARSN-144-release-7.10.16 2022-04-13 18:23:52 +02:00
Jordi Bertran de Balanda a8029d8779 ARSN-145 - release 8.1.40 2022-04-13 18:19:27 +02:00
Jordi Bertran de Balanda ee49ec7d72 ARSN-144 - release 7.10.16 2022-04-13 18:15:06 +02:00
bert-e d639f4cffe Merge branches 'w/8.1/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo' and 'q/1773/7.10/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo' into tmp/octopus/q/8.1 2022-04-13 12:53:23 +00:00
bert-e 07e8d44406 Merge branch 'improvement/ARSN-131-add-bucket-tagging-to-bucketinfo' into q/7.10 2022-04-13 12:53:23 +00:00
Guillaume Hivert b2ec34c8f2 Merge remote-tracking branch 'origin/feature/ARSN-98-migrate-errors-to-typescript' into w/8.1/feature/ARSN-98-migrate-errors-to-typescript 2022-04-13 14:19:16 +02:00
KillianG fb31f93829
Merge remote-tracking branch 'origin/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo' into w/8.1/improvement/ARSN-131-add-bucket-tagging-to-bucketinfo 2022-04-13 12:32:20 +02:00
Guillaume Hivert ab823b2797 ARSN-67 Fix all tests 2022-04-13 12:26:03 +02:00
Guillaume Hivert e7502c9ffd ARSN-67 Change errors.spec.js to errors.spec.ts 2022-04-13 12:01:52 +02:00
Guillaume Hivert 9de879ecc2 ARSN-67 Switch errors to TS 2022-04-13 12:01:50 +02:00
Guillaume Hivert 68ca9a6e94 ARSN-67 Rename errors and arsenalErrors 2022-04-13 12:00:38 +02:00
bert-e 6c6ee31f34 Merge branch 'q/1776/7.10/feature/ARSN-128/update-package-version' into tmp/normal/q/8.1 2022-04-12 18:37:07 +00:00
bert-e 310834c237 Merge branch 'feature/ARSN-128/update-package-version' into q/7.10 2022-04-12 18:37:06 +00:00
Kerkesni 64351cf20d
feature: ARSN-142 add support for s3:Replication:OperationFailedReplication notification 2022-04-12 15:48:44 +02:00
KillianG 118f6dc787
ARSN-131: Add bucket tagging to BucketInfo.js 2022-04-12 11:33:09 +02:00
Ronnie Smith b58b4d0773
Merge remote-tracking branch 'origin/feature/ARSN-128/update-package-version' into w/8.1/feature/ARSN-128/update-package-version 2022-04-11 18:21:13 -07:00
Ronnie Smith 3faf2433c7
feature: ARSN-128 update package version 2022-04-11 17:53:17 -07:00
Ronnie Smith 9a0915d40e
feature: ARSN-128 fix linting issues 2022-04-06 14:47:38 -07:00
Ronnie Smith 36d3a67a68
Merge remote-tracking branch 'origin/feature/ARSN-128/move-metdata-data-from-8-to-7' into w/8.1/feature/ARSN-128/move-metdata-data-from-8-to-7 2022-04-06 14:44:26 -07:00
Ronnie Smith d3d2529719
feature: ARSN-128 put bucketclient back or circular issues 2022-04-06 13:39:56 -07:00
Ronnie Smith 23b9cf6e21
feature: ARSN-128 do not pass in bucketclient to data wrapper 2022-04-06 11:47:19 -07:00
Ronnie Smith 66910fb1a4
feature: ARSN-128 add missing export and constant 2022-04-05 11:06:39 -07:00
Xin LI 3d156a58dd bugfix: ARSN-129-downgrade-socketio 2022-03-31 14:26:00 +02:00
Xin LI 7737ec4904 bugfix: ARSN-129-upgrade-socketio-fix-critical 2022-03-31 11:39:04 +02:00
Ronnie Smith 24c82170d8
feature: ARSN-128 update storage exports and fix typo 2022-03-30 19:26:59 -07:00
Ronnie Smith e26073ed6d
feature: ARSN-128 add missing error and update deps 2022-03-30 10:56:39 -07:00
Ronnie Smith 0088a2849f
feature: ARSN-128 add another missing constant 2022-03-29 17:43:15 -07:00
Ronnie Smith e902eb61db
feature: ARSN-128 add missing constant 2022-03-29 17:13:55 -07:00
Ronnie Smith 4cfd78c955
feature: ARSN-128 adding more missing parts 2022-03-29 14:46:51 -07:00
Ronnie Smith cfee038a34
feature: ARSN-128 fix md lint 2022-03-29 13:14:46 -07:00
Ronnie Smith 06c2a0d90d
feature: ARSN-128 disable eslint rule 2022-03-29 13:12:43 -07:00
Ronnie Smith 1e241bd79c
feature: ARSN-128 move tests from 8 2022-03-29 11:58:34 -07:00
Ronnie Smith 0d526df512
feature: ARSN-128 moved storage and algos from 8 2022-03-29 11:50:44 -07:00
Kerkesni d18f4d10bd
bump version 8.1.38 2022-03-25 14:18:53 +01:00
Kerkesni e0bc4383cd
bugfix: ARSN-124 add missing vFormat param 2022-03-25 14:17:57 +01:00
bert-e de17f221bf Merge branch 'bugfix/ARSN-116-fix-listing-master-returning-phd' into q/8.1 2022-03-25 09:46:53 +00:00
Kerkesni d46301b498
bump version 8.1.37 2022-03-25 10:32:35 +01:00
Kerkesni 0bb2a44912
bugfix: ARSN-116 Fixed listing of master keys
When the last version of an object is deleted,
a placeholder master key is created and is kept
for 15 seconds before it gets repaired.

To avoid listing the PHD keys, we added a transform
stream that replaces PHD master keys with the last
version of that object.

Added tests for PHD cases + fixed unit tests
2022-03-25 10:32:35 +01:00
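(A minimal sketch of such a transform stream, assuming hypothetical `isPHD` and `getLatestVersion` helpers; the names and shapes are invented for illustration and are not the commit's actual code.)

```javascript
const { Transform } = require('stream');

// Replaces PHD master entries with the latest version of the same key;
// isPHD() and getLatestVersion() are assumed helpers, not real APIs.
class SkipPHD extends Transform {
    constructor(isPHD, getLatestVersion) {
        super({ objectMode: true });
        this._isPHD = isPHD;
        this._getLatestVersion = getLatestVersion;
    }

    _transform(entry, encoding, callback) {
        if (!this._isPHD(entry.value)) {
            // Regular master key: pass it through unchanged.
            return callback(null, entry);
        }
        // PHD master key: emit the latest version of the object instead.
        return this._getLatestVersion(entry.key, (err, latest) =>
            (err ? callback(err)
                : callback(null, { key: entry.key, value: latest })));
    }
}
```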
Guillaume Hivert 2c1fb773fd ARSN-100 Forward merge 2022-03-24 15:10:11 +01:00
Guillaume Hivert 961b5abe41 ARSN-67 Fix linter 2022-03-24 15:02:16 +01:00
Guillaume Hivert d0527d1ac1 ARSN-67 Upload Artifacts 2022-03-24 15:02:16 +01:00
Guillaume Hivert 08cb0a8c1c ARSN-67 Switch index.ts to import/export and fix JSON import in policyValidator 2022-03-24 15:02:16 +01:00
Guillaume Hivert de0678d5bf ARSN-67 Rename index.js to index.ts for proper future migration 2022-03-24 15:02:16 +01:00
Guillaume Hivert f619c0d33f ARSN-67 Remove ignore of build for NPM
Installing from git sources for dependents produced only an index.js
file. It was due to .gitignore ignoring the build folder and npm/yarn
removing the ignored files after install. Adding an empty .npmignore
solves the problem. This can be found here:
https://stackoverflow.com/questions/61754026/installing-npm-package-with-prepare-script-from-yarn-produces-only-index-js
2022-03-24 15:02:16 +01:00
Guillaume Hivert 7fea1d58a8 ARSN-67 Add TypeScript and Babel, and make test suite working 2022-03-24 15:02:16 +01:00
Guillaume Hivert db25abeb99 ARSN-84 Correct Jest configuration for test suites and coverage
Thanks to the file renaming, we can stay as close as possible to the
Jest default configuration. The extra options are gone; we only specify
maxWorkers (because the test suite is linear and breaks when run in
parallel) and the files to collect coverage from.
The coverage script itself is merged into one command instead of three
to leverage Jest's built-in coverage.
2022-03-24 15:02:16 +01:00
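(A hedged sketch of the resulting configuration; the exact values and glob below are assumptions for illustration, not the commit's actual file.)

```javascript
// jest.config.js: only maxWorkers and the coverage files are specified.
module.exports = {
    maxWorkers: 1, // the suite is linear and breaks when parallelized
    collectCoverageFrom: ['lib/**/*.{js,ts}'], // illustrative glob
};
```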
Guillaume Hivert e90e37c42f ARSN-84 Rename all test files from [name].js to [name].spec.js
In order to simplify the Jest configuration, we have to rename the
files to follow the Jest convention (a .spec.js extension for test
files).
2022-03-24 15:02:16 +01:00
Guillaume Hivert 38bb284694 ARSN-84 Fix Jest bug in _arsenalError
You can check out the bug at
https://github.com/facebook/jest/issues/2549.
The bug is inherent to Jest and has been known for years: Jest swaps
the default Node VM for its own custom VM and injects its own set of
globals. The Error provided by Jest is therefore different from the
Error provided by Node, so the test `err instanceof Error` is false.
Error:
```
 Expected value to be equal to:
      true
 Received:
      false
```
2022-03-24 15:02:16 +01:00
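(One realm-safe alternative, sketched here for illustration rather than taken from the commit: compare the internal tag instead of constructor identity.)

```javascript
// Object.prototype.toString reports the internal [[Class]] tag, which
// is stable across VM realms, unlike `err instanceof Error` under Jest.
function isError(err) {
    return Object.prototype.toString.call(err) === '[object Error]';
}

console.log(isError(new Error('boom'))); // true, even in Jest's VM
```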
Guillaume Hivert a123b3d781 ARSN-84 Fix redis commands in functional tests
The switch from mocha to Jest introduces some test bugs.
As far as we can tell, Jest is quicker than mocha, which creates some
weird behaviour: some commands sent to redis (with ioredis)
work, and some don't. Our conclusion is that redis needs
to queue requests offline to avoid micro-disconnections from
redis in development. Otherwise, we got the following error:
```
  - StatsModel class › should correctly record a new request by default
one increment

    assert.ifError(received, expected)

    Expected value ifError to:
      null
    Received:
      [Error: Stream isn't writeable and enableOfflineQueue options is
false]

    Message:
      ifError got unwanted exception: Stream isn't writeable and
enableOfflineQueue options is false
```
Switching enableOfflineQueue to true makes the test suite
succeed.
2022-03-24 15:02:16 +01:00
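(A minimal ioredis sketch of that option; host and port are placeholder values.)

```javascript
const Redis = require('ioredis');

// Queue commands while the connection is (re)established instead of
// failing fast; this is what made the Jest run deterministic here.
const redis = new Redis({
    host: 'localhost',
    port: 6379,
    enableOfflineQueue: true,
});
```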
Guillaume Hivert 9b583b0541 ARSN-84 Introduce Jest and reconfigure ESLint
Add Jest as a test runner as a mocha replacement to have the
TS compiling on the fly and allowing mixed sources TS/JS in the
sources (and replacing the before and after of mocha with beforeAll
and afterAll of Jest), and adding some ESLint configuration to make
ESLint happy.
2022-03-24 15:02:16 +01:00
Xin.LI 3528c24276
Update errors/arsenalErrors.json
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2022-03-21 14:13:26 +01:00
Xin LI 6d8294d0c0 improvement: ARSN-123 bump version 2022-03-21 13:31:53 +01:00
Xin LI 23bfc17a26 improvement: ARSN-123 modify BucketAlreadyOwnedByYou error description 2022-03-21 13:27:17 +01:00
bert-e 0f6a1f2982 Merge branch 'w/8.1/bugfix/ARSN-105/locations' into tmp/octopus/q/8.1 2022-03-16 14:55:09 +00:00
Nicolas Humbert bff13f1190 ARSN-105 test: add properties to ObjectMD location property 2022-03-15 15:30:02 -04:00
bert-e c857e743c8 Merge branch 'w/7.10/bugfix/ARSN-105/locations' into tmp/octopus/w/8.1/bugfix/ARSN-105/locations 2022-03-15 18:47:38 +00:00
bert-e 27e06c51cc Merge branch 'bugfix/ARSN-105/locations' into tmp/octopus/w/7.10/bugfix/ARSN-105/locations 2022-03-15 18:47:37 +00:00
Nicolas Humbert 7d254a0556 ARSN-105 Disjointed reduced locations 2022-03-15 14:03:54 -04:00
Kerkesni 5f8edd35e9
v8.1.34 2022-03-15 14:35:49 +01:00
Kerkesni 3c4359b696
bugfix: ARSN-115 Fix listing algo returning phd
In the metadata backend, the PHD master is not created in v1, hence
it wasn't considered in the skipping method. In the Mongo
implementation, however, PHD keys still get created, so they need to
be taken into consideration.
2022-03-15 14:35:49 +01:00
Kerkesni 8ecf1d9808
feature: ARSN-112 bump to v8.1.33 2022-03-14 18:14:43 +01:00
Kerkesni 74e4934654
feature: ARSN-83 update putObject function & tests
A new step is added to each of the putObjectVerCase functions
(except the ones that get called when versioning is off) where
we check if the version we are putting is a delete marker and, if
it's the latest version, then we delete the master.
2022-03-14 15:10:54 +01:00
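(A sketch of that added step with invented helper names; `getLatestVersion` and `deleteMaster` are assumptions for illustration, not the commit's API.)

```javascript
// md is assumed to expose getLatestVersion() and deleteMaster();
// both names are illustrative only.
async function maybeDeleteMaster(md, bucketName, objName, putVersion) {
    const latest = await md.getLatestVersion(bucketName, objName);
    if (putVersion.isDeleteMarker
        && latest.versionId === putVersion.versionId) {
        // The new version is a delete marker and is the latest one:
        // remove the master key instead of leaving it dangling.
        await md.deleteMaster(bucketName, objName);
    }
}
```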
Kerkesni eac87fc9de
feature: ARSN-83 update deleteObject functions & tests
The master version gets deleted when the last version is a delete marker.
When deleting a versioned object, we add a new step where we look for
the latest version if the master was not found.
When a version is deleted, the master is automatically created/updated
and set to a PHD value; we then look for the latest version: if it's
a delete marker, or if no other versions are available, the PHD master
gets deleted, else it gets repaired asynchronously.
2022-03-14 15:10:44 +01:00
Kerkesni e2be4d895d
feature: ARSN-83 update getObject function & tests
The master version is deleted when the last version is a delete marker,
so a missing master no longer means we return a NoSuchKey error.
Now, when the master is not found, we look for the latest version and
return it if it exists.
2022-03-14 15:10:32 +01:00
bert-e c0f7ebbaa9 Merge branch 'feature/ARSN-76-support-new-bucket-format' into q/8.1 2022-03-14 11:00:03 +00:00
Kerkesni 60fcedc251
feature: ARSN-76 Updated the MongoClientInterface unit tests
Updated tests to use the correct object keys when querying the db
2022-03-14 11:54:31 +01:00
Kerkesni 10ef395501
feature: ARSN-76 Updated deleteObjectWithCond & putObjectWithCond functions
Updated functions to use the new object key format
Added unit test + updated functional tests to use vFormat
2022-03-14 11:54:31 +01:00
Kerkesni d1c8e67901
feature: ARSN-76 Updated listObject function
Added support for the listing algo returning parameters that request
two synchronized listings, since for v1 buckets master and version
keys now have different prefixes

Added unit tests and updated functional tests to list in both bucket formats

Updated listMultipartUploads to call the internal listing function with the
correct params
2022-03-14 11:54:31 +01:00
Kerkesni 266aabef37
feature: ARSN-76 Updated deleteObject functions
Updated the deleteObject function and its internal
functions to support the new object key format

Added unit tests + updated functional tests to test
both formatting versions
2022-03-14 11:54:31 +01:00
Kerkesni b63c909808
feature: ARSN-76 updated getObject function
Updated getObject function to get bucket vFormat
and use it to correctly format the object key

Added unit tests & updated functional tests to test
both formatting versions

Updated functional tests to use proper versions to avoid
confusion
2022-03-14 11:54:30 +01:00
Kerkesni 02ee339214
feature: ARSN-76 Updated putObject functions
Updated the putObject function to first get the bucket vFormat
attribute and pass it to internal functions

Updated the putObjectVerCase functions to use formatting helper
functions that take into account the new vFormat bucket attribute

Updated getLatestVersion to take into account the new key formatting

Added unit tests & updated functional tests to use both key formats
2022-03-14 11:54:30 +01:00
Kerkesni 5ca7f86350
feature: ARSN-76 Added support for new bucket key format attribute
Bucket metadata now has a new attribute, vFormat, that stores
the version of key formatting used for the bucket's objects

Added utility helper functions that format the keys according to
the new attribute + unit tests

Added a cache for the vFormat attribute, because of the additional
db calls that will be added

Updated putBucketAttributes so that it only updates the required
values, otherwise it overwrites the vFormat

Added a helper function that gets the bucket vFormat
2022-03-14 11:54:30 +01:00
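(A sketch of what such formatting helpers can look like; the separator and v1 prefixes below are placeholders, not the library's actual constants.)

```javascript
// Placeholder constants for illustration; the real separator and
// prefixes live in the versioning constants of the library.
const VID_SEP = '\0';
const V1_MASTER_PREFIX = 'M|';
const V1_VERSION_PREFIX = 'V|';

function formatMasterKey(key, vFormat) {
    // v0 stores the master under the bare object key.
    return vFormat === 'v1' ? `${V1_MASTER_PREFIX}${key}` : key;
}

function formatVersionKey(key, versionId, vFormat) {
    const suffix = `${key}${VID_SEP}${versionId}`;
    return vFormat === 'v1' ? `${V1_VERSION_PREFIX}${suffix}` : suffix;
}
```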
Kerkesni 50a4fd8dc1
improvement: ARSN-110 document new bucket key format 2022-03-14 11:43:45 +01:00
bert-e 5de0c2a7da Merge branch 'improvement/ARSN-88-add-MongoClientInterface-tests' into q/8.1 2022-03-01 15:34:24 +00:00
Kerkesni b942516dca
improvement: ARSN-88 Fix withCond tests 2022-03-01 16:29:29 +01:00
Kerkesni 54181af522
improvement: ARSN-88 Add deleteObjectMD tests 2022-03-01 16:28:04 +01:00
Kerkesni 21af204956
improvement: ARSN-88 Add getObjectMD tests 2022-03-01 16:25:27 +01:00
Kerkesni 68a27be345
improvement: ARSN-88 Add listObject tests 2022-03-01 16:25:17 +01:00
Kerkesni 06350ffe15
improvement: ARSN-88 Add putObjectMD tests 2022-03-01 16:24:19 +01:00
386 changed files with 38419 additions and 19469 deletions


@@ -1 +1,6 @@
-{ "extends": "scality" }
+{
+    "extends": "scality",
+    "parserOptions": {
+        "ecmaVersion": 2020
+    }
+}

.github/workflows/codeql.yaml (new file)

@@ -0,0 +1,25 @@
---
name: codeQL

on:
  push:
    branches: [development/*, stabilization/*, hotfix/*]
  pull_request:
    branches: [development/*, stabilization/*, hotfix/*]
  workflow_dispatch:

jobs:
  analyze:
    name: Static analysis with CodeQL
    runs-on: ubuntu-latest
    steps:
    - name: Checkout code
      uses: actions/checkout@v4

    - name: Initialize CodeQL
      uses: github/codeql-action/init@v3
      with:
        languages: javascript, typescript

    - name: Build and analyze
      uses: github/codeql-action/analyze@v3


@@ -0,0 +1,16 @@
---
name: dependency review

on:
  pull_request:
    branches: [development/*, stabilization/*, hotfix/*]

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
    - name: 'Checkout Repository'
      uses: actions/checkout@v4

    - name: 'Dependency Review'
      uses: actions/dependency-review-action@v4


@@ -25,17 +25,18 @@ jobs:
         - 6379:6379
     steps:
     - name: Checkout
-      uses: actions/checkout@v2
-    - uses: actions/setup-node@v2
+      uses: actions/checkout@v4
+    - uses: actions/setup-node@v4
       with:
         node-version: '16'
         cache: 'yarn'
     - name: install dependencies
-      run: yarn cache clean && yarn install --frozen-lockfile
+      run: yarn install --frozen-lockfile --prefer-offline --network-concurrency 1
       continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
     - name: lint yaml
       run: yarn --silent lint_yml
     - name: lint javascript
-      run: yarn --silent lint -- --max-warnings 0
+      run: yarn --silent lint --max-warnings 0
     - name: lint markdown
       run: yarn --silent lint_md
     - name: add hostname
@@ -45,7 +46,37 @@
       run: yarn --silent coverage
     - name: run functional tests
       run: yarn ft_test
-    - uses: codecov/codecov-action@v2
+    - uses: codecov/codecov-action@v4
+      with:
+        token: ${{ secrets.CODECOV_TOKEN }}
     - name: run executables tests
       run: yarn install && yarn test
       working-directory: 'lib/executables/pensieveCreds/'
+  compile:
+    name: Compile and upload build artifacts
+    needs: test
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v4
+    - name: Install NodeJS
+      uses: actions/setup-node@v4
+      with:
+        node-version: '16'
+        cache: yarn
+    - name: Install dependencies
+      run: yarn install --frozen-lockfile --prefer-offline
+      continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
+    - name: Compile
+      run: yarn build
+      continue-on-error: true # TODO ARSN-97 Remove it when no errors in TS
+    - name: Upload artifacts
+      uses: scality/action-artifacts@v4
+      with:
+        url: https://artifacts.scality.net
+        user: ${{ secrets.ARTIFACTS_USER }}
+        password: ${{ secrets.ARTIFACTS_PASSWORD }}
+        source: ./build
+        method: upload
+      if: success()

.gitignore

@@ -13,3 +13,6 @@ node_modules/
 # Coverage
 coverage/
 .nyc_output/
+
+# TypeScript
+build/

.npmignore (new, empty file)

.swcrc (new file)

@@ -0,0 +1,12 @@
{
  "$schema": "https://swc.rs/schema.json",
  "jsc": {
    "parser": {
      "syntax": "typescript"
    },
    "target": "es2017"
  },
  "module": {
    "type": "commonjs"
  }
}

babel.config.js (new file)

@@ -0,0 +1,6 @@
module.exports = {
    presets: [
        ['@babel/preset-env', { targets: { node: 'current' } }],
        '@babel/preset-typescript',
    ],
};


@@ -178,3 +178,83 @@ this._serverSideEncryption.configuredMasterKeyId = configuredMasterKeyId || undefined;
### Usage
Used to store the user's configured KMS key id
## Model version 15
### Properties Added
```javascript
this._tags = tags || null;
```
The Tag Set of a bucket is an array of objects with Key and Value:
```javascript
[
    {
        Key: 'something',
        Value: 'some_data'
    }
]
```

### Usage
Used to store bucket tagging
## Model version 16
### Properties Added
```javascript
this._capabilities = capabilities || undefined;
```
For capacity-enabled buckets, contains the following data:
```javascript
{
    _capabilities: {
        VeeamSOSApi?: {
            SystemInfo?: {
                ProtocolVersion: String,
                ModelName: String,
                ProtocolCapabilities: {
                    CapacityInfo: Boolean,
                    UploadSessions: Boolean,
                    IAMSTS: Boolean,
                },
                APIEndpoints: {
                    IAMEndpoint: String,
                    STSEndpoint: String,
                },
                SystemRecommendations?: {
                    S3ConcurrentTaskLimit: Number,
                    S3MultiObjectDelete: Number,
                    StorageCurrentTasksLimit: Number,
                    KbBlockSize: Number,
                },
                LastModified?: String,
            },
            CapacityInfo?: {
                Capacity: Number,
                Available: Number,
                Used: Number,
                LastModified?: String,
            },
        },
    },
}
```
### Usage
Used to store the bucket's capabilities (the VeeamSOSApi data above)
## Model version 17
### Properties Added
```javascript
this._quotaMax = quotaMax || 0;
```
### Usage
Used to store bucket quota


@@ -0,0 +1,27 @@
# Delimiter
The Delimiter class handles raw listings from the database with an
optional delimiter, and fills in a curated listing with "Contents" and
"CommonPrefixes" as a result.
## Expected Behavior
- only lists keys belonging to the given **prefix** (if provided)
- groups listed keys that have a common prefix ending with a delimiter
inside CommonPrefixes
- can take a **marker** or **continuationToken** to list from a specific key
- can take a **maxKeys** parameter to limit how many keys can be returned
## State Chart
- States with grey background are *Idle* states, which are waiting for
a new listing key
- States with blue background are *Processing* states, which are
actively processing a new listing key passed by the filter()
function
![Delimiter State Chart](./pics/delimiterStateChart.svg)
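As a rough illustration of the grouping rule (a sketch, not the class's actual code), the following folds a sorted, prefix-filtered key stream into Contents and CommonPrefixes:

```javascript
// Keys sharing a prefix that ends with the delimiter collapse into one
// CommonPrefixes entry; everything else is listed under Contents.
function groupByDelimiter(keys, prefix, delimiter) {
    const contents = [];
    const commonPrefixes = new Set();
    for (const key of keys) {
        const idx = key.indexOf(delimiter, prefix.length);
        if (idx === -1) {
            contents.push(key);
        } else {
            commonPrefixes.add(key.slice(0, idx + delimiter.length));
        }
    }
    return { contents, commonPrefixes: [...commonPrefixes] };
}

// photos/2021/* collapses into one common prefix; photos/a.jpg lists.
console.log(groupByDelimiter(
    ['photos/2021/x.jpg', 'photos/2021/y.jpg', 'photos/a.jpg'],
    'photos/', '/'));
```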


@@ -0,0 +1,45 @@
# DelimiterMaster
The DelimiterMaster class handles raw listings from the database of a
versioned or non-versioned bucket with an optional delimiter, and
fills in a curated listing with "Contents" and "CommonPrefixes" as a
result.
## Expected Behavior
- only lists latest versions of versioned buckets
- only lists keys belonging to the given **prefix** (if provided)
- does not list latest versions that are delete markers
- groups listed keys that have a common prefix ending with a delimiter
inside CommonPrefixes
- can take a **marker** or **continuationToken** to list from a specific key
- can take a **maxKeys** parameter to limit how many keys can be returned
- reconciles internal PHD keys with the next version (those are
created when a specific version that is the latest version is
deleted)
- skips internal keys like replay keys
## State Chart
- States with grey background are *Idle* states, which are waiting for
a new listing key
- States with blue background are *Processing* states, which are
actively processing a new listing key passed by the filter()
function
### Bucket Vformat=v0
![DelimiterMaster State Chart for v0 format](./pics/delimiterMasterV0StateChart.svg)
### Bucket Vformat=v1
For buckets in versioning key format **v1**, the algorithm used is the
one from [Delimiter](delimiter.md).


@@ -0,0 +1,33 @@
# DelimiterVersions
The DelimiterVersions class handles raw listings from the database of a
versioned or non-versioned bucket with an optional delimiter, and
fills in a curated listing with "Versions" and "CommonPrefixes" as a
result.
## Expected Behavior
- lists individual distinct versions of versioned buckets
- only lists keys belonging to the given **prefix** (if provided)
- groups listed keys that have a common prefix ending with a delimiter
inside CommonPrefixes
- can take a **keyMarker** and optionally a **versionIdMarker** to
list from a specific key or version
- can take a **maxKeys** parameter to limit how many keys can be returned
- skips internal keys like replay keys
## State Chart
- States with grey background are *Idle* states, which are waiting for
a new listing key
- States with blue background are *Processing* states, which are
actively processing a new listing key passed by the filter()
function
![DelimiterVersions State Chart](./pics/delimiterVersionsStateChart.svg)
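A typical consumer drives such a listing with the markers, along the lines of this hedged sketch (`listVersions` stands in for whatever backend call feeds the class):

```javascript
// Page through all versions using keyMarker/versionIdMarker until the
// listing is no longer truncated; listVersions is an assumed function.
async function listAllVersions(listVersions, maxKeys = 1000) {
    const all = [];
    let keyMarker;
    let versionIdMarker;
    for (;;) {
        const page = await listVersions({ keyMarker, versionIdMarker, maxKeys });
        all.push(...page.Versions);
        if (!page.IsTruncated) {
            return all;
        }
        keyMarker = page.NextKeyMarker;
        versionIdMarker = page.NextVersionIdMarker;
    }
}
```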


@@ -0,0 +1,45 @@
digraph {
node [shape="box",style="filled,rounded",fontsize=16,fixedsize=true,width=3];
edge [fontsize=14];
rankdir=TB;
START [shape="circle",width=0.2,label="",style="filled",fillcolor="black"]
END [shape="circle",width=0.2,label="",style="filled",fillcolor="black",peripheries=2]
node [fillcolor="lightgrey"];
"NotSkippingPrefixNorVersions.Idle" [label="NotSkippingPrefixNorVersions",group="NotSkippingPrefixNorVersions",width=4];
"SkippingPrefix.Idle" [label="SkippingPrefix",group="SkippingPrefix"];
"SkippingVersions.Idle" [label="SkippingVersions",group="SkippingVersions"];
"WaitVersionAfterPHD.Idle" [label="WaitVersionAfterPHD",group="WaitVersionAfterPHD"];
node [fillcolor="lightblue"];
"NotSkippingPrefixNorVersions.Processing" [label="NotSkippingPrefixNorVersions",group="NotSkippingPrefixNorVersions",width=4];
"SkippingPrefix.Processing" [label="SkippingPrefix",group="SkippingPrefix"];
"SkippingVersions.Processing" [label="SkippingVersions",group="SkippingVersions"];
"WaitVersionAfterPHD.Processing" [label="WaitVersionAfterPHD",group="WaitVersionAfterPHD"];
START -> "SkippingVersions.Idle" [label="[marker != undefined]"]
START -> "NotSkippingPrefixNorVersions.Idle" [label="[marker == undefined]"]
"NotSkippingPrefixNorVersions.Idle" -> "NotSkippingPrefixNorVersions.Processing" [label="filter(key, value)"]
"SkippingPrefix.Idle" -> "SkippingPrefix.Processing" [label="filter(key, value)"]
"SkippingVersions.Idle" -> "SkippingVersions.Processing" [label="filter(key, value)"]
"WaitVersionAfterPHD.Idle" -> "WaitVersionAfterPHD.Processing" [label="filter(key, value)"]
"NotSkippingPrefixNorVersions.Processing" -> "SkippingVersions.Idle" [label="[Version.isDeleteMarker(value)]\n-> FILTER_ACCEPT"]
"NotSkippingPrefixNorVersions.Processing" -> "WaitVersionAfterPHD.Idle" [label="[Version.isPHD(value)]\n-> FILTER_ACCEPT"]
"NotSkippingPrefixNorVersions.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(<ReplayPrefix>)]\n/ prefix <- <ReplayPrefix>\n-> FILTER_SKIP"]
"NotSkippingPrefixNorVersions.Processing" -> END [label="[isListableKey(key, value) and\nnKeys == maxKeys]\n-> FILTER_END"]
"NotSkippingPrefixNorVersions.Processing" -> "SkippingPrefix.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nhasDelimiter(key)]\n/ prefix <- prefixOf(key)\n/ CommonPrefixes.append(prefixOf(key))\n-> FILTER_ACCEPT"]
"NotSkippingPrefixNorVersions.Processing" -> "SkippingVersions.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nnot hasDelimiter(key)]\n/ Contents.append(key, value)\n-> FILTER_ACCEPT"]
"SkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(prefix)]\n-> FILTER_SKIP"]
"SkippingPrefix.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[not key.startsWith(prefix)]"]
"SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="[isVersionKey(key)]\n-> FILTER_SKIP"]
"SkippingVersions.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[not isVersionKey(key)]"]
"WaitVersionAfterPHD.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[isVersionKey(key) and master(key) == PHDkey]\n/ key <- master(key)"]
"WaitVersionAfterPHD.Processing" -> "NotSkippingPrefixNorVersions.Processing" [label="[not isVersionKey(key) or master(key) != PHDkey]"]
}
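
The WaitVersionAfterPHD transitions above reconcile a PHD master key with the next version key of the same object. A rough TypeScript sketch of that step (the null-byte version-key separator and the helper names are assumptions about the v0 key layout):

```ts
// Sketch of the PHD reconciliation step; the separator is an assumption.
const VID_SEP = '\0'; // assumed v0 separator between object key and versionId

// master(key): strips the version suffix from a v0 version key, if any.
function master(key: string): string {
    const idx = key.indexOf(VID_SEP);
    return idx === -1 ? key : key.slice(0, idx);
}

// After a PHD master key `phdKey`, the next version key of the same object
// replaces it as the listed latest version.
function reconcilePHD(phdKey: string, nextKey: string): string | null {
    const isVersionKey = nextKey.includes(VID_SEP);
    if (isVersionKey && master(nextKey) === phdKey) {
        return master(nextKey); // list the version under the master key name
    }
    return null; // no matching version: fall through to normal processing
}
```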


@@ -0,0 +1,216 @@
[Generated SVG (Graphviz 2.43.0) rendering of the DelimiterMaster v0 state chart above; 216 lines of XML omitted, 18 KiB]


@@ -0,0 +1,35 @@
digraph {
node [shape="box",style="filled,rounded",fontsize=16,fixedsize=true,width=3];
edge [fontsize=14];
rankdir=TB;
START [shape="circle",width=0.2,label="",style="filled",fillcolor="black"]
END [shape="circle",width=0.2,label="",style="filled",fillcolor="black",peripheries=2]
node [fillcolor="lightgrey"];
"NotSkipping.Idle" [label="NotSkipping",group="NotSkipping"];
"NeverSkipping.Idle" [label="NeverSkipping",group="NeverSkipping"];
"NotSkippingPrefix.Idle" [label="NotSkippingPrefix",group="NotSkippingPrefix"];
"SkippingPrefix.Idle" [label="SkippingPrefix",group="SkippingPrefix"];
node [fillcolor="lightblue"];
"NeverSkipping.Processing" [label="NeverSkipping",group="NeverSkipping"];
"NotSkippingPrefix.Processing" [label="NotSkippingPrefix",group="NotSkippingPrefix"];
"SkippingPrefix.Processing" [label="SkippingPrefix",group="SkippingPrefix"];
START -> "NotSkipping.Idle"
"NotSkipping.Idle" -> "NeverSkipping.Idle" [label="[delimiter == undefined]"]
"NotSkipping.Idle" -> "NotSkippingPrefix.Idle" [label="[delimiter == '/']"]
"NeverSkipping.Idle" -> "NeverSkipping.Processing" [label="filter(key, value)"]
"NotSkippingPrefix.Idle" -> "NotSkippingPrefix.Processing" [label="filter(key, value)"]
"SkippingPrefix.Idle" -> "SkippingPrefix.Processing" [label="filter(key, value)"]
"NeverSkipping.Processing" -> END [label="[nKeys == maxKeys]\n-> FILTER_END"]
"NeverSkipping.Processing" -> "NeverSkipping.Idle" [label="[nKeys < maxKeys]\n/ Contents.append(key, value)\n -> FILTER_ACCEPT"]
"NotSkippingPrefix.Processing" -> END [label="[nKeys == maxKeys]\n -> FILTER_END"]
"NotSkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[nKeys < maxKeys and hasDelimiter(key)]\n/ prefix <- prefixOf(key)\n/ CommonPrefixes.append(prefixOf(key))\n-> FILTER_ACCEPT"]
"NotSkippingPrefix.Processing" -> "NotSkippingPrefix.Idle" [label="[nKeys < maxKeys and not hasDelimiter(key)]\n/ Contents.append(key, value)\n -> FILTER_ACCEPT"]
"SkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(prefix)]\n-> FILTER_SKIP"]
"SkippingPrefix.Processing" -> "NotSkippingPrefix.Processing" [label="[not key.startsWith(prefix)]"]
}
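
As an aside, these `.dot` sources regenerate the SVG files referenced by the docs with stock Graphviz, for example `dot -Tsvg delimiterStateChart.dot -o delimiterStateChart.svg` (filenames assumed from the image links above).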


@@ -0,0 +1,166 @@
[Generated SVG (Graphviz 2.43.0) rendering of the Delimiter state chart above; 166 lines of XML omitted, 12 KiB]


@@ -0,0 +1,50 @@
digraph {
node [shape="box",style="filled,rounded",fontsize=16,fixedsize=true,width=3];
edge [fontsize=14];
rankdir=TB;
START [shape="circle",width=0.2,label="",style="filled",fillcolor="black"]
END [shape="circle",width=0.2,label="",style="filled",fillcolor="black",peripheries=2]
node [fillcolor="lightgrey"];
"NotSkipping.Idle" [label="NotSkipping",group="NotSkipping",width=4];
"SkippingPrefix.Idle" [label="SkippingPrefix",group="SkippingPrefix"];
"WaitForNullKey.Idle" [label="WaitForNullKey",group="WaitForNullKey"];
"SkippingVersions.Idle" [label="SkippingVersions",group="SkippingVersions"];
node [fillcolor="lightblue"];
"NotSkipping.Processing" [label="NotSkipping",group="NotSkipping",width=4];
"NotSkippingV0.Processing" [label="NotSkippingV0",group="NotSkipping",width=4];
"NotSkippingV1.Processing" [label="NotSkippingV1",group="NotSkipping",width=4];
"NotSkippingCommon.Processing" [label="NotSkippingCommon",group="NotSkipping",width=4];
"SkippingPrefix.Processing" [label="SkippingPrefix",group="SkippingPrefix"];
"WaitForNullKey.Processing" [label="WaitForNullKey",group="WaitForNullKey"];
"SkippingVersions.Processing" [label="SkippingVersions",group="SkippingVersions"];
START -> "WaitForNullKey.Idle" [label="[versionIdMarker != undefined]"]
START -> "NotSkipping.Idle" [label="[versionIdMarker == undefined]"]
"NotSkipping.Idle" -> "NotSkipping.Processing" [label="filter(key, value)"]
"SkippingPrefix.Idle" -> "SkippingPrefix.Processing" [label="filter(key, value)"]
"WaitForNullKey.Idle" -> "WaitForNullKey.Processing" [label="filter(key, value)"]
"SkippingVersions.Idle" -> "SkippingVersions.Processing" [label="filter(key, value)"]
"NotSkipping.Processing" -> "NotSkippingV0.Processing" [label="vFormat='v0'"]
"NotSkipping.Processing" -> "NotSkippingV1.Processing" [label="vFormat='v1'"]
"WaitForNullKey.Processing" -> "NotSkipping.Processing" [label="master(key) != keyMarker"]
"WaitForNullKey.Processing" -> "SkippingVersions.Processing" [label="master(key) == keyMarker"]
"NotSkippingV0.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(<ReplayPrefix>)]\n/ prefix <- <ReplayPrefix>\n-> FILTER_SKIP"]
"NotSkippingV0.Processing" -> "NotSkipping.Idle" [label="[Version.isPHD(value)]\n-> FILTER_ACCEPT"]
"NotSkippingV0.Processing" -> "NotSkippingCommon.Processing" [label="[not key.startsWith(<ReplayPrefix>)\nand not Version.isPHD(value)]"]
"NotSkippingV1.Processing" -> "NotSkippingCommon.Processing" [label="[always]"]
"NotSkippingCommon.Processing" -> END [label="[isListableKey(key, value) and\nnKeys == maxKeys]\n-> FILTER_END"]
"NotSkippingCommon.Processing" -> "SkippingPrefix.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nhasDelimiter(key)]\n/ prefix <- prefixOf(key)\n/ CommonPrefixes.append(prefixOf(key))\n-> FILTER_ACCEPT"]
"NotSkippingCommon.Processing" -> "NotSkipping.Idle" [label="[isListableKey(key, value) and\nnKeys < maxKeys and\nnot hasDelimiter(key)]\n/ Contents.append(key, versionId, value)\n-> FILTER_ACCEPT"]
"SkippingPrefix.Processing" -> "SkippingPrefix.Idle" [label="[key.startsWith(prefix)]\n-> FILTER_SKIP"]
"SkippingPrefix.Processing" -> "NotSkipping.Processing" [label="[not key.startsWith(prefix)]"]
"SkippingVersions.Processing" -> "NotSkipping.Processing" [label="master(key) != keyMarker or\nversionId > versionIdMarker"]
"SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="master(key) == keyMarker and\nversionId < versionIdMarker\n-> FILTER_SKIP"]
"SkippingVersions.Processing" -> "SkippingVersions.Idle" [label="master(key) == keyMarker and\nversionId == versionIdMarker\n-> FILTER_ACCEPT"]
}
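
A small sketch of the SkippingVersions guards above, mirroring the chart's comparisons (helper names are illustrative; Arsenal's real version IDs are strings whose lexicographic order encodes version ordering):

```ts
// Mirrors the three SkippingVersions transitions; names are illustrative.
type Outcome = 'process' | 'skip' | 'accept';

function skippingVersionsGuard(
    masterKey: string,      // master(key): the object key without version suffix
    versionId: string,
    keyMarker: string,
    versionIdMarker: string,
): Outcome {
    if (masterKey !== keyMarker || versionId > versionIdMarker) {
        return 'process'; // past the marker: hand the key back to NotSkipping
    }
    if (versionId < versionIdMarker) {
        return 'skip';    // still before the version marker: FILTER_SKIP
    }
    return 'accept';      // exactly the marker version: FILTER_ACCEPT, stay idle
}
```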


@@ -0,0 +1,265 @@
[Generated SVG (Graphviz 2.43.0) rendering of the DelimiterVersions state chart above; 265 lines of XML omitted]
<polygon fill="black" stroke="black" points="634.76,-567.68 630.02,-558.21 627.82,-568.57 634.76,-567.68"/>
<text text-anchor="middle" x="770.26" y="-602.3" font-family="Times,serif" font-size="14.00">master(key) !== keyMarker or </text>
<text text-anchor="middle" x="770.26" y="-587.3" font-family="Times,serif" font-size="14.00">versionId &gt; versionIdMarker</text>
</g>
</g>
</svg>
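A minimal sketch of the transition logic this diagram encodes, for readability; the guard and action names (`isListableKey`, `hasDelimiter`, the FILTER_* results) are taken from the diagram's own labels and are not the actual DelimiterMaster/DelimiterVersions code in lib/algos/list:

// Hedged sketch of the NotSkippingCommon.Processing transitions above.
// Counting CommonPrefixes toward nKeys is an assumption of this sketch.
const FILTER_END = -1;     // assumed values, for illustration only
const FILTER_SKIP = 0;     // returned by the SkippingPrefix state above
const FILTER_ACCEPT = 1;

interface ListingState {
    nKeys: number;
    maxKeys: number;
    delimiter: string;
    Contents: { key: string; versionId?: string; value: string }[];
    CommonPrefixes: string[];
    prefix?: string;        // set when entering SkippingPrefix.Idle
}

function processListableKey(s: ListingState, key: string,
    versionId: string, value: string): number {
    if (s.nKeys === s.maxKeys) {
        return FILTER_END;                      // -> END
    }
    const idx = key.indexOf(s.delimiter);
    if (idx === -1) {                           // not hasDelimiter(key)
        s.Contents.push({ key, versionId, value });
        s.nKeys += 1;
        return FILTER_ACCEPT;                   // -> NotSkipping.Idle
    }
    s.prefix = key.slice(0, idx + s.delimiter.length);  // prefix <- prefixOf(key)
    s.CommonPrefixes.push(s.prefix);
    s.nKeys += 1;
    return FILTER_ACCEPT;                       // -> SkippingPrefix.Idle
}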


@ -1,773 +0,0 @@
{
"_comment": "------------------- Amazon errors ------------------",
"AccessDenied": {
"code": 403,
"description": "Access Denied"
},
"AccessForbidden": {
"code": 403,
"description": "Access Forbidden"
},
"AccountProblem": {
"code": 403,
"description": "There is a problem with your AWS account that prevents the operation from completing successfully. Please use Contact Us."
},
"AmbiguousGrantByEmailAddress": {
"code": 400,
"description": "The email address you provided is associated with more than one account."
},
"BadDigest": {
"code": 400,
"description": "The Content-MD5 you specified did not match what we received."
},
"BucketAlreadyExists": {
"code": 409,
"description": "The requested bucket name is not available. The bucket namespace is shared by all users of the system. Please select a different name and try again."
},
"BucketAlreadyOwnedByYou": {
"code": 409,
"description": "Your previous request to create the named bucket succeeded and you already own it. You get this error in all AWS regions except US Standard, us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if bucket exists S3 will not do anything)."
},
"BucketNotEmpty": {
"code": 409,
"description": "The bucket you tried to delete is not empty."
},
"CredentialsNotSupported": {
"code": 400,
"description": "This request does not support credentials."
},
"CrossLocationLoggingProhibited": {
"code": 403,
"description": "Cross-location logging not allowed. Buckets in one geographic location cannot log information to a bucket in another location."
},
"DeleteConflict": {
"code": 409,
"description": "The request was rejected because it attempted to delete a resource that has attached subordinate entities. The error message describes these entities."
},
"EntityTooSmall": {
"code": 400,
"description": "Your proposed upload is smaller than the minimum allowed object size."
},
"EntityTooLarge": {
"code": 400,
"description": "Your proposed upload exceeds the maximum allowed object size."
},
"ExpiredToken": {
"code": 400,
"description": "The provided token has expired."
},
"HttpHeadersTooLarge": {
"code": 400,
"description": "Your http headers exceed the maximum allowed http headers size."
},
"IllegalVersioningConfigurationException": {
"code": 400,
"description": "Indicates that the versioning configuration specified in the request is invalid."
},
"IncompleteBody": {
"code": 400,
"description": "You did not provide the number of bytes specified by the Content-Length HTTP header."
},
"IncorrectNumberOfFilesInPostRequest": {
"code": 400,
"description": "POST requires exactly one file upload per request."
},
"InlineDataTooLarge": {
"code": 400,
"description": "Inline data exceeds the maximum allowed size."
},
"InternalError": {
"code": 500,
"description": "We encountered an internal error. Please try again."
},
"InvalidAccessKeyId": {
"code": 403,
"description": "The AWS access key Id you provided does not exist in our records."
},
"InvalidAddressingHeader": {
"code": 400,
"description": "You must specify the Anonymous role."
},
"InvalidArgument": {
"code": 400,
"description": "Invalid Argument"
},
"InvalidBucketName": {
"code": 400,
"description": "The specified bucket is not valid."
},
"InvalidBucketState": {
"code": 409,
"description": "The request is not valid with the current state of the bucket."
},
"InvalidDigest": {
"code": 400,
"description": "The Content-MD5 you specified is not valid."
},
"InvalidEncryptionAlgorithmError": {
"code": 400,
"description": "The encryption request you specified is not valid. The valid value is AES256."
},
"InvalidLocationConstraint": {
"code": 400,
"description": "The specified location constraint is not valid."
},
"InvalidObjectState": {
"code": 403,
"description": "The operation is not valid for the current state of the object."
},
"InvalidPart": {
"code": 400,
"description": "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."
},
"InvalidPartOrder": {
"code": 400,
"description": "The list of parts was not in ascending order.Parts list must specified in order by part number."
},
"InvalidPartNumber": {
"code": 416,
"description": "The requested partnumber is not satisfiable."
},
"InvalidPayer": {
"code": 403,
"description": "All access to this object has been disabled."
},
"InvalidPolicyDocument": {
"code": 400,
"description": "The content of the form does not meet the conditions specified in the policy document."
},
"InvalidRange": {
"code": 416,
"description": "The requested range cannot be satisfied."
},
"InvalidRedirectLocation": {
"code": 400,
"description": "The website redirect location must have a prefix of 'http://' or 'https://' or '/'."
},
"InvalidRequest": {
"code": 400,
"description": "SOAP requests must be made over an HTTPS connection."
},
"InvalidSecurity": {
"code": 403,
"description": "The provided security credentials are not valid."
},
"InvalidSOAPRequest": {
"code": 400,
"description": "The SOAP request body is invalid."
},
"InvalidStorageClass": {
"code": 400,
"description": "The storage class you specified is not valid."
},
"InvalidTag": {
"code": 400,
"description": "The Tag you have provided is invalid"
},
"InvalidTargetBucketForLogging": {
"code": 400,
"description": "The target bucket for logging does not exist, is not owned by you, or does not have the appropriate grants for the log-delivery group."
},
"InvalidToken": {
"code": 400,
"description": "The provided token is malformed or otherwise invalid."
},
"InvalidURI": {
"code": 400,
"description": "Couldn't parse the specified URI."
},
"KeyTooLong": {
"code": 400,
"description": "Your key is too long."
},
"LimitExceeded": {
"code": 409,
"description": " The request was rejected because it attempted to create resources beyond the current AWS account limits. The error message describes the limit exceeded."
},
"MalformedACLError": {
"code": 400,
"description": "The XML you provided was not well-formed or did not validate against our published schema."
},
"MalformedPOSTRequest": {
"code": 400,
"description": "The body of your POST request is not well-formed multipart/form-data."
},
"MalformedXML": {
"code": 400,
"description": "The XML you provided was not well-formed or did not validate against our published schema."
},
"MaxMessageLengthExceeded": {
"code": 400,
"description": "Your request was too big."
},
"MaxPostPreDataLengthExceededError": {
"code": 400,
"description": "Your POST request fields preceding the upload file were too large."
},
"MetadataTooLarge": {
"code": 400,
"description": "Your metadata headers exceed the maximum allowed metadata size."
},
"MethodNotAllowed": {
"code": 405,
"description": "The specified method is not allowed against this resource."
},
"MissingAttachment": {
"code": 400,
"description": "A SOAP attachment was expected, but none were found."
},
"MissingContentLength": {
"code": 411,
"description": "You must provide the Content-Length HTTP header."
},
"MissingRequestBodyError": {
"code": 400,
"description": "Request body is empty"
},
"MissingRequiredParameter": {
"code": 400,
"description": "Your request is missing a required parameter."
},
"MissingSecurityElement": {
"code": 400,
"description": "The SOAP 1.1 request is missing a security element."
},
"MissingSecurityHeader": {
"code": 400,
"description": "Your request is missing a required header."
},
"NoLoggingStatusForKey": {
"code": 400,
"description": "There is no such thing as a logging status subresource for a key."
},
"NoSuchBucket": {
"code": 404,
"description": "The specified bucket does not exist."
},
"NoSuchCORSConfiguration": {
"code": 404,
"description": "The CORS configuration does not exist"
},
"NoSuchKey": {
"code": 404,
"description": "The specified key does not exist."
},
"NoSuchLifecycleConfiguration": {
"code": 404,
"description": "The lifecycle configuration does not exist."
},
"NoSuchObjectLockConfiguration": {
"code": 404,
"description": "The specified object does not have a ObjectLock configuration."
},
"NoSuchWebsiteConfiguration": {
"code": 404,
"description": "The specified bucket does not have a website configuration"
},
"NoSuchUpload": {
"code": 404,
"description": "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed."
},
"NoSuchVersion": {
"code": 404,
"description": "Indicates that the version ID specified in the request does not match an existing version."
},
"ReplicationConfigurationNotFoundError": {
"code": 404,
"description": "The replication configuration was not found"
},
"ObjectLockConfigurationNotFoundError": {
"code": 404,
"description": "The object lock configuration was not found"
},
"ServerSideEncryptionConfigurationNotFoundError" : {
"code": 404,
"description": "The server side encryption configuration was not found"
},
"NotImplemented": {
"code": 501,
"description": "A header you provided implies functionality that is not implemented."
},
"NotModified": {
"code": 304,
"description": "Not Modified."
},
"NotSignedUp": {
"code": 403,
"description": "Your account is not signed up for the S3 service. You must sign up before you can use S3. "
},
"NoSuchBucketPolicy": {
"code": 404,
"description": "The specified bucket does not have a bucket policy."
},
"OperationAborted": {
"code": 409,
"description": "A conflicting conditional operation is currently in progress against this resource. Try again."
},
"PermanentRedirect": {
"code": 301,
"description": "The bucket you are attempting to access must be addressed using the specified endpoint. Send all future requests to this endpoint."
},
"PreconditionFailed": {
"code": 412,
"description": "At least one of the preconditions you specified did not hold."
},
"Redirect": {
"code": 307,
"description": "Temporary redirect."
},
"RestoreAlreadyInProgress": {
"code": 409,
"description": "Object restore is already in progress."
},
"RequestIsNotMultiPartContent": {
"code": 400,
"description": "Bucket POST must be of the enclosure-type multipart/form-data."
},
"RequestTimeout": {
"code": 400,
"description": "Your socket connection to the server was not read from or written to within the timeout period."
},
"RequestTimeTooSkewed": {
"code": 403,
"description": "The difference between the request time and the server's time is too large."
},
"RequestTorrentOfBucketError": {
"code": 400,
"description": "Requesting the torrent file of a bucket is not permitted."
},
"SignatureDoesNotMatch": {
"code": 403,
"description": "The request signature we calculated does not match the signature you provided."
},
"_comment" : {
"note" : "This is an AWS S3 specific error. We are opting to use the more general 'ServiceUnavailable' error used throughout AWS (IAM/EC2) to have uniformity of error messages even though we are potentially compromising S3 compatibility.",
"ServiceUnavailable": {
"code": 503,
"description": "Reduce your request rate."
}
},
"ServiceUnavailable": {
"code": 503,
"description": "The request has failed due to a temporary failure of the server."
},
"SlowDown": {
"code": 503,
"description": "Reduce your request rate."
},
"TemporaryRedirect": {
"code": 307,
"description": "You are being redirected to the bucket while DNS updates."
},
"TokenRefreshRequired": {
"code": 400,
"description": "The provided token must be refreshed."
},
"TooManyBuckets": {
"code": 400,
"description": "You have attempted to create more buckets than allowed."
},
"TooManyParts": {
"code": 400,
"description": "You have attempted to upload more parts than allowed."
},
"UnexpectedContent": {
"code": 400,
"description": "This request does not support content."
},
"UnresolvableGrantByEmailAddress": {
"code": 400,
"description": "The email address you provided does not match any account on record."
},
"UserKeyMustBeSpecified": {
"code": 400,
"description": "The bucket POST must contain the specified field name. If it is specified, check the order of the fields."
},
"NoSuchEntity": {
"code": 404,
"description": "The request was rejected because it referenced an entity that does not exist. The error message describes the entity."
},
"WrongFormat": {
"code": 400,
"description": "Data entered by the user has a wrong format."
},
"Forbidden": {
"code": 403,
"description": "Authentication failed."
},
"EntityDoesNotExist": {
"code": 404,
"description": "Not found."
},
"EntityAlreadyExists": {
"code": 409,
"description": "The request was rejected because it attempted to create a resource that already exists."
},
"KeyAlreadyExists": {
"code": 409,
"description": "The request was rejected because it attempted to create a resource that already exists."
},
"ServiceFailure": {
"code": 500,
"description": "Server error: the request processing has failed because of an unknown error, exception or failure."
},
"IncompleteSignature": {
"code": 400,
"description": "The request signature does not conform to AWS standards."
},
"InternalFailure": {
"code": 500,
"description": "The request processing has failed because of an unknown error, exception or failure."
},
"InvalidAction": {
"code": 400,
"description": "The action or operation requested is invalid. Verify that the action is typed correctly."
},
"InvalidClientTokenId": {
"code": 403,
"description": "The X.509 certificate or AWS access key ID provided does not exist in our records."
},
"InvalidParameterCombination": {
"code": 400,
"description": "Parameters that must not be used together were used together."
},
"InvalidParameterValue": {
"code": 400,
"description": "An invalid or out-of-range value was supplied for the input parameter."
},
"InvalidQueryParameter": {
"code": 400,
"description": "The AWS query string is malformed or does not adhere to AWS standards."
},
"MalformedQueryString": {
"code": 404,
"description": "The query string contains a syntax error."
},
"MissingAction": {
"code": 400,
"description": "The request is missing an action or a required parameter."
},
"MissingAuthenticationToken": {
"code": 403,
"description": "The request must contain either a valid (registered) AWS access key ID or X.509 certificate."
},
"MissingParameter": {
"code": 400,
"description": "A required parameter for the specified action is not supplied."
},
"OptInRequired": {
"code": 403,
"description": "The AWS access key ID needs a subscription for the service."
},
"RequestExpired": {
"code": 400,
"description": "The request reached the service more than 15 minutes after the date stamp on the request or more than 15 minutes after the request expiration date (such as for pre-signed URLs), or the date stamp on the request is more than 15 minutes in the future."
},
"Throttling": {
"code": 400,
"description": "The request was denied due to request throttling."
},
"AccountNotFound": {
"code": 404,
"description": "No account was found in Vault, please contact your system administrator."
},
"ValidationError": {
"code": 400,
"description": "The specified value is invalid."
},
"MalformedPolicyDocument": {
"code": 400,
"description": "Syntax errors in policy."
},
"InvalidInput": {
"code": 400,
"description": "The request was rejected because an invalid or out-of-range value was supplied for an input parameter."
},
"MalformedPolicy": {
"code": 400,
"description": "This policy contains invalid Json"
},
"ReportExpired": {
"code": 410,
"description": "The request was rejected because the most recent credential report has expired. To generate a new credential report, use GenerateCredentialReport."
},
"ReportInProgress": {
"code": 404,
"description": "The request was rejected because the credential report is still being generated."
},
"ReportNotPresent": {
"code": 410,
"description": "The request was rejected because the credential report does not exist. To generate a credential report, use GenerateCredentialReport."
},
"_comment": "-------------- Special non-AWS S3 errors --------------",
"MPUinProgress": {
"code": 409,
"description": "The bucket you tried to delete has an ongoing multipart upload."
},
"LocationNotFound": {
"code": 424,
"description": "The object data location does not exist."
},
"_comment": "-------------- Internal project errors --------------",
"_comment": "----------------------- Vault -----------------------",
"_comment": "#### formatErrors ####",
"BadName": {
"description": "name not ok",
"code": 5001
},
"BadAccount": {
"description": "account not ok",
"code": 5002
},
"BadGroup": {
"description": "group not ok",
"code": 5003
},
"BadId": {
"description": "id not ok",
"code": 5004
},
"BadAccountName": {
"description": "accountName not ok",
"code": 5005
},
"BadNameFriendly": {
"description": "nameFriendly not ok",
"code": 5006
},
"BadEmailAddress": {
"description": "email address not ok",
"code": 5007
},
"BadPath": {
"description": "path not ok",
"code": 5008
},
"BadArn": {
"description": "arn not ok",
"code": 5009
},
"BadCreateDate": {
"description": "createDate not ok",
"code": 5010
},
"BadLastUsedDate": {
"description": "lastUsedDate not ok",
"code": 5011
},
"BadNotBefore": {
"description": "notBefore not ok",
"code": 5012
},
"BadNotAfter": {
"description": "notAfter not ok",
"code": 5013
},
"BadSaltedPwd": {
"description": "salted password not ok",
"code": 5014
},
"ok": {
"description": "No error",
"code": 200
},
"BadUser": {
"description": "user not ok",
"code": 5016
},
"BadSaltedPasswd": {
"description": "salted password not ok",
"code": 5017
},
"BadPasswdDate": {
"description": "password date not ok",
"code": 5018
},
"BadCanonicalId": {
"description": "canonicalId not ok",
"code": 5019
},
"BadAlias": {
"description": "alias not ok",
"code": 5020
},
"_comment": "#### internalErrors ####",
"DBPutFailed": {
"description": "DB put failed",
"code": 5021
},
"_comment": "#### alreadyExistErrors ####",
"AccountEmailAlreadyUsed": {
"description": "an other account already uses that email",
"code": 5022
},
"AccountNameAlreadyUsed": {
"description": "an other account already uses that name",
"code": 5023
},
"UserEmailAlreadyUsed": {
"description": "an other user already uses that email",
"code": 5024
},
"UserNameAlreadyUsed": {
"description": "an other user already uses that name",
"code": 5025
},
"_comment": "#### doesntExistErrors ####",
"NoParentAccount": {
"description": "parent account does not exist",
"code": 5026
},
"_comment": "#### authErrors ####",
"BadStringToSign": {
"description": "stringToSign not ok'",
"code": 5027
},
"BadSignatureFromRequest": {
"description": "signatureFromRequest not ok",
"code": 5028
},
"BadAlgorithm": {
"description": "hashAlgorithm not ok",
"code": 5029
},
"SecretKeyDoesNotExist": {
"description": "secret key does not exist",
"code": 5030
},
"InvalidRegion": {
"description": "Region was not provided or is not recognized by the system",
"code": 5031
},
"ScopeDate": {
"description": "scope date is missing, or format is invalid",
"code": 5032
},
"BadAccessKey": {
"description": "access key not ok",
"code": 5033
},
"NoDict": {
"description": "no dictionary of params provided for signature verification",
"code": 5034
},
"BadSecretKey": {
"description": "secretKey not ok",
"code": 5035
},
"BadSecretKeyValue": {
"description": "secretKey value not ok",
"code": 5036
},
"BadSecretKeyStatus": {
"description": "secretKey status not ok",
"code": 5037
},
"_comment": "#### OidcpErrors ####",
"BadUrl": {
"description": "url not ok",
"code": 5038
},
"BadClientIdList": {
"description": "client id list not ok'",
"code": 5039
},
"BadThumbprintList": {
"description": "thumbprint list not ok'",
"code": 5040
},
"BadObject": {
"description": "Object not ok'",
"code": 5041
},
"_comment": "#### RoleErrors ####",
"BadRole": {
"description": "role not ok",
"code": 5042
},
"_comment": "#### SamlpErrors ####",
"BadSamlp": {
"description": "samlp not ok",
"code": 5043
},
"BadMetadataDocument": {
"description": "metadata document not ok",
"code": 5044
},
"BadSessionIndex": {
"description": "session index not ok",
"code": 5045
},
"Unauthorized": {
"description": "not authenticated",
"code": 401
},
"_comment": "--------------------- MetaData ---------------------",
"_comment": "#### formatErrors ####",
"CacheUpdated": {
"description": "The cache has been updated",
"code": 500
},
"DBNotFound": {
"description": "This DB does not exist",
"code": 404
},
"DBAlreadyExists": {
"description": "This DB already exist",
"code": 409
},
"ObjNotFound": {
"description": "This object does not exist",
"code": 404
},
"PermissionDenied": {
"description": "Permission denied",
"code": 403
},
"BadRequest": {
"description": "BadRequest",
"code": 400
},
"RaftSessionNotLeader": {
"description": "NotLeader",
"code": 500
},
"RaftSessionLeaderNotConnected": {
"description": "RaftSessionLeaderNotConnected",
"code": 400
},
"NoLeaderForDB": {
"description": "NoLeaderForDB",
"code": 400
},
"RouteNotFound": {
"description": "RouteNotFound",
"code": 404
},
"NoMapsInConfig": {
"description": "NoMapsInConfig",
"code": 404
},
"DBAPINotReady": {
"message": "DBAPINotReady",
"code": 500
},
"NotEnoughMapsInConfig:": {
"description": "NotEnoughMapsInConfig",
"code": 400
},
"TooManyRequests": {
"description": "TooManyRequests",
"code": 429
},
"_comment": "----------------------- cdmiclient -----------------------",
"ReadOnly": {
"description": "trying to write to read only back-end",
"code": 403
},
"_comment": "----------------------- authbackend -----------------------",
"AuthMethodNotImplemented": {
"description": "AuthMethodNotImplemented",
"code": 501
}
}
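Each entry above maps an error name to an HTTP (or internal Vault/MetaData) code and a description. A hedged sketch of how such a map can be materialized into throwable error objects; the `ApiError` class, import path, and tsconfig flag are illustrative, not Arsenal's actual lib/errors module:

// Illustrative only: build ready-made Error instances from a
// { name: { code, description } } map like the file above.
// Assumes "resolveJsonModule" is enabled in tsconfig.json.
import rawErrors from './errors.json';

class ApiError extends Error {
    constructor(name: string, public code: number, description: string) {
        super(description);
        this.name = name;
    }
}

export const errors: Record<string, ApiError> = {};
for (const [name, entry] of Object.entries(rawErrors)) {
    // skip section separators; duplicate "_comment" keys collapse to one on parse
    if (name.startsWith('_comment')) continue;
    const { code, description } = entry as { code: number; description: string };
    errors[name] = new ApiError(name, code, description);
}

// usage sketch: errors.NoSuchBucket.code === 404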

202
index.js

@ -1,202 +0,0 @@
module.exports = {
auth: require('./lib/auth/auth'),
constants: require('./lib/constants'),
db: require('./lib/db'),
errors: require('./lib/errors.js'),
errorUtils: require('./lib/errorUtils'),
shuffle: require('./lib/shuffle'),
stringHash: require('./lib/stringHash'),
ipCheck: require('./lib/ipCheck'),
jsutil: require('./lib/jsutil'),
https: {
ciphers: require('./lib/https/ciphers.js'),
dhparam: require('./lib/https/dh2048.js'),
},
algorithms: {
list: require('./lib/algos/list/exportAlgos'),
listTools: {
DelimiterTools: require('./lib/algos/list/tools'),
Skip: require('./lib/algos/list/skip'),
},
cache: {
LRUCache: require('./lib/algos/cache/LRUCache'),
},
stream: {
MergeStream: require('./lib/algos/stream/MergeStream'),
},
SortedSet: require('./lib/algos/set/SortedSet'),
},
policies: {
evaluators: require('./lib/policyEvaluator/evaluator.js'),
validateUserPolicy: require('./lib/policy/policyValidator')
.validateUserPolicy,
evaluatePrincipal: require('./lib/policyEvaluator/principal'),
RequestContext: require('./lib/policyEvaluator/RequestContext.js'),
requestUtils: require('./lib/policyEvaluator/requestUtils'),
actionMaps: require('./lib/policyEvaluator/utils/actionMaps'),
},
Clustering: require('./lib/Clustering'),
testing: {
matrix: require('./lib/testing/matrix.js'),
},
versioning: {
VersioningConstants: require('./lib/versioning/constants.js')
.VersioningConstants,
Version: require('./lib/versioning/Version.js').Version,
VersionID: require('./lib/versioning/VersionID.js'),
WriteGatheringManager: require('./lib/versioning/WriteGatheringManager.js'),
WriteCache: require('./lib/versioning/WriteCache.js'),
VersioningRequestProcessor: require('./lib/versioning/VersioningRequestProcessor.js'),
},
network: {
http: {
server: require('./lib/network/http/server'),
utils: require('./lib/network/http/utils'),
},
rpc: require('./lib/network/rpc/rpc'),
level: require('./lib/network/rpc/level-net'),
rest: {
RESTServer: require('./lib/network/rest/RESTServer'),
RESTClient: require('./lib/network/rest/RESTClient'),
},
RoundRobin: require('./lib/network/RoundRobin'),
probe: {
ProbeServer: require('./lib/network/probe/ProbeServer'),
HealthProbeServer:
require('./lib/network/probe/HealthProbeServer.js'),
Utils: require('./lib/network/probe/Utils.js'),
},
kmip: require('./lib/network/kmip'),
kmipClient: require('./lib/network/kmip/Client'),
},
s3routes: {
routes: require('./lib/s3routes/routes'),
routesUtils: require('./lib/s3routes/routesUtils'),
},
s3middleware: {
userMetadata: require('./lib/s3middleware/userMetadata'),
convertToXml: require('./lib/s3middleware/convertToXml'),
escapeForXml: require('./lib/s3middleware/escapeForXml'),
objectLegalHold: require('./lib/s3middleware/objectLegalHold'),
tagging: require('./lib/s3middleware/tagging'),
checkDateModifiedHeaders:
require('./lib/s3middleware/validateConditionalHeaders')
.checkDateModifiedHeaders,
validateConditionalHeaders:
require('./lib/s3middleware/validateConditionalHeaders')
.validateConditionalHeaders,
MD5Sum: require('./lib/s3middleware/MD5Sum'),
NullStream: require('./lib/s3middleware/nullStream'),
objectUtils: require('./lib/s3middleware/objectUtils'),
azureHelper: {
mpuUtils:
require('./lib/s3middleware/azureHelpers/mpuUtils'),
ResultsCollector:
require('./lib/s3middleware/azureHelpers/ResultsCollector'),
SubStreamInterface:
require('./lib/s3middleware/azureHelpers/SubStreamInterface'),
},
prepareStream: require('./lib/s3middleware/prepareStream'),
processMpuParts: require('./lib/s3middleware/processMpuParts'),
retention: require('./lib/s3middleware/objectRetention'),
lifecycleHelpers: require('./lib/s3middleware/lifecycleHelpers'),
},
storage: {
metadata: {
MetadataWrapper: require('./lib/storage/metadata/MetadataWrapper'),
bucketclient: {
BucketClientInterface:
require('./lib/storage/metadata/bucketclient/' +
'BucketClientInterface'),
LogConsumer:
require('./lib/storage/metadata/bucketclient/LogConsumer'),
},
file: {
BucketFileInterface:
require('./lib/storage/metadata/file/BucketFileInterface'),
MetadataFileServer:
require('./lib/storage/metadata/file/MetadataFileServer'),
MetadataFileClient:
require('./lib/storage/metadata/file/MetadataFileClient'),
},
inMemory: {
metastore:
require('./lib/storage/metadata/in_memory/metastore'),
metadata: require('./lib/storage/metadata/in_memory/metadata'),
bucketUtilities:
require('./lib/storage/metadata/in_memory/bucket_utilities'),
},
mongoclient: {
MongoClientInterface:
require('./lib/storage/metadata/mongoclient/' +
'MongoClientInterface'),
LogConsumer:
require('./lib/storage/metadata/mongoclient/LogConsumer'),
},
proxy: {
Server: require('./lib/storage/metadata/proxy/Server'),
},
},
data: {
DataWrapper: require('./lib/storage/data/DataWrapper'),
MultipleBackendGateway:
require('./lib/storage/data/MultipleBackendGateway'),
parseLC: require('./lib/storage/data/LocationConstraintParser'),
file: {
DataFileStore:
require('./lib/storage/data/file/DataFileStore'),
DataFileInterface:
require('./lib/storage/data/file/DataFileInterface'),
},
external: {
AwsClient: require('./lib/storage/data/external/AwsClient'),
AzureClient: require('./lib/storage/data/external/AzureClient'),
GcpClient: require('./lib/storage/data/external/GcpClient'),
GCP: require('./lib/storage/data/external/GCP/GcpService'),
GcpUtils: require('./lib/storage/data/external/GCP/GcpUtils'),
GcpSigner: require('./lib/storage/data/external/GCP/GcpSigner'),
PfsClient: require('./lib/storage/data/external/PfsClient'),
backendUtils: require('./lib/storage/data/external/utils'),
},
inMemory: {
datastore: require('./lib/storage/data/in_memory/datastore'),
},
},
utils: require('./lib/storage/utils'),
},
models: {
BackendInfo: require('./lib/models/BackendInfo'),
BucketInfo: require('./lib/models/BucketInfo'),
BucketAzureInfo: require('./lib/models/BucketAzureInfo'),
ObjectMD: require('./lib/models/ObjectMD'),
ObjectMDLocation: require('./lib/models/ObjectMDLocation'),
ObjectMDAzureInfo: require('./lib/models/ObjectMDAzureInfo'),
ARN: require('./lib/models/ARN'),
WebsiteConfiguration: require('./lib/models/WebsiteConfiguration'),
ReplicationConfiguration:
require('./lib/models/ReplicationConfiguration'),
LifecycleConfiguration:
require('./lib/models/LifecycleConfiguration'),
LifecycleRule: require('./lib/models/LifecycleRule'),
BucketPolicy: require('./lib/models/BucketPolicy'),
ObjectLockConfiguration:
require('./lib/models/ObjectLockConfiguration'),
NotificationConfiguration:
require('./lib/models/NotificationConfiguration'),
},
metrics: {
StatsClient: require('./lib/metrics/StatsClient'),
StatsModel: require('./lib/metrics/StatsModel'),
RedisClient: require('./lib/metrics/RedisClient'),
ZenkoMetrics: require('./lib/metrics/ZenkoMetrics'),
},
pensieve: {
credentialUtils: require('./lib/executables/pensieveCreds/utils'),
},
stream: {
readJSONStreamObject: require('./lib/stream/readJSONStreamObject'),
},
patches: {
locationConstraints: require('./lib/patches/locationConstraints'),
},
};

175
index.ts Normal file

@ -0,0 +1,175 @@
import * as evaluators from './lib/policyEvaluator/evaluator';
import evaluatePrincipal from './lib/policyEvaluator/principal';
import RequestContext, {
actionNeedQuotaCheck,
actionNeedQuotaCheckCopy,
actionWithDataDeletion } from './lib/policyEvaluator/RequestContext';
import * as requestUtils from './lib/policyEvaluator/requestUtils';
import * as actionMaps from './lib/policyEvaluator/utils/actionMaps';
import { validateUserPolicy } from './lib/policy/policyValidator';
import * as locationConstraints from './lib/patches/locationConstraints';
import * as userMetadata from './lib/s3middleware/userMetadata';
import convertToXml from './lib/s3middleware/convertToXml';
import escapeForXml from './lib/s3middleware/escapeForXml';
import * as objectLegalHold from './lib/s3middleware/objectLegalHold';
import * as tagging from './lib/s3middleware/tagging';
import { checkDateModifiedHeaders } from './lib/s3middleware/validateConditionalHeaders';
import { validateConditionalHeaders } from './lib/s3middleware/validateConditionalHeaders';
import MD5Sum from './lib/s3middleware/MD5Sum';
import NullStream from './lib/s3middleware/nullStream';
import * as objectUtils from './lib/s3middleware/objectUtils';
import * as mpuUtils from './lib/s3middleware/azureHelpers/mpuUtils';
import ResultsCollector from './lib/s3middleware/azureHelpers/ResultsCollector';
import SubStreamInterface from './lib/s3middleware/azureHelpers/SubStreamInterface';
import { prepareStream } from './lib/s3middleware/prepareStream';
import * as processMpuParts from './lib/s3middleware/processMpuParts';
import * as retention from './lib/s3middleware/objectRetention';
import * as objectRestore from './lib/s3middleware/objectRestore';
import * as lifecycleHelpers from './lib/s3middleware/lifecycleHelpers';
export { default as errors } from './lib/errors';
export { default as Clustering } from './lib/Clustering';
export * as ClusterRPC from './lib/clustering/ClusterRPC';
export * as ipCheck from './lib/ipCheck';
export * as auth from './lib/auth/auth';
export * as constants from './lib/constants';
export * as https from './lib/https';
export * as metrics from './lib/metrics';
export * as network from './lib/network';
export * as s3routes from './lib/s3routes';
export * as versioning from './lib/versioning';
export * as stream from './lib/stream';
export * as jsutil from './lib/jsutil';
export { default as stringHash } from './lib/stringHash';
export * as db from './lib/db';
export * as errorUtils from './lib/errorUtils';
export { default as shuffle } from './lib/shuffle';
export * as models from './lib/models';
export const algorithms = {
list: require('./lib/algos/list/exportAlgos'),
listTools: {
DelimiterTools: require('./lib/algos/list/tools'),
Skip: require('./lib/algos/list/skip'),
},
cache: {
GapSet: require('./lib/algos/cache/GapSet'),
GapCache: require('./lib/algos/cache/GapCache'),
LRUCache: require('./lib/algos/cache/LRUCache'),
},
stream: {
MergeStream: require('./lib/algos/stream/MergeStream'),
},
SortedSet: require('./lib/algos/set/SortedSet'),
Heap: require('./lib/algos/heap/Heap'),
};
export const policies = {
evaluators,
validateUserPolicy,
evaluatePrincipal,
RequestContext,
requestUtils,
actionMaps,
actionNeedQuotaCheck,
actionWithDataDeletion,
actionNeedQuotaCheckCopy,
};
export const testing = {
matrix: require('./lib/testing/matrix.js'),
};
export const s3middleware = {
userMetadata,
convertToXml,
escapeForXml,
objectLegalHold,
tagging,
checkDateModifiedHeaders,
validateConditionalHeaders,
MD5Sum,
NullStream,
objectUtils,
azureHelper: {
mpuUtils,
ResultsCollector,
SubStreamInterface,
},
prepareStream,
processMpuParts,
retention,
objectRestore,
lifecycleHelpers,
};
export const storage = {
metadata: {
MetadataWrapper: require('./lib/storage/metadata/MetadataWrapper'),
bucketclient: {
BucketClientInterface:
require('./lib/storage/metadata/bucketclient/' +
'BucketClientInterface'),
LogConsumer:
require('./lib/storage/metadata/bucketclient/LogConsumer'),
},
file: {
BucketFileInterface:
require('./lib/storage/metadata/file/BucketFileInterface'),
MetadataFileServer:
require('./lib/storage/metadata/file/MetadataFileServer'),
MetadataFileClient:
require('./lib/storage/metadata/file/MetadataFileClient'),
},
inMemory: {
metastore:
require('./lib/storage/metadata/in_memory/metastore'),
metadata: require('./lib/storage/metadata/in_memory/metadata'),
bucketUtilities:
require('./lib/storage/metadata/in_memory/bucket_utilities'),
},
mongoclient: {
MongoClientInterface:
require('./lib/storage/metadata/mongoclient/' +
'MongoClientInterface'),
LogConsumer:
require('./lib/storage/metadata/mongoclient/LogConsumer'),
},
proxy: {
Server: require('./lib/storage/metadata/proxy/Server'),
},
},
data: {
DataWrapper: require('./lib/storage/data/DataWrapper'),
MultipleBackendGateway:
require('./lib/storage/data/MultipleBackendGateway'),
parseLC: require('./lib/storage/data/LocationConstraintParser'),
file: {
DataFileStore:
require('./lib/storage/data/file/DataFileStore'),
DataFileInterface:
require('./lib/storage/data/file/DataFileInterface'),
},
external: {
AwsClient: require('./lib/storage/data/external/AwsClient'),
AzureClient: require('./lib/storage/data/external/AzureClient'),
GcpClient: require('./lib/storage/data/external/GcpClient'),
GCP: require('./lib/storage/data/external/GCP/GcpService'),
GcpUtils: require('./lib/storage/data/external/GCP/GcpUtils'),
GcpSigner: require('./lib/storage/data/external/GCP/GcpSigner'),
PfsClient: require('./lib/storage/data/external/PfsClient'),
backendUtils: require('./lib/storage/data/external/utils'),
},
inMemory: {
datastore: require('./lib/storage/data/in_memory/datastore'),
},
},
utils: require('./lib/storage/utils'),
};
export const pensieve = {
credentialUtils: require('./lib/executables/pensieveCreds/utils'),
};
export const patches = {
locationConstraints,
};
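With the entry point rewritten in TypeScript, the same surface becomes available as named ESM exports. A hedged consumer sketch, assuming the package is consumed under its usual `arsenal` name and the exports shown above:

// Hypothetical consumer code: named imports replace destructuring of the
// old module.exports object; require('arsenal') keeps working from CJS.
import { errors, algorithms, s3middleware } from 'arsenal';

const { Skip } = algorithms.listTools;      // same nested shape as index.js had
const hasher = new s3middleware.MD5Sum();   // classes are re-exported unchanged
console.log(errors.NoSuchKey.code);         // pre-built error objects, e.g. 404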


@ -1,18 +1,28 @@
'use strict'; // eslint-disable-line
import cluster, { Worker } from 'cluster';
import * as werelogs from 'werelogs';
const cluster = require('cluster');
export default class Clustering {
_size: number;
_shutdownTimeout: number;
_logger: werelogs.Logger;
_shutdown: boolean;
_workers: (Worker | undefined)[];
_workersTimeout: (NodeJS.Timeout | undefined)[];
_workersStatus: (number | string | undefined)[];
_status: number;
_exitCb?: (clustering: Clustering, exitSignal?: string) => void;
_index?: number;
class Clustering {
/**
* Constructor
*
* @param {number} size Cluster size
* @param {Logger} logger Logger object
* @param {number} [shutdownTimeout=5000] Change default shutdown timeout
* @param size Cluster size
* @param logger Logger object
* @param [shutdownTimeout=5000] Change default shutdown timeout
* releasing resources
* @return {Clustering} itself
* @return itself
*/
constructor(size, logger, shutdownTimeout) {
constructor(size: number, logger: werelogs.Logger, shutdownTimeout?: number) {
this._size = size;
if (size < 1) {
throw new Error('Cluster size must be greater than or equal to 1');
@ -32,7 +42,6 @@ class Clustering {
* Method called after a stop() call
*
* @private
* @return {undefined}
*/
_afterStop() {
// Assuming all workers shut down gracefully
@ -41,10 +50,11 @@ class Clustering {
for (let i = 0; i < size; ++i) {
// If the process returns an error code or is killed by a signal,
// set the status
if (typeof this._workersStatus[i] === 'number') {
this._status = this._workersStatus[i];
const status = this._workersStatus[i];
if (typeof status === 'number') {
this._status = status;
break;
} else if (typeof this._workersStatus[i] === 'string') {
} else if (typeof status === 'string') {
this._status = 1;
break;
}
@ -58,13 +68,17 @@ class Clustering {
/**
* Method called when a worker exited
*
* @param {Cluster.worker} worker - Current worker
* @param {number} i - Worker index
* @param {number} code - Exit code
* @param {string} signal - Exit signal
* @return {undefined}
* @param worker - Current worker
* @param i - Worker index
* @param code - Exit code
* @param signal - Exit signal
*/
_workerExited(worker, i, code, signal) {
_workerExited(
worker: Worker,
i: number,
code: number,
signal: string,
) {
// If the worker:
// - was killed by a signal
// - returned an error code
@ -91,8 +105,9 @@ class Clustering {
this._workersStatus[i] = undefined;
}
this._workers[i] = undefined;
if (this._workersTimeout[i]) {
clearTimeout(this._workersTimeout[i]);
const timeout = this._workersTimeout[i];
if (timeout) {
clearTimeout(timeout);
this._workersTimeout[i] = undefined;
}
// If we don't trigger the stop method, the watchdog
@ -110,29 +125,28 @@ class Clustering {
/**
* Method to start a worker
*
* @param {number} i Index of the starting worker
* @return {undefined}
* @param i Index of the starting worker
*/
startWorker(i) {
if (!cluster.isMaster) {
startWorker(i: number) {
if (!cluster.isPrimary) {
return;
}
// Fork a new worker
this._workers[i] = cluster.fork();
// Listen for message from the worker
this._workers[i].on('message', msg => {
this._workers[i]!.on('message', msg => {
// If the worker is ready, send him his id
if (msg === 'ready') {
this._workers[i].send({ msg: 'setup', id: i });
this._workers[i]!.send({ msg: 'setup', id: i });
}
});
this._workers[i].on('exit', (code, signal) =>
this._workerExited(this._workers[i], i, code, signal));
this._workers[i]!.on('exit', (code, signal) =>
this._workerExited(this._workers[i]!, i, code, signal));
// Trigger when the worker was started
this._workers[i].on('online', () => {
this._workers[i]!.on('online', () => {
this._logger.info('Worker started', {
id: i,
childPid: this._workers[i].process.pid,
childPid: this._workers[i]!.process.pid,
});
});
}
@ -140,10 +154,10 @@ class Clustering {
/**
* Method to put handler on cluster exit
*
* @param {function} cb - Callback(Clustering, [exitSignal])
* @return {Clustering} Itself
* @param cb - Callback(Clustering, [exitSignal])
* @return Itself
*/
onExit(cb) {
onExit(cb: (clustering: Clustering, exitSignal?: string) => void) {
this._exitCb = cb;
return this;
}
@ -152,33 +166,33 @@ class Clustering {
* Method to start the cluster (if master) or to start the callback
* (worker)
*
* @param {function} cb - Callback to run the worker
* @return {Clustering} itself
* @param cb - Callback to run the worker
* @return itself
*/
start(cb) {
start(cb: (clustering: Clustering) => void) {
process.on('SIGINT', () => this.stop('SIGINT'));
process.on('SIGHUP', () => this.stop('SIGHUP'));
process.on('SIGQUIT', () => this.stop('SIGQUIT'));
process.on('SIGTERM', () => this.stop('SIGTERM'));
process.on('SIGPIPE', () => {});
process.on('exit', (code, signal) => {
process.on('exit', (code?: number, signal?: string) => {
if (this._exitCb) {
this._status = code || 0;
return this._exitCb(this, signal);
}
return process.exit(code || 0);
});
process.on('uncaughtException', err => {
process.on('uncaughtException', (err: Error) => {
this._logger.fatal('caught error', {
error: err.message,
stack: err.stack.split('\n').map(str => str.trim()),
stack: err.stack?.split('\n')?.map(str => str.trim()),
});
process.exit(1);
});
if (!cluster.isMaster) {
if (!cluster.isPrimary) {
// Waiting for message from master to
// know the id of the slave cluster
process.on('message', msg => {
process.on('message', (msg: any) => {
if (msg.msg === 'setup') {
this._index = msg.id;
cb(this);
@ -186,7 +200,7 @@ class Clustering {
});
// Send message to the master, to let him know
// the worker has started
process.send('ready');
process.send?.('ready');
} else {
for (let i = 0; i < this._size; ++i) {
this.startWorker(i);
@ -198,7 +212,7 @@ class Clustering {
/**
* Method to get workers
*
* @return {Cluster.Worker[]} Workers
* @return Workers
*/
getWorkers() {
return this._workers;
@ -207,7 +221,7 @@ class Clustering {
/**
* Method to get the status of the cluster
*
* @return {number} Status code
* @return Status code
*/
getStatus() {
return this._status;
@ -216,7 +230,7 @@ class Clustering {
/**
* Method to return if it's the master process
*
* @return {boolean} - True if master, false otherwise
* @return - True if master, false otherwise
*/
isMaster() {
return this._index === undefined;
@ -225,7 +239,7 @@ class Clustering {
/**
* Method to get index of the worker
*
* @return {number|undefined} Worker index, undefined if it's master
* @return Worker index, undefined if it's master
*/
getIndex() {
return this._index;
@ -234,11 +248,10 @@ class Clustering {
/**
* Method to stop the cluster
*
* @param {string} signal - Set internally when processes killed by signal
* @return {undefined}
* @param signal - Set internally when processes killed by signal
*/
stop(signal) {
if (!cluster.isMaster) {
stop(signal?: string) {
if (!cluster.isPrimary) {
if (this._exitCb) {
return this._exitCb(this, signal);
}
@ -251,13 +264,17 @@ class Clustering {
}
this._workersTimeout[i] = setTimeout(() => {
// Kill the worker if the SIGTERM was ignored or takes too long
process.kill(worker.process.pid, 'SIGKILL');
if (worker.process.pid) {
process.kill(worker.process.pid, 'SIGKILL');
}
}, this._shutdownTimeout);
// Send SIGTERM to the process, allowing it to release resources
// and save some state
return process.kill(worker.process.pid, 'SIGTERM');
if (worker.process.pid) {
return process.kill(worker.process.pid, 'SIGTERM');
} else {
return true;
}
});
}
}
module.exports = Clustering;
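A hedged usage sketch of the migrated class; the worker count, logger name, and callback bodies are illustrative only:

// Illustrative usage of the TypeScript Clustering class diffed above.
import * as werelogs from 'werelogs';
import Clustering from './lib/Clustering';

const logger = new werelogs.Logger('ClusteringDemo');
const clustering = new Clustering(4, logger);   // 4 workers, default 5s shutdown

clustering
    .onExit(c => process.exit(c.getStatus()))   // invoked once the cluster stops
    .start(c => {
        // runs in each worker after the primary has sent its 'setup' id
        logger.info('worker online', { index: c.getIndex() });
    });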

363
lib/algos/cache/GapCache.ts vendored Normal file

@ -0,0 +1,363 @@
import { OrderedSet } from '@js-sdsl/ordered-set';
import {
default as GapSet,
GapSetEntry,
} from './GapSet';
// the API is similar but is not strictly a superset of GapSetInterface
// so we don't extend from it
export interface GapCacheInterface {
exposureDelayMs: number;
maxGapWeight: number;
size: number;
setGap: (firstKey: string, lastKey: string, weight: number) => void;
removeOverlappingGaps: (overlappingKeys: string[]) => number;
lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
[Symbol.iterator]: () => Iterator<GapSetEntry>;
toArray: () => GapSetEntry[];
};
class GapCacheUpdateSet {
newGaps: GapSet;
updatedKeys: OrderedSet<string>;
constructor(maxGapWeight: number) {
this.newGaps = new GapSet(maxGapWeight);
this.updatedKeys = new OrderedSet();
}
addUpdateBatch(updatedKeys: OrderedSet<string>): void {
this.updatedKeys.union(updatedKeys);
}
};
/**
* Cache of listing "gaps" i.e. ranges of keys that can be skipped
* over during listing (because they only contain delete markers as
* latest versions).
*
* Typically, a single GapCache instance would be attached to a raft session.
*
* The API usage is as follows:
*
* - Initialize a GapCache instance by calling start() (this starts an internal timer)
*
* - Insert a gap or update an existing one via setGap()
*
* - Lookup existing gaps via lookupGap()
*
* - Invalidate gaps that overlap a specific set of keys via removeOverlappingGaps()
*
* - Shut down a GapCache instance by calling stop() (this stops the internal timer)
*
* Gaps inserted via setGap() are not exposed immediately to lookupGap(), but only:
*
* - after a certain delay always larger than 'exposureDelayMs' and usually shorter
* than twice this value (but might be slightly longer in rare cases)
*
* - and only if they haven't been invalidated by a recent call to removeOverlappingGaps()
*
* This ensures atomicity between gap creation and invalidation from updates under
* the condition that a gap is created from first key to last key within the time defined
* by 'exposureDelayMs'.
*
* The implementation is based on two extra temporary "update sets" on top of the main
* exposed gap set, one called "staging" and the other "frozen", each containing a
* temporary updated gap set and a list of updated keys to invalidate gaps with (coming
* from calls to removeOverlappingGaps()). Every "exposureDelayMs" milliseconds, the frozen
* gaps are invalidated by all key updates coming from either of the "staging" or "frozen"
* update set, then merged into the exposed gaps set, after which the staging updates become
* the frozen updates and won't receive any new gap until the next cycle.
*/
export default class GapCache implements GapCacheInterface {
_exposureDelayMs: number;
maxGaps: number;
_stagingUpdates: GapCacheUpdateSet;
_frozenUpdates: GapCacheUpdateSet;
_exposedGaps: GapSet;
_exposeFrozenInterval: NodeJS.Timeout | null;
/**
* @constructor
*
* @param {number} exposureDelayMs - minimum delay between
* insertion of a gap via setGap() and its exposure via
* lookupGap()
* @param {number} maxGaps - maximum number of cached gaps, after
* which no new gap can be added by setGap(). (Note: a future
* improvement could replace this by an eviction strategy)
* @param {number} maxGapWeight - maximum "weight" of individual
* cached gaps, which is also the granularity for
* invalidation. Individual gaps can be chained together,
* which lookupGap() transparently consolidates in the response
* into a single large gap.
*/
constructor(exposureDelayMs: number, maxGaps: number, maxGapWeight: number) {
this._exposureDelayMs = exposureDelayMs;
this.maxGaps = maxGaps;
this._stagingUpdates = new GapCacheUpdateSet(maxGapWeight);
this._frozenUpdates = new GapCacheUpdateSet(maxGapWeight);
this._exposedGaps = new GapSet(maxGapWeight);
this._exposeFrozenInterval = null;
}
/**
* Create a GapCache from an array of exposed gap entries (used in tests)
*
* @return {GapCache} - a new GapCache instance
*/
static createFromArray(
gaps: GapSetEntry[],
exposureDelayMs: number,
maxGaps: number,
maxGapWeight: number
): GapCache {
const gapCache = new GapCache(exposureDelayMs, maxGaps, maxGapWeight);
gapCache._exposedGaps = GapSet.createFromArray(gaps, maxGapWeight);
return gapCache;
}
/**
* Internal helper to remove gaps in the staging and frozen sets
* overlapping with previously updated keys, right before the
* frozen gaps get exposed.
*
* @return {undefined}
*/
_removeOverlappingGapsBeforeExpose(): void {
for (const { updatedKeys } of [this._stagingUpdates, this._frozenUpdates]) {
if (updatedKeys.size() === 0) {
continue;
}
for (const { newGaps } of [this._stagingUpdates, this._frozenUpdates]) {
if (newGaps.size === 0) {
continue;
}
newGaps.removeOverlappingGaps(updatedKeys);
}
}
}
/**
* This function is the core mechanism that updates the exposed gaps in the
* cache. It is called on a regular interval defined by 'exposureDelayMs'.
*
* It does the following in order:
*
* - remove gaps from the frozen set that overlap with any key present in a
* batch passed to removeOverlappingGaps() since the last two triggers of
* _exposeFrozen()
*
* - merge the remaining gaps from the frozen set to the exposed set, which
* makes them visible from calls to lookupGap()
*
* - rotate by freezing the currently staging updates and initiating a new
* staging updates set
*
* @return {undefined}
*/
_exposeFrozen(): void {
this._removeOverlappingGapsBeforeExpose();
for (const gap of this._frozenUpdates.newGaps) {
// Use a trivial strategy to keep the cache size within
// limits: refuse to add new gaps when the size is above
// the 'maxGaps' threshold. We solely rely on
// removeOverlappingGaps() to make space for new gaps.
if (this._exposedGaps.size < this.maxGaps) {
this._exposedGaps.setGap(gap.firstKey, gap.lastKey, gap.weight);
}
}
this._frozenUpdates = this._stagingUpdates;
this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
}
/**
* Start the internal GapCache timer
*
* @return {undefined}
*/
start(): void {
if (this._exposeFrozenInterval) {
return;
}
this._exposeFrozenInterval = setInterval(
() => this._exposeFrozen(),
this._exposureDelayMs);
}
/**
* Stop the internal GapCache timer
*
* @return {undefined}
*/
stop(): void {
if (this._exposeFrozenInterval) {
clearInterval(this._exposeFrozenInterval);
this._exposeFrozenInterval = null;
}
}
/**
* Record a gap between two keys, associated with a weight to
* limit individual gap's spanning ranges in the cache, for a more
* granular invalidation.
*
* The function handles splitting and merging existing gaps to
* maintain an optimal weight of cache entries.
*
* NOTE 1: the caller must ensure that the full length of the gap
* between 'firstKey' and 'lastKey' has been built from a listing
* snapshot that is more recent than 'exposureDelayMs' milliseconds,
* in order to guarantee that the exposed gap will be fully
* covered (and potentially invalidated) from recent calls to
* removeOverlappingGaps().
*
* NOTE 2: a usual pattern when building a large gap from multiple
* calls to setGap() is to start the next gap from 'lastKey',
* which will be passed as 'firstKey' in the next call, so that
* gaps can be chained together and consolidated by lookupGap().
*
* @param {string} firstKey - first key of the gap
* @param {string} lastKey - last key of the gap, must be greater
* than or equal to 'firstKey'
* @param {number} weight - total weight between 'firstKey' and 'lastKey'
* @return {undefined}
*/
setGap(firstKey: string, lastKey: string, weight: number): void {
this._stagingUpdates.newGaps.setGap(firstKey, lastKey, weight);
}
/**
* Remove gaps that overlap with a given set of keys. Used to
* invalidate gaps when keys are inserted or deleted.
*
* @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that
* overlap with any of this set of keys
* @return {number} - how many gaps were removed from the exposed
* gaps only (overlapping gaps not yet exposed are also invalidated
* but are not accounted for in the returned value)
*/
removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
let overlappingKeysSet;
if (Array.isArray(overlappingKeys)) {
overlappingKeysSet = new OrderedSet(overlappingKeys);
} else {
overlappingKeysSet = overlappingKeys;
}
this._stagingUpdates.addUpdateBatch(overlappingKeysSet);
return this._exposedGaps.removeOverlappingGaps(overlappingKeysSet);
}
/**
* Lookup the next exposed gap that overlaps with [minKey, maxKey]. Internally
* chained gaps are coalesced in the response into a single contiguous large gap.
*
* @param {string} minKey - minimum key overlapping with the returned gap
* @param {string} [maxKey] - maximum key overlapping with the returned gap
* @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
* was found, null otherwise, as a Promise
*/
lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
return this._exposedGaps.lookupGap(minKey, maxKey);
}
/**
* Get the maximum weight setting for individual gaps.
*
* @return {number} - maximum weight of individual gaps
*/
get maxGapWeight(): number {
return this._exposedGaps.maxWeight;
}
/**
* Set the maximum weight setting for individual gaps.
*
* @param {number} gapWeight - maximum weight of individual gaps
*/
set maxGapWeight(gapWeight: number) {
this._exposedGaps.maxWeight = gapWeight;
// also update transient gap sets
this._stagingUpdates.newGaps.maxWeight = gapWeight;
this._frozenUpdates.newGaps.maxWeight = gapWeight;
}
/**
* Get the exposure delay in milliseconds, which is the minimum
* time after which newly cached gaps will be exposed by
* lookupGap().
*
* @return {number} - exposure delay in milliseconds
*/
get exposureDelayMs(): number {
return this._exposureDelayMs;
}
/**
* Set the exposure delay in milliseconds, which is the minimum
* time after which newly cached gaps will be exposed by
* lookupGap(). Setting this attribute automatically updates the
* internal state to honor the new value.
*
* @param {number} exposureDelayMs - exposure delay in milliseconds
*/
set exposureDelayMs(exposureDelayMs: number) {
if (exposureDelayMs !== this._exposureDelayMs) {
this._exposureDelayMs = exposureDelayMs;
if (this._exposeFrozenInterval) {
// invalidate all pending gap updates, as the new interval may not be
// safe for them
this._stagingUpdates = new GapCacheUpdateSet(this.maxGapWeight);
this._frozenUpdates = new GapCacheUpdateSet(this.maxGapWeight);
// reinitialize the _exposeFrozenInterval timer with the updated delay
this.stop();
this.start();
}
}
}
/**
* Get the number of exposed gaps
*
* @return {number} number of exposed gaps
*/
get size(): number {
return this._exposedGaps.size;
}
/**
* Iterate over exposed gaps
*
* @return {Iterator<GapSetEntry>} an iterator over exposed gaps
*/
[Symbol.iterator](): Iterator<GapSetEntry> {
return this._exposedGaps[Symbol.iterator]();
}
/**
* Get an array of all exposed gaps
*
* @return {GapSetEntry[]} array of exposed gaps
*/
toArray(): GapSetEntry[] {
return this._exposedGaps.toArray();
}
/**
* Clear all exposed and staging gaps from the cache.
*
* Note: retains invalidating updates from removeOverlappingGaps()
* for correctness of gaps inserted afterwards.
*
* @return {undefined}
*/
clear(): void {
this._stagingUpdates.newGaps = new GapSet(this.maxGapWeight);
this._frozenUpdates.newGaps = new GapSet(this.maxGapWeight);
this._exposedGaps = new GapSet(this.maxGapWeight);
}
}
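For reference, a minimal usage sketch of the GapCache API above. The import path and constructor arguments are assumptions (they are not shown in this diff); setGap(), lookupGap(), removeOverlappingGaps(), start() and stop() are the methods documented above:

import GapCache from '../lib/algos/cache/GapCache'; // path assumed

// assumed signature: (exposureDelayMs, maxGaps, maxGapWeight)
const gapCache = new GapCache(5000, 1000, 100);
gapCache.start(); // starts the exposure timer

// record a gap of skippable keys along with its listing weight
gapCache.setGap('obj0001', 'obj0042', 42);

// once the exposure delay has elapsed, the gap becomes visible
setTimeout(async () => {
    const gap = await gapCache.lookupGap('obj0000');
    // e.g. { firstKey: 'obj0001', lastKey: 'obj0042', weight: 42 }

    // writes to overlapping keys invalidate cached gaps
    gapCache.removeOverlappingGaps(['obj0010']);
    gapCache.stop();
}, 2 * 5000 + 1000);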

lib/algos/cache/GapSet.ts
@@ -0,0 +1,366 @@
import assert from 'assert';
import { OrderedSet } from '@js-sdsl/ordered-set';
import errors from '../../errors';
export type GapSetEntry = {
firstKey: string,
lastKey: string,
weight: number,
};
export interface GapSetInterface {
maxWeight: number;
size: number;
setGap: (firstKey: string, lastKey: string, weight: number) => GapSetEntry;
removeOverlappingGaps: (overlappingKeys: string[]) => number;
lookupGap: (minKey: string, maxKey?: string) => Promise<GapSetEntry | null>;
[Symbol.iterator]: () => Iterator<GapSetEntry>;
toArray: () => GapSetEntry[];
};
/**
* Specialized data structure to support caching of listing "gaps",
* i.e. ranges of keys that can be skipped over during listing
* (because they only contain delete markers as latest versions)
*/
export default class GapSet implements GapSetInterface, Iterable<GapSetEntry> {
_gaps: OrderedSet<GapSetEntry>;
_maxWeight: number;
/**
* @constructor
* @param {number} maxWeight - weight threshold for each cached
* gap (unitless). Triggers splitting gaps when reached
*/
constructor(maxWeight: number) {
this._gaps = new OrderedSet(
[],
(left: GapSetEntry, right: GapSetEntry) => (
left.firstKey < right.firstKey ? -1 :
left.firstKey > right.firstKey ? 1 : 0
)
);
this._maxWeight = maxWeight;
}
/**
* Create a GapSet from an array of gap entries (used in tests)
*/
static createFromArray(gaps: GapSetEntry[], maxWeight: number): GapSet {
const gapSet = new GapSet(maxWeight);
for (const gap of gaps) {
gapSet._gaps.insert(gap);
}
return gapSet;
}
/**
* Record a gap between two keys, associated with a weight to limit
* individual gap sizes in the cache.
*
* The function handles splitting and merging existing gaps to
* maintain an optimal weight of cache entries.
*
* @param {string} firstKey - first key of the gap
* @param {string} lastKey - last key of the gap, must be greater
* than or equal to 'firstKey'
* @param {number} weight - total weight between 'firstKey' and 'lastKey'
* @return {GapSetEntry} - existing or new gap entry
*/
setGap(firstKey: string, lastKey: string, weight: number): GapSetEntry {
assert(lastKey >= firstKey);
// Step 1/4: Find the closest left-overlapping gap if any, and either
// reuse it or chain it with a new gap depending on the weights
// (otherwise just create a new gap).
const curGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey });
let curGap;
if (curGapIt.isAccessible()) {
curGap = curGapIt.pointer;
if (curGap.lastKey >= lastKey) {
// return fully overlapping gap already cached
return curGap;
}
}
let remainingWeight = weight;
if (!curGap // no previous gap
|| curGap.lastKey < firstKey // previous gap not overlapping
|| (curGap.lastKey === firstKey // previous gap overlapping by one key...
&& curGap.weight + weight > this._maxWeight) // ...but we can't extend it
) {
// create a new gap indexed by 'firstKey'
curGap = { firstKey, lastKey: firstKey, weight: 0 };
this._gaps.insert(curGap);
} else if (curGap.lastKey > firstKey && weight > this._maxWeight) {
// previous gap is either fully or partially contained in the new gap
// and cannot be extended: subtract its weight from the total (heuristic
// in case the previous gap doesn't start at 'firstKey', which is the
// uncommon case)
remainingWeight -= curGap.weight;
// there may be an existing chained gap starting with the previous gap's
// 'lastKey': use it if it exists
const chainedGapIt = this._gaps.find(<GapSetEntry>{ firstKey: curGap.lastKey });
if (chainedGapIt.isAccessible()) {
curGap = chainedGapIt.pointer;
} else {
// no existing chained gap: chain a new gap to the previous gap
curGap = {
firstKey: curGap.lastKey,
lastKey: curGap.lastKey,
weight: 0,
};
this._gaps.insert(curGap);
}
}
// Step 2/4: Cleanup existing gaps fully included in firstKey -> lastKey, and
// aggregate their weights in curGap to define the minimum weight up to the
// last merged gap.
let nextGap;
while (true) {
const nextGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: curGap.firstKey });
nextGap = nextGapIt.isAccessible() && nextGapIt.pointer;
// stop the cleanup when no more gap or if the next gap is not fully
// included in curGap
if (!nextGap || nextGap.lastKey > lastKey) {
break;
}
this._gaps.eraseElementByIterator(nextGapIt);
curGap.lastKey = nextGap.lastKey;
curGap.weight += nextGap.weight;
}
// Step 3/4: Extend curGap to lastKey, adjusting the weight.
// At this point, curGap weight is the minimum weight of the finished gap, save it
// for step 4.
let minMergedWeight = curGap.weight;
if (curGap.lastKey === firstKey && firstKey !== lastKey) {
// extend the existing gap by the full amount 'firstKey -> lastKey'
curGap.lastKey = lastKey;
curGap.weight += remainingWeight;
} else if (curGap.lastKey <= lastKey) {
curGap.lastKey = lastKey;
curGap.weight = remainingWeight;
}
// Step 4/4: Find the closest right-overlapping gap, and if it exists, either merge
// it or chain it with curGap depending on the weights.
if (nextGap && nextGap.firstKey <= lastKey) {
// nextGap overlaps with the new gap: check if we can merge it
minMergedWeight += nextGap.weight;
let mergedWeight;
if (lastKey === nextGap.firstKey) {
// nextGap is chained with curGap: add the full weight of nextGap
mergedWeight = curGap.weight + nextGap.weight;
} else {
// strict overlap: don't add nextGap's weight unless
// it's larger than the sum of merged ranges (as it is
// then included in `minMergedWeight`)
mergedWeight = Math.max(curGap.weight, minMergedWeight);
}
if (mergedWeight <= this._maxWeight) {
// merge nextGap into curGap
curGap.lastKey = nextGap.lastKey;
curGap.weight = mergedWeight;
this._gaps.eraseElementByKey(nextGap);
} else {
// adjust the last key to chain with nextGap and subtract the next
// gap's weight from curGap (heuristic)
curGap.lastKey = nextGap.firstKey;
curGap.weight = Math.max(mergedWeight - nextGap.weight, 0);
curGap = nextGap;
}
}
// return a copy of curGap
return Object.assign({}, curGap);
}
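    // Illustrative example (not part of the implementation): with
    // maxWeight=10, setGap('a', 'f', 6) followed by setGap('f', 'k', 5)
    // cannot merge into a single gap of weight 11, so a second gap
    // { f -> k } is chained at 'f'; lookupGap() coalesces the chain
    // into { a -> k } on read.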
/**
* Remove gaps that overlap with one or more keys in a given array or
* OrderedSet. Used to invalidate gaps when keys are inserted or deleted.
*
* @param {OrderedSet<string> | string[]} overlappingKeys - remove gaps that overlap
* with any key in this set
* @return {number} - how many gaps were removed
*/
removeOverlappingGaps(overlappingKeys: OrderedSet<string> | string[]): number {
// To optimize processing with a large number of keys and/or gaps, this function:
//
// 1. converts the overlappingKeys array to an OrderedSet (if not already one)
// 2. queries both the gaps set and the overlapping keys set in a loop, which allows:
// - skipping ranges of overlapping keys at once when there is no new overlapping gap
// - skipping ranges of gaps at once when there is no overlapping key
//
// This way, it is efficient when the number of non-overlapping gaps is large
// (which is the most common case in practice).
let overlappingKeysSet;
if (Array.isArray(overlappingKeys)) {
overlappingKeysSet = new OrderedSet(overlappingKeys);
} else {
overlappingKeysSet = overlappingKeys;
}
const firstKeyIt = overlappingKeysSet.begin();
let currentKey = firstKeyIt.isAccessible() && firstKeyIt.pointer;
let nRemoved = 0;
while (currentKey) {
const closestGapIt = this._gaps.reverseUpperBound(<GapSetEntry>{ firstKey: currentKey });
if (closestGapIt.isAccessible()) {
const closestGap = closestGapIt.pointer;
if (currentKey <= closestGap.lastKey) {
// currentKey overlaps closestGap: remove the gap
this._gaps.eraseElementByIterator(closestGapIt);
nRemoved += 1;
}
}
const nextGapIt = this._gaps.lowerBound(<GapSetEntry>{ firstKey: currentKey });
if (!nextGapIt.isAccessible()) {
// no more gap: we're done
return nRemoved;
}
const nextGap = nextGapIt.pointer;
// advance to the last key potentially overlapping with nextGap
let currentKeyIt = overlappingKeysSet.reverseLowerBound(nextGap.lastKey);
if (currentKeyIt.isAccessible()) {
currentKey = currentKeyIt.pointer;
if (currentKey >= nextGap.firstKey) {
// currentKey overlaps nextGap: remove the gap
this._gaps.eraseElementByIterator(nextGapIt);
nRemoved += 1;
}
}
// advance to the first key potentially overlapping with another gap
currentKeyIt = overlappingKeysSet.lowerBound(nextGap.lastKey);
currentKey = currentKeyIt.isAccessible() && currentKeyIt.pointer;
}
return nRemoved;
}
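    // Illustrative example (not part of the implementation): with gaps
    // { a -> f } and { g -> k }, removeOverlappingGaps(['d', 'x'])
    // removes { a -> f } only ('d' overlaps it, 'x' overlaps nothing)
    // and returns 1.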
/**
* Internal helper to coalesce multiple chained gaps into a single gap.
*
* It is only used to construct lookupGap() return values and
* doesn't modify the GapSet.
*
* NOTE: The function may take a noticeable amount of time and CPU
* to execute if a large number of chained gaps have to be
* coalesced, but it should never take more than a few seconds. In
* most cases it should take less than a millisecond. It regularly
* yields to the Node.js event loop to avoid blocking it during a
* long execution.
*
* @param {GapSetEntry} firstGap - first gap of the chain to coalesce with
* the next ones in the chain
* @return {Promise<GapSetEntry>} - a new coalesced entry, as a Promise
*/
_coalesceGapChain(firstGap: GapSetEntry): Promise<GapSetEntry> {
return new Promise(resolve => {
const coalescedGap: GapSetEntry = Object.assign({}, firstGap);
const coalesceGapChainIteration = () => {
// efficiency trade-off: 100 iterations of log(N) complexity lookups should
// not block the event loop for too long
for (let opCounter = 0; opCounter < 100; ++opCounter) {
const chainedGapIt = this._gaps.find(
<GapSetEntry>{ firstKey: coalescedGap.lastKey });
if (!chainedGapIt.isAccessible()) {
// chain is complete
return resolve(coalescedGap);
}
const chainedGap = chainedGapIt.pointer;
if (chainedGap.firstKey === chainedGap.lastKey) {
// found a single-key gap: chain is complete
return resolve(coalescedGap);
}
coalescedGap.lastKey = chainedGap.lastKey;
coalescedGap.weight += chainedGap.weight;
}
// yield to the event loop before continuing the process
// of coalescing the gap chain
return process.nextTick(coalesceGapChainIteration);
};
coalesceGapChainIteration();
});
}
/**
* Lookup the next gap that overlaps with [minKey, maxKey]. Internally chained
* gaps are coalesced in the response into a single contiguous large gap.
*
* @param {string} minKey - minimum key overlapping with the returned gap
* @param {string} [maxKey] - maximum key overlapping with the returned gap
* @return {Promise<GapSetEntry | null>} - result of the lookup if a gap
* was found, null otherwise, as a Promise
*/
async lookupGap(minKey: string, maxKey?: string): Promise<GapSetEntry | null> {
let firstGap: GapSetEntry | null = null;
const minGapIt = this._gaps.reverseLowerBound(<GapSetEntry>{ firstKey: minKey });
const minGap = minGapIt.isAccessible() && minGapIt.pointer;
if (minGap && minGap.lastKey >= minKey) {
firstGap = minGap;
} else {
const maxGapIt = this._gaps.upperBound(<GapSetEntry>{ firstKey: minKey });
const maxGap = maxGapIt.isAccessible() && maxGapIt.pointer;
if (maxGap && (maxKey === undefined || maxGap.firstKey <= maxKey)) {
firstGap = maxGap;
}
}
if (!firstGap) {
return null;
}
return this._coalesceGapChain(firstGap);
}
/**
* Get the maximum weight setting for individual gaps.
*
* @return {number} - maximum weight of individual gaps
*/
get maxWeight(): number {
return this._maxWeight;
}
/**
* Set the maximum weight setting for individual gaps.
*
* @param {number} gapWeight - maximum weight of individual gaps
*/
set maxWeight(gapWeight: number) {
this._maxWeight = gapWeight;
}
/**
* Get the number of gaps stored in this set.
*
* @return {number} - number of gaps stored in this set
*/
get size(): number {
return this._gaps.size();
}
/**
* Iterate over each gap of the set, ordered by first key
*
* @return {Iterator<GapSetEntry>} - an iterator over all gaps
* Example:
* for (const gap of myGapSet) { ... }
*/
[Symbol.iterator](): Iterator<GapSetEntry> {
return this._gaps[Symbol.iterator]();
}
/**
* Return an array containing all gaps, ordered by first key
*
* NOTE: there is a toArray() method in the OrderedSet implementation
* but it does not scale well and overflows the stack quickly. This is
* why we provide an implementation based on an iterator.
*
* @return {GapSetEntry[]} - an array containing all gaps
*/
toArray(): GapSetEntry[] {
return [...this];
}
}
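A short test-style sketch of the GapSet class above, using createFromArray() as in the unit tests (the import path is an assumption):

import GapSet from '../lib/algos/cache/GapSet'; // path assumed

async function demo(): Promise<void> {
    const gaps = GapSet.createFromArray([
        { firstKey: 'bar', lastKey: 'baz', weight: 10 },
    ], 100);
    // extends the existing gap since 10 + 15 <= maxWeight (100)
    gaps.setGap('baz', 'qux', 15);
    const gap = await gaps.lookupGap('bar');
    // { firstKey: 'bar', lastKey: 'qux', weight: 25 }
    console.log(gap);
}
demo();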

lib/algos/heap/Heap.ts
@@ -0,0 +1,124 @@
export enum HeapOrder {
Min = -1,
Max = 1,
}
export enum CompareResult {
LT = -1,
EQ = 0,
GT = 1,
}
export type CompareFunction = (x: any, y: any) => CompareResult;
export class Heap {
size: number;
_maxSize: number;
_order: HeapOrder;
_heap: any[];
_cmpFn: CompareFunction;
constructor(size: number, order: HeapOrder, cmpFn: CompareFunction) {
this.size = 0;
this._maxSize = size;
this._order = order;
this._cmpFn = cmpFn;
this._heap = new Array<any>(this._maxSize);
}
_parent(i: number): number {
return Math.floor((i - 1) / 2);
}
_left(i: number): number {
return Math.floor((2 * i) + 1);
}
_right(i: number): number {
return Math.floor((2 * i) + 2);
}
_shouldSwap(childIdx: number, parentIdx: number): boolean {
return this._cmpFn(this._heap[childIdx], this._heap[parentIdx]) as number === this._order as number;
}
_swap(i: number, j: number) {
const tmp = this._heap[i];
this._heap[i] = this._heap[j];
this._heap[j] = tmp;
}
_heapify(i: number) {
const l = this._left(i);
const r = this._right(i);
let c = i;
if (l < this.size && this._shouldSwap(l, c)) {
c = l;
}
if (r < this.size && this._shouldSwap(r, c)) {
c = r;
}
if (c !== i) {
this._swap(c, i);
this._heapify(c);
}
}
add(item: any): any {
if (this.size >= this._maxSize) {
return new Error('Max heap size reached');
}
++this.size;
let c = this.size - 1;
this._heap[c] = item;
while (c > 0) {
if (!this._shouldSwap(c, this._parent(c))) {
return null;
}
this._swap(c, this._parent(c));
c = this._parent(c);
}
return null;
};
remove(): any {
if (this.size <= 0) {
return null;
}
const ret = this._heap[0];
this._heap[0] = this._heap[this.size - 1];
this._heapify(0);
--this.size;
return ret;
};
peek(): any {
if (this.size <= 0) {
return null;
}
return this._heap[0];
};
}
export class MinHeap extends Heap {
constructor(size: number, cmpFn: CompareFunction) {
super(size, HeapOrder.Min, cmpFn);
}
}
export class MaxHeap extends Heap {
constructor(size: number, cmpFn: CompareFunction) {
super(size, HeapOrder.Max, cmpFn);
}
}
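A small sketch of the Heap API above; the compare function must return one of the CompareResult values (import path assumed):

import { MinHeap, CompareResult, CompareFunction } from '../lib/algos/heap/Heap';

const cmp: CompareFunction = (x: number, y: number): CompareResult =>
    x < y ? CompareResult.LT : x > y ? CompareResult.GT : CompareResult.EQ;

// fixed capacity of 10 items; add() returns an Error when full
const heap = new MinHeap(10, cmp);
[5, 1, 3].forEach(n => heap.add(n));
console.log(heap.peek());   // 1
console.log(heap.remove()); // 1
console.log(heap.size);     // 2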

@@ -1,6 +1,6 @@
'use strict'; // eslint-disable-line strict
const { FILTER_SKIP, SKIP_NONE } = require('./tools');
const { FILTER_ACCEPT, SKIP_NONE } = require('./tools');
// Use a heuristic to amortize the cost of JSON
// serialization/deserialization only on largest metadata where the
@@ -92,21 +92,26 @@ class Extension {
* @param {object} entry - a listing entry from metadata
* expected format: { key, value }
* @return {number} - result of filtering the entry:
* > 0: entry is accepted and included in the result
* = 0: entry is accepted but not included (skipping)
* < 0: entry is not accepted, listing should finish
* FILTER_ACCEPT: entry is accepted and may or not be included
* in the result
* FILTER_SKIP: listing may skip directly (with "gte" param) to
* the key returned by the skipping() method
* FILTER_END: the results are complete, listing can be stopped
*/
filter(entry) {
return entry ? FILTER_SKIP : FILTER_SKIP;
filter(/* entry: { key, value } */) {
return FILTER_ACCEPT;
}
/**
* Provides the insight into why filter is skipping an entry. This could be
* because it is skipping a range of delimited keys or a range of specific
* version when doing master version listing.
* Provides the next key at which the listing task is allowed to skip to.
* This could allow to skip over:
* - a key prefix ending with the delimiter
* - all remaining versions of an object when doing a current
* versions listing in v0 format
* - a cached "gap" of deleted objects when doing a current
* versions listing in v0 format
*
* @return {string} - the insight: a common prefix or a master key,
* or SKIP_NONE if there is no insight
* @return {string} - the next key at which the listing task is allowed to skip to
*/
skipping() {
return SKIP_NONE;

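For illustration, a hypothetical minimal extension honoring the filter()/skipping() contract documented above, skipping everything under a 'tmp/' prefix (a sketch only, not part of the diff):

const Extension = require('./Extension').default;
const { FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } = require('./tools');

class SkipTmpPrefix extends Extension {
    _inTmp = false;
    filter(entry: { key: string, value: string }) {
        this._inTmp = entry.key.startsWith('tmp/');
        return this._inTmp ? FILTER_SKIP : FILTER_ACCEPT;
    }
    skipping() {
        // next key the listing may jump to with 'gte' ('tmp0' is the
        // first key after every key starting with 'tmp/'), or SKIP_NONE
        return this._inTmp ? 'tmp0' : SKIP_NONE;
    }
}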
@@ -1,7 +1,7 @@
'use strict'; // eslint-disable-line strict
const { inc, checkLimit, listingParamsMasterKeysV0ToV1,
FILTER_END, FILTER_ACCEPT } = require('./tools');
FILTER_END, FILTER_ACCEPT, SKIP_NONE } = require('./tools');
const DEFAULT_MAX_KEYS = 1000;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
@@ -163,7 +163,7 @@ class MultipartUploads {
}
skipping() {
return '';
return SKIP_NONE;
}
/**

@@ -2,7 +2,7 @@
const Extension = require('./Extension').default;
const { checkLimit, FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
const { checkLimit, FILTER_END, FILTER_ACCEPT } = require('./tools');
const DEFAULT_MAX_KEYS = 10000;
/**
@@ -91,7 +91,7 @@ class List extends Extension {
* < 0 : listing done
*/
filter(elem) {
// Check first in case of maxkeys <= 0
// Check if the result array is full
if (this.keys >= this.maxKeys) {
return FILTER_END;
}
@@ -99,7 +99,7 @@
this.filterKeyStartsWith !== undefined) &&
typeof elem === 'object' &&
!this.customFilter(elem.value)) {
return FILTER_SKIP;
return FILTER_ACCEPT;
}
if (typeof elem === 'object') {
this.res.push({

@@ -1,274 +0,0 @@
'use strict'; // eslint-disable-line strict
const Extension = require('./Extension').default;
const { inc, listingParamsMasterKeysV0ToV1,
FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
const VSConst = require('../../versioning/constants').VersioningConstants;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
/**
* Find the common prefix in the path
*
* @param {String} key - path of the object
* @param {String} delimiter - separator
* @param {Number} delimiterIndex - 'folder' index in the path
* @return {String} - CommonPrefix
*/
function getCommonPrefix(key, delimiter, delimiterIndex) {
return key.substring(0, delimiterIndex + delimiter.length);
}
/**
* Handle object listing with parameters
*
* @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
* @prop {String[]} Contents - 'files' to list
* @prop {Boolean} IsTruncated - truncated listing flag
* @prop {String|undefined} NextMarker - marker per amazon format
* @prop {Number} keys - count of listed keys
* @prop {String|undefined} delimiter - separator per amazon format
* @prop {String|undefined} prefix - prefix per amazon format
* @prop {Number} maxKeys - number of keys to list
*/
class Delimiter extends Extension {
/**
* Create a new Delimiter instance
* @constructor
* @param {Object} parameters - listing parameters
* @param {String} [parameters.delimiter] - delimiter per amazon
* format
* @param {String} [parameters.prefix] - prefix per amazon
* format
* @param {String} [parameters.marker] - marker per amazon
* format
* @param {Number} [parameters.maxKeys] - number of keys to list
* @param {Boolean} [parameters.v2] - indicates whether v2
* format
* @param {String} [parameters.startAfter] - marker per amazon
* format
* @param {String} [parameters.continuationToken] - obfuscated amazon
* token
* @param {Boolean} [parameters.alphabeticalOrder] - whether the result
* is alphabetically ordered
* @param {RequestLogger} logger - The logger of the
* request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
super(parameters, logger);
// original listing parameters
this.delimiter = parameters.delimiter;
this.prefix = parameters.prefix;
this.marker = parameters.marker;
this.maxKeys = parameters.maxKeys || 1000;
this.startAfter = parameters.startAfter;
this.continuationToken = parameters.continuationToken;
this.alphabeticalOrder =
typeof parameters.alphabeticalOrder !== 'undefined' ?
parameters.alphabeticalOrder : true;
this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
// results
this.CommonPrefixes = [];
this.Contents = [];
this.IsTruncated = false;
this.NextMarker = parameters.marker;
this.NextContinuationToken =
parameters.continuationToken || parameters.startAfter;
this.startMarker = parameters.v2 ? 'startAfter' : 'marker';
this.continueMarker = parameters.v2 ? 'continuationToken' : 'marker';
this.nextContinueMarker = parameters.v2 ?
'NextContinuationToken' : 'NextMarker';
if (this.delimiter !== undefined &&
this[this.nextContinueMarker] !== undefined &&
this[this.nextContinueMarker].startsWith(this.prefix || '')) {
const nextDelimiterIndex =
this[this.nextContinueMarker].indexOf(this.delimiter,
this.prefix ? this.prefix.length : 0);
this[this.nextContinueMarker] =
this[this.nextContinueMarker].slice(0, nextDelimiterIndex +
this.delimiter.length);
}
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
genMDParams: this.genMDParamsV0,
getObjectKey: this.getObjectKeyV0,
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
genMDParams: this.genMDParamsV1,
getObjectKey: this.getObjectKeyV1,
skipping: this.skippingV1,
},
}[this.vFormat]);
}
genMDParamsV0() {
const params = {};
if (this.prefix) {
params.gte = this.prefix;
params.lt = inc(this.prefix);
}
const startVal = this[this.continueMarker] || this[this.startMarker];
if (startVal) {
if (params.gte && params.gte > startVal) {
return params;
}
delete params.gte;
params.gt = startVal;
}
return params;
}
genMDParamsV1() {
const params = this.genMDParamsV0();
return listingParamsMasterKeysV0ToV1(params);
}
/**
* check if the max keys count has been reached and set the
* final state of the result if it is the case
* @return {Boolean} - indicates if the iteration has to stop
*/
_reachedMaxKeys() {
if (this.keys >= this.maxKeys) {
// In cases of maxKeys <= 0 -> IsTruncated = false
this.IsTruncated = this.maxKeys > 0;
return true;
}
return false;
}
/**
* Add a (key, value) tuple to the listing
* Set the NextMarker to the current key
* Increment the keys counter
* @param {String} key - The key to add
* @param {String} value - The value of the key
* @return {number} - indicates if iteration should continue
*/
addContents(key, value) {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
this.Contents.push({ key, value: this.trimMetadata(value) });
this[this.nextContinueMarker] = key;
++this.keys;
return FILTER_ACCEPT;
}
getObjectKeyV0(obj) {
return obj.key;
}
getObjectKeyV1(obj) {
return obj.key.slice(DbPrefixes.Master.length);
}
/**
* Filter to apply on each iteration, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filter(obj) {
const key = this.getObjectKey(obj);
const value = obj.value;
if ((this.prefix && !key.startsWith(this.prefix))
|| (this.alphabeticalOrder
&& typeof this[this.nextContinueMarker] === 'string'
&& key <= this[this.nextContinueMarker])) {
return FILTER_SKIP;
}
if (this.delimiter) {
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
if (delimiterIndex === -1) {
return this.addContents(key, value);
}
return this.addCommonPrefix(key, delimiterIndex);
}
return this.addContents(key, value);
}
/**
* Add a Common Prefix in the list
* @param {String} key - object name
* @param {Number} index - after prefix starting point
* @return {Boolean} - indicates if iteration should continue
*/
addCommonPrefix(key, index) {
const commonPrefix = getCommonPrefix(key, this.delimiter, index);
if (this.CommonPrefixes.indexOf(commonPrefix) === -1
&& this[this.nextContinueMarker] !== commonPrefix) {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
this.CommonPrefixes.push(commonPrefix);
this[this.nextContinueMarker] = commonPrefix;
++this.keys;
return FILTER_ACCEPT;
}
return FILTER_SKIP;
}
/**
* If repd happens to want to skip listing on a bucket in v0
* versioning key format, here is an idea.
*
* @return {string} - the present range (NextMarker) if repd believes
* that it's enough and should move on
*/
skippingV0() {
return this[this.nextContinueMarker];
}
/**
* If repd happens to want to skip listing on a bucket in v1
* versioning key format, here is an idea.
*
* @return {string} - the present range (NextMarker) if repd believes
* that it's enough and should move on
*/
skippingV1() {
return DbPrefixes.Master + this[this.nextContinueMarker];
}
/**
* Return an object containing all mandatory fields to use once the
* iteration is done, doesn't show a NextMarker field if the output
* isn't truncated
* @return {Object} - following amazon format
*/
result() {
/* NextMarker is only provided when delimiter is used.
* specified in v1 listing documentation
* http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
*/
const result = {
CommonPrefixes: this.CommonPrefixes,
Contents: this.Contents,
IsTruncated: this.IsTruncated,
Delimiter: this.delimiter,
};
if (this.parameters.v2) {
result.NextContinuationToken = this.IsTruncated
? this.NextContinuationToken : undefined;
} else {
result.NextMarker = (this.IsTruncated && this.delimiter)
? this.NextMarker : undefined;
}
return result;
}
}
module.exports = { Delimiter };

lib/algos/list/delimiter.ts
@@ -0,0 +1,356 @@
'use strict'; // eslint-disable-line strict
const Extension = require('./Extension').default;
const { inc, listingParamsMasterKeysV0ToV1,
FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } = require('./tools');
const VSConst = require('../../versioning/constants').VersioningConstants;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
export interface FilterState {
id: number,
};
export interface FilterReturnValue {
FILTER_ACCEPT,
FILTER_SKIP,
FILTER_END,
};
export const enum DelimiterFilterStateId {
NotSkipping = 1,
SkippingPrefix = 2,
};
export interface DelimiterFilterState_NotSkipping extends FilterState {
id: DelimiterFilterStateId.NotSkipping,
};
export interface DelimiterFilterState_SkippingPrefix extends FilterState {
id: DelimiterFilterStateId.SkippingPrefix,
prefix: string;
};
type KeyHandler = (key: string, value: string) => FilterReturnValue;
export type ResultObject = {
CommonPrefixes: string[];
Contents: {
key: string;
value: string;
}[];
IsTruncated: boolean;
Delimiter ?: string;
NextMarker ?: string;
NextContinuationToken ?: string;
};
/**
* Handle object listing with parameters
*
* @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
* @prop {String[]} Contents - 'files' to list
* @prop {Boolean} IsTruncated - truncated listing flag
* @prop {String|undefined} NextMarker - marker per amazon format
* @prop {Number} keys - count of listed keys
* @prop {String|undefined} delimiter - separator per amazon format
* @prop {String|undefined} prefix - prefix per amazon format
* @prop {Number} maxKeys - number of keys to list
*/
export class Delimiter extends Extension {
state: FilterState;
keyHandlers: { [id: number]: KeyHandler };
/**
* Create a new Delimiter instance
* @constructor
* @param {Object} parameters - listing parameters
* @param {String} [parameters.delimiter] - delimiter per amazon
* format
* @param {String} [parameters.prefix] - prefix per amazon
* format
* @param {String} [parameters.marker] - marker per amazon
* format
* @param {Number} [parameters.maxKeys] - number of keys to list
* @param {Boolean} [parameters.v2] - indicates whether v2
* format
* @param {String} [parameters.startAfter] - marker per amazon
* format
* @param {String} [parameters.continuationToken] - obfuscated amazon
* token
* @param {RequestLogger} logger - The logger of the
* request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
super(parameters, logger);
// original listing parameters
this.delimiter = parameters.delimiter;
this.prefix = parameters.prefix;
this.maxKeys = parameters.maxKeys || 1000;
if (parameters.v2) {
this.marker = parameters.continuationToken || parameters.startAfter;
} else {
this.marker = parameters.marker;
}
this.nextMarker = this.marker;
this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
// results
this.CommonPrefixes = [];
this.Contents = [];
this.IsTruncated = false;
this.keyHandlers = {};
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
genMDParams: this.genMDParamsV0,
getObjectKey: this.getObjectKeyV0,
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
genMDParams: this.genMDParamsV1,
getObjectKey: this.getObjectKeyV1,
skipping: this.skippingV1,
},
}[this.vFormat]);
// if there is a delimiter, we may skip ranges by prefix,
// hence using the NotSkippingPrefix flavor that checks the
// subprefix up to the delimiter for the NotSkipping state
if (this.delimiter) {
this.setKeyHandler(
DelimiterFilterStateId.NotSkipping,
this.keyHandler_NotSkippingPrefix.bind(this));
} else {
// listing without a delimiter never has to skip over any
// prefix -> use NeverSkipping flavor for the NotSkipping
// state
this.setKeyHandler(
DelimiterFilterStateId.NotSkipping,
this.keyHandler_NeverSkipping.bind(this));
}
this.setKeyHandler(
DelimiterFilterStateId.SkippingPrefix,
this.keyHandler_SkippingPrefix.bind(this));
this.state = <DelimiterFilterState_NotSkipping> {
id: DelimiterFilterStateId.NotSkipping,
};
}
genMDParamsV0() {
const params: { gt ?: string, gte ?: string, lt ?: string } = {};
if (this.prefix) {
params.gte = this.prefix;
params.lt = inc(this.prefix);
}
if (this.marker && this.delimiter) {
const commonPrefix = this.getCommonPrefix(this.marker);
if (commonPrefix) {
const afterPrefix = inc(commonPrefix);
if (!params.gte || afterPrefix > params.gte) {
params.gte = afterPrefix;
}
}
}
if (this.marker && (!params.gte || this.marker >= params.gte)) {
delete params.gte;
params.gt = this.marker;
}
return params;
}
genMDParamsV1() {
const params = this.genMDParamsV0();
return listingParamsMasterKeysV0ToV1(params);
}
/**
* check if the max keys count has been reached and set the
* final state of the result if it is the case
* @return {Boolean} - indicates if the iteration has to stop
*/
_reachedMaxKeys(): boolean {
if (this.keys >= this.maxKeys) {
// In cases of maxKeys <= 0 -> IsTruncated = false
this.IsTruncated = this.maxKeys > 0;
return true;
}
return false;
}
/**
* Add a (key, value) tuple to the listing,
* set the next marker to the current key
* and increment the keys counter
* @param {String} key - The key to add
* @param {String} value - The value of the key
* @return {undefined}
*/
addContents(key: string, value: string): void {
this.Contents.push({ key, value: this.trimMetadata(value) });
++this.keys;
this.nextMarker = key;
}
getCommonPrefix(key: string): string | undefined {
if (!this.delimiter) {
return undefined;
}
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
if (delimiterIndex === -1) {
return undefined;
}
return key.substring(0, delimiterIndex + this.delimiter.length);
}
/**
* Add a Common Prefix in the list
* @param {String} commonPrefix - common prefix to add
* @param {String} key - full key starting with commonPrefix
* @return {undefined}
*/
addCommonPrefix(commonPrefix: string, key: string): void {
// add the new prefix to the list
this.CommonPrefixes.push(commonPrefix);
++this.keys;
this.nextMarker = commonPrefix;
}
addCommonPrefixOrContents(key: string, value: string): string | undefined {
// add the subprefix to the common prefixes if the key has the delimiter
const commonPrefix = this.getCommonPrefix(key);
if (commonPrefix) {
this.addCommonPrefix(commonPrefix, key);
return commonPrefix;
}
this.addContents(key, value);
return undefined;
}
getObjectKeyV0(obj: { key: string }): string {
return obj.key;
}
getObjectKeyV1(obj: { key: string }): string {
return obj.key.slice(DbPrefixes.Master.length);
}
/**
* Filter to apply on each iteration, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - FILTER_ACCEPT, FILTER_SKIP or FILTER_END
*/
filter(obj: { key: string, value: string }): FilterReturnValue {
const key = this.getObjectKey(obj);
const value = obj.value;
return this.handleKey(key, value);
}
setState(state: FilterState): void {
this.state = state;
}
setKeyHandler(stateId: number, keyHandler: KeyHandler): void {
this.keyHandlers[stateId] = keyHandler;
}
handleKey(key: string, value: string): FilterReturnValue {
return this.keyHandlers[this.state.id](key, value);
}
keyHandler_NeverSkipping(key: string, value: string): FilterReturnValue {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
this.addContents(key, value);
return FILTER_ACCEPT;
}
keyHandler_NotSkippingPrefix(key: string, value: string): FilterReturnValue {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
const commonPrefix = this.addCommonPrefixOrContents(key, value);
if (commonPrefix) {
// transition into SkippingPrefix state to skip all following keys
// while they start with the same prefix
this.setState(<DelimiterFilterState_SkippingPrefix> {
id: DelimiterFilterStateId.SkippingPrefix,
prefix: commonPrefix,
});
}
return FILTER_ACCEPT;
}
keyHandler_SkippingPrefix(key: string, value: string): FilterReturnValue {
const { prefix } = <DelimiterFilterState_SkippingPrefix> this.state;
if (key.startsWith(prefix)) {
return FILTER_SKIP;
}
this.setState(<DelimiterFilterState_NotSkipping> {
id: DelimiterFilterStateId.NotSkipping,
});
return this.handleKey(key, value);
}
skippingBase(): string | undefined {
switch (this.state.id) {
case DelimiterFilterStateId.SkippingPrefix:
const { prefix } = <DelimiterFilterState_SkippingPrefix> this.state;
return inc(prefix);
default:
return SKIP_NONE;
}
}
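    // Illustrative note: inc() bumps the last character, so when skipping
    // prefix 'photos/' the listing may resume at inc('photos/') ===
    // 'photos0', the first key greater than every key under 'photos/'.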
skippingV0() {
return this.skippingBase();
}
skippingV1() {
const skipTo = this.skippingBase();
if (skipTo === SKIP_NONE) {
return SKIP_NONE;
}
return DbPrefixes.Master + skipTo;
}
/**
* Return an object containing all mandatory fields to use once the
* iteration is done, doesn't show a NextMarker field if the output
* isn't truncated
* @return {Object} - following amazon format
*/
result(): ResultObject {
/* NextMarker is only provided when delimiter is used.
* specified in v1 listing documentation
* http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
*/
const result: ResultObject = {
CommonPrefixes: this.CommonPrefixes,
Contents: this.Contents,
IsTruncated: this.IsTruncated,
Delimiter: this.delimiter,
};
if (this.parameters.v2) {
result.NextContinuationToken = this.IsTruncated
? this.nextMarker : undefined;
} else {
result.NextMarker = (this.IsTruncated && this.delimiter)
? this.nextMarker : undefined;
}
return result;
}
}
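A sketch of driving the state-machine filter above by hand (test-style; the stub logger is an assumption):

const { Delimiter } = require('./delimiter');
const logger = { info: () => {}, error: () => {} }; // stub

const listing = new Delimiter({ delimiter: '/', maxKeys: 1000 }, logger, 'v0');
listing.filter({ key: 'photos/cat.jpg', value: '{}' }); // ACCEPT: adds prefix 'photos/'
listing.filter({ key: 'photos/dog.jpg', value: '{}' }); // SKIP: SkippingPrefix state
// listing.skipping() === 'photos0', i.e. inc('photos/')
listing.filter({ key: 'readme.txt', value: '{}' });     // ACCEPT: back to NotSkipping
console.log(listing.result());
// CommonPrefixes: ['photos/'], Contents: [{ key: 'readme.txt', ... }]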

@@ -0,0 +1,127 @@
const { DelimiterMaster } = require('./delimiterMaster');
const { FILTER_ACCEPT, FILTER_END } = require('./tools');
type ResultObject = {
Contents: {
key: string;
value: string;
}[];
IsTruncated: boolean;
NextMarker ?: string;
};
/**
* Handle object listing with parameters. This extends the base class DelimiterMaster
* to return the master/current versions.
*/
class DelimiterCurrent extends DelimiterMaster {
/**
* Delimiter listing of current versions.
* @param {Object} parameters - listing parameters
* @param {String} parameters.beforeDate - limit the response to keys older than beforeDate
* @param {String} parameters.excludedDataStoreName - excluded datastore name
* @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
this.beforeDate = parameters.beforeDate;
this.excludedDataStoreName = parameters.excludedDataStoreName;
this.maxScannedLifecycleListingEntries = parameters.maxScannedLifecycleListingEntries;
this.scannedKeys = 0;
}
genMDParamsV0() {
const params = super.genMDParamsV0();
// lastModified and dataStoreName parameters are used by metadata that enables built-in filtering,
// a feature currently exclusive to MongoDB
if (this.beforeDate) {
params.lastModified = {
lt: this.beforeDate,
};
}
if (this.excludedDataStoreName) {
params.dataStoreName = {
ne: this.excludedDataStoreName,
}
};
return params;
}
/**
* Parses the stringified entry's value.
* @param s - stringified value
* @return - undefined if parsing fails, otherwise the parsed value
*/
_parse(s) {
let p;
try {
p = JSON.parse(s);
} catch (e: any) {
this.logger.warn(
'Could not parse Object Metadata while listing',
{ err: e.toString() });
}
return p;
}
/**
* check if the max keys count has been reached and set the
* final state of the result if it is the case
*
* specialized implementation on DelimiterCurrent to also check
* the number of scanned keys
*
* @return {Boolean} - indicates if the iteration has to stop
*/
_reachedMaxKeys(): boolean {
if (this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries) {
this.IsTruncated = true;
this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
{
maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
scannedKeys: this.scannedKeys,
});
return true;
}
return super._reachedMaxKeys();
}
addContents(key, value) {
++this.scannedKeys;
const parsedValue = this._parse(value);
// if parsing fails, skip the key.
if (parsedValue) {
const lastModified = parsedValue['last-modified'];
const dataStoreName = parsedValue.dataStoreName;
// We only add the entry if the current version is older than "beforeDate"
// and if "excludedDataStoreName" is either unspecified or different from
// the entry's data store name.
if ((!this.beforeDate || (lastModified && lastModified < this.beforeDate)) &&
(!this.excludedDataStoreName || dataStoreName !== this.excludedDataStoreName)) {
super.addContents(key, value);
}
// NextMarker is updated even if the object is not eligible: should a
// timeout occur before any content is added, this minimizes the amount
// of data the client needs to re-process.
this.nextMarker = key;
}
}
result(): object {
const result: ResultObject = {
Contents: this.Contents,
IsTruncated: this.IsTruncated,
};
if (this.IsTruncated) {
result.NextMarker = this.nextMarker;
}
return result;
}
}
module.exports = { DelimiterCurrent };
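A sketch of a lifecycle-style listing with DelimiterCurrent above (parameter values and stub logger are illustrative):

const { DelimiterCurrent } = require('./delimiterCurrent');
const logger = { info: () => {}, warn: () => {} }; // stub

const listing = new DelimiterCurrent({
    beforeDate: '2024-01-01T00:00:00.000Z',
    excludedDataStoreName: 'us-east-2',
    maxScannedLifecycleListingEntries: 10000,
    maxKeys: 1000,
}, logger, 'v0');
// genMDParamsV0() now also carries the MongoDB-only filters:
// lastModified: { lt: beforeDate }, dataStoreName: { ne: 'us-east-2' }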

@@ -1,196 +0,0 @@
'use strict'; // eslint-disable-line strict
const Delimiter = require('./delimiter').Delimiter;
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { BucketVersioningKeyFormat } = VSConst;
const { FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } = require('./tools');
const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes } = VSConst;
/**
* Handle object listing with parameters. This extends the base class Delimiter
* to return the raw master versions of existing objects.
*/
class DelimiterMaster extends Delimiter {
/**
* Delimiter listing of master versions.
* @param {Object} parameters - listing parameters
* @param {String} parameters.delimiter - delimiter per amazon format
* @param {String} parameters.prefix - prefix per amazon format
* @param {String} parameters.marker - marker per amazon format
* @param {Number} parameters.maxKeys - number of keys to list
* @param {Boolean} parameters.v2 - indicates whether v2 format
* @param {String} parameters.startAfter - marker per amazon v2 format
* @param {String} parameters.continuationToken - obfuscated amazon token
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
// non-PHD master version or a version whose master is a PHD version
this.prvKey = undefined;
this.prvPHDKey = undefined;
this.inReplayPrefix = false;
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
filter: this.filterV0,
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
filter: this.filterV1,
skipping: this.skippingV1,
},
}[this.vFormat]);
}
/**
* Filter to apply on each iteration for buckets in v0 format,
* based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filterV0(obj) {
let key = obj.key;
const value = obj.value;
if (key.startsWith(DbPrefixes.Replay)) {
this.inReplayPrefix = true;
return FILTER_SKIP;
}
this.inReplayPrefix = false;
/* Skip keys not starting with the prefix or not alphabetically
* ordered. */
if ((this.prefix && !key.startsWith(this.prefix))
|| (typeof this[this.nextContinueMarker] === 'string' &&
key <= this[this.nextContinueMarker])) {
return FILTER_SKIP;
}
/* Skip version keys (<key><versionIdSeparator><version>) if we already
* have a master version. */
const versionIdIndex = key.indexOf(VID_SEP);
if (versionIdIndex >= 0) {
key = key.slice(0, versionIdIndex);
/* - key === this.prvKey is triggered when a master version has
* been accepted for this key,
* - key === this.NextMarker or this.NextContinueToken is triggered
* when a listing page ends on an accepted obj and the next page
* starts with a version of this object.
* In that case prvKey is default set to undefined
* in the constructor and comparing to NextMarker is the only
* way to know we should not accept this version. This test is
* not redundant with the one at the beginning of this function,
* we are comparing here the key without the version suffix,
* - key startsWith the previous NextMarker happens because we set
* NextMarker to the common prefix instead of the whole key
* value. (TODO: remove this test once ZENKO-1048 is fixed)
* */
if (key === this.prvKey || key === this[this.nextContinueMarker] ||
(this.delimiter &&
key.startsWith(this[this.nextContinueMarker]))) {
/* master version already filtered */
return FILTER_SKIP;
}
}
if (Version.isPHD(value)) {
/* master version is a PHD version, we want to wait for the next
* one:
* - Set the prvKey to undefined to not skip the next version,
* - return accept to avoid users to skip the next values in range
* (skip scan mechanism in metadata backend like Metadata or
* MongoClient). */
this.prvKey = undefined;
this.prvPHDKey = key;
return FILTER_ACCEPT;
}
if (Version.isDeleteMarker(value)) {
/* This entry is a deleteMarker which has not been filtered by the
* version test. Either :
* - it is a deleteMarker on the master version, we want to SKIP
* all the following entries with this key (no master version),
* - or a deleteMarker following a PHD (setting prvKey to undefined
* when an entry is a PHD avoids the skip on version for the
* next entry). In that case we expect the master version to
* follow. */
if (key === this.prvPHDKey) {
this.prvKey = undefined;
return FILTER_ACCEPT;
}
this.prvKey = key;
return FILTER_SKIP;
}
this.prvKey = key;
if (this.delimiter) {
// check if the key has the delimiter
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
if (delimiterIndex >= 0) {
// try to add the prefix to the list
return this.addCommonPrefix(key, delimiterIndex);
}
}
return this.addContents(key, value);
}
/**
* Filter to apply on each iteration for buckets in v1 format,
* based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filterV1(obj) {
// Filtering master keys in v1 is simply listing the master
// keys, as the state of version keys do not change the
// result, so we can use Delimiter method directly.
return super.filter(obj);
}
skippingBase() {
if (this[this.nextContinueMarker]) {
// next marker or next continuation token:
// - foo/ : skipping foo/
// - foo : skipping foo.
const index = this[this.nextContinueMarker].
lastIndexOf(this.delimiter);
if (index === this[this.nextContinueMarker].length - 1) {
return this[this.nextContinueMarker];
}
return this[this.nextContinueMarker] + VID_SEP;
}
return SKIP_NONE;
}
skippingV0() {
if (this.inReplayPrefix) {
return DbPrefixes.Replay;
}
return this.skippingBase();
}
skippingV1() {
const skipTo = this.skippingBase();
if (skipTo === SKIP_NONE) {
return SKIP_NONE;
}
return DbPrefixes.Master + skipTo;
}
}
module.exports = { DelimiterMaster };

@@ -0,0 +1,620 @@
import {
Delimiter,
FilterState,
FilterReturnValue,
DelimiterFilterStateId,
DelimiterFilterState_NotSkipping,
DelimiterFilterState_SkippingPrefix,
ResultObject,
} from './delimiter';
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { BucketVersioningKeyFormat } = VSConst;
const { FILTER_ACCEPT, FILTER_SKIP, FILTER_END, SKIP_NONE, inc } = require('./tools');
import { GapSetEntry } from '../cache/GapSet';
import { GapCacheInterface } from '../cache/GapCache';
const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes } = VSConst;
export const enum DelimiterMasterFilterStateId {
SkippingVersionsV0 = 101,
WaitVersionAfterPHDV0 = 102,
SkippingGapV0 = 103,
};
interface DelimiterMasterFilterState_SkippingVersionsV0 extends FilterState {
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
masterKey: string,
};
interface DelimiterMasterFilterState_WaitVersionAfterPHDV0 extends FilterState {
id: DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
masterKey: string,
};
interface DelimiterMasterFilterState_SkippingGapV0 extends FilterState {
id: DelimiterMasterFilterStateId.SkippingGapV0,
};
export const enum GapCachingState {
NoGapCache = 0, // there is no gap cache
UnknownGap = 1, // waiting for a cache lookup
GapLookupInProgress = 2, // asynchronous gap lookup in progress
GapCached = 3, // an upcoming or already skippable gap is cached
NoMoreGap = 4, // the cache doesn't have any more gaps inside the listed range
};
type GapCachingInfo_NoGapCache = {
state: GapCachingState.NoGapCache;
};
type GapCachingInfo_NoCachedGap = {
state: GapCachingState.UnknownGap
| GapCachingState.GapLookupInProgress
gapCache: GapCacheInterface;
};
type GapCachingInfo_GapCached = {
state: GapCachingState.GapCached;
gapCache: GapCacheInterface;
gapCached: GapSetEntry;
};
type GapCachingInfo_NoMoreGap = {
state: GapCachingState.NoMoreGap;
};
type GapCachingInfo = GapCachingInfo_NoGapCache
| GapCachingInfo_NoCachedGap
| GapCachingInfo_GapCached
| GapCachingInfo_NoMoreGap;
export const enum GapBuildingState {
Disabled = 0, // no gap cache or no gap building needed (e.g. in V1 versioning format)
NotBuilding = 1, // not currently building a gap (i.e. not listing within a gap)
Building = 2, // currently building a gap (i.e. listing within a gap)
Expired = 3, // not allowed to build due to exposure delay timeout
};
type GapBuildingInfo_NothingToBuild = {
state: GapBuildingState.Disabled | GapBuildingState.Expired;
};
type GapBuildingParams = {
/**
* minimum weight for a gap to be created in the cache
*/
minGapWeight: number;
/**
* trigger a cache setGap() call every N skippable keys
*/
triggerSaveGapWeight: number;
/**
* timestamp to assess whether we're still inside the validity period to
* be allowed to build gaps
*/
initTimestamp: number;
};
type GapBuildingInfo_NotBuilding = {
state: GapBuildingState.NotBuilding;
gapCache: GapCacheInterface;
params: GapBuildingParams;
};
type GapBuildingInfo_Building = {
state: GapBuildingState.Building;
gapCache: GapCacheInterface;
params: GapBuildingParams;
/**
* Gap currently being created
*/
gap: GapSetEntry;
/**
* total current weight of the gap being created
*/
gapWeight: number;
};
type GapBuildingInfo = GapBuildingInfo_NothingToBuild
| GapBuildingInfo_NotBuilding
| GapBuildingInfo_Building;
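// Typical state flow (illustrative):
//
//   gap caching:  NoGapCache -> UnknownGap -> GapLookupInProgress
//                 -> GapCached -> (past the gap) UnknownGap ... -> NoMoreGap
//   gap building: Disabled -> NotBuilding <-> Building
//                 -> Expired (once the exposure delay has passed)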
/**
* Handle object listing with parameters. This extends the base class Delimiter
* to return the raw master versions of existing objects.
*/
export class DelimiterMaster extends Delimiter {
_gapCaching: GapCachingInfo;
_gapBuilding: GapBuildingInfo;
_refreshedBuildingParams: GapBuildingParams | null;
/**
* Delimiter listing of master versions.
* @param {Object} parameters - listing parameters
* @param {String} [parameters.delimiter] - delimiter per amazon format
* @param {String} [parameters.prefix] - prefix per amazon format
* @param {String} [parameters.marker] - marker per amazon format
* @param {Number} [parameters.maxKeys] - number of keys to list
* @param {Boolean} [parameters.v2] - indicates whether v2 format
* @param {String} [parameters.startAfter] - marker per amazon v2 format
* @param {String} [parameters.continuationToken] - obfuscated amazon token
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat="v0"] - versioning key format
*/
constructor(parameters, logger, vFormat?: string) {
super(parameters, logger, vFormat);
if (this.vFormat === BucketVersioningKeyFormat.v0) {
// override Delimiter's implementation of NotSkipping for
// DelimiterMaster logic (skipping versions and special
// handling of delete markers and PHDs)
this.setKeyHandler(
DelimiterFilterStateId.NotSkipping,
this.keyHandler_NotSkippingPrefixNorVersionsV0.bind(this));
// add extra state handlers specific to DelimiterMaster with v0 format
this.setKeyHandler(
DelimiterMasterFilterStateId.SkippingVersionsV0,
this.keyHandler_SkippingVersionsV0.bind(this));
this.setKeyHandler(
DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
this.keyHandler_WaitVersionAfterPHDV0.bind(this));
this.setKeyHandler(
DelimiterMasterFilterStateId.SkippingGapV0,
this.keyHandler_SkippingGapV0.bind(this));
if (this.marker) {
// distinct initial state to include some special logic
// before the first master key is found that does not have
// to be checked afterwards
this.state = <DelimiterMasterFilterState_SkippingVersionsV0> {
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
masterKey: this.marker,
};
} else {
this.state = <DelimiterFilterState_NotSkipping> {
id: DelimiterFilterStateId.NotSkipping,
};
}
} else {
// save base implementation of the `NotSkipping` state in
// Delimiter before overriding it with ours, to be able to call it from there
this.keyHandler_NotSkipping_Delimiter = this.keyHandlers[DelimiterFilterStateId.NotSkipping];
this.setKeyHandler(
DelimiterFilterStateId.NotSkipping,
this.keyHandler_NotSkippingPrefixNorVersionsV1.bind(this));
}
// in v1, we can directly use Delimiter's implementation,
// which is already set to the proper state
// default initialization of the gap cache and building states, can be
// set by refreshGapCache()
this._gapCaching = {
state: GapCachingState.NoGapCache,
};
this._gapBuilding = {
state: GapBuildingState.Disabled,
};
this._refreshedBuildingParams = null;
}
/**
* Get the validity period left before a refresh of the gap cache is needed
* to continue building new gaps.
*
* @return {number|null} one of:
* - the remaining time in milliseconds in which gaps can be added to the
* cache before a call to refreshGapCache() is required
* - or 0 if there is no time left and a call to refreshGapCache() is required
* to resume caching gaps
* - or null if refreshing the cache is never needed (because the gap cache
* is either not available or not used)
*/
getGapBuildingValidityPeriodMs(): number | null {
let gapBuilding;
switch (this._gapBuilding.state) {
case GapBuildingState.Disabled:
return null;
case GapBuildingState.Expired:
return 0;
case GapBuildingState.NotBuilding:
gapBuilding = <GapBuildingInfo_NotBuilding> this._gapBuilding;
break;
case GapBuildingState.Building:
gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
break;
}
const { gapCache, params } = gapBuilding;
const elapsedTime = Date.now() - params.initTimestamp;
return Math.max(gapCache.exposureDelayMs - elapsedTime, 0);
}
/**
* Refresh the gaps caching logic (gaps are series of current delete markers
* in V0 bucket metadata format). It has two effects:
*
* - starts exposing existing and future gaps from the cache to efficiently
* skip over series of current delete markers that have been seen and cached
* earlier
*
* - enables building and caching new gaps (or extend existing ones), for a
* limited time period defined by the `gapCacheProxy.exposureDelayMs` value
* in milliseconds. To refresh the validity period and resume building and
* caching new gaps, one must restart a new listing from the database (starting
* at the current listing key, included), then call refreshGapCache() again.
*
* @param {GapCacheInterface} gapCacheProxy - API proxy to the gaps cache
* (the proxy should handle prefixing object keys with the bucket name)
* @param {number} [minGapWeight=100] - minimum weight of a gap for it to be
* added in the cache
* @param {number} [triggerSaveGapWeight] - cumulative weight to wait for
* before saving the current building gap. Cannot be greater than
* `gapCacheProxy.maxGapWeight` (the value is thresholded to `maxGapWeight`
* otherwise). Defaults to `gapCacheProxy.maxGapWeight / 2`.
* @return {undefined}
*/
refreshGapCache(
gapCacheProxy: GapCacheInterface,
minGapWeight?: number,
triggerSaveGapWeight?: number
): void {
if (this.vFormat !== BucketVersioningKeyFormat.v0) {
return;
}
if (this._gapCaching.state === GapCachingState.NoGapCache) {
this._gapCaching = {
state: GapCachingState.UnknownGap,
gapCache: gapCacheProxy,
};
}
const refreshedBuildingParams: GapBuildingParams = {
minGapWeight: minGapWeight || 100,
triggerSaveGapWeight: triggerSaveGapWeight
|| Math.trunc(gapCacheProxy.maxGapWeight / 2),
initTimestamp: Date.now(),
};
if (this._gapBuilding.state === GapBuildingState.Building) {
// refreshed params will be applied as soon as the current building gap is saved
this._refreshedBuildingParams = refreshedBuildingParams;
} else {
this._gapBuilding = {
state: GapBuildingState.NotBuilding,
gapCache: gapCacheProxy,
params: refreshedBuildingParams,
};
}
}
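    // Illustrative usage from a listing session (not part of the
    // implementation):
    //
    //   listing.refreshGapCache(gapCacheProxy, 100);
    //   // ... list entries ...
    //   if (listing.getGapBuildingValidityPeriodMs() === 0) {
    //       // restart the listing from the current key, then:
    //       listing.refreshGapCache(gapCacheProxy, 100);
    //   }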
/**
* Trigger a lookup of the closest upcoming or already skippable gap.
*
* @param {string} fromKey - lookup a gap not before 'fromKey'
* @return {undefined} - the lookup is asynchronous and its
* response is handled inside this function
*/
_triggerGapLookup(gapCaching: GapCachingInfo_NoCachedGap, fromKey: string): void {
this._gapCaching = {
state: GapCachingState.GapLookupInProgress,
gapCache: gapCaching.gapCache,
};
const maxKey = this.prefix ? inc(this.prefix) : undefined;
gapCaching.gapCache.lookupGap(fromKey, maxKey).then(_gap => {
const gap = <GapSetEntry | null> _gap;
if (gap) {
this._gapCaching = {
state: GapCachingState.GapCached,
gapCache: gapCaching.gapCache,
gapCached: gap,
};
} else {
this._gapCaching = {
state: GapCachingState.NoMoreGap,
};
}
});
}
_checkGapOnMasterDeleteMarker(key: string): FilterReturnValue {
switch (this._gapBuilding.state) {
case GapBuildingState.Disabled:
case GapBuildingState.Expired:
break;
case GapBuildingState.NotBuilding:
this._createBuildingGap(key, 1);
break;
case GapBuildingState.Building:
this._updateBuildingGap(key);
break;
}
if (this._gapCaching.state === GapCachingState.GapCached) {
const { gapCached } = this._gapCaching;
if (key >= gapCached.firstKey) {
if (key <= gapCached.lastKey) {
// we are inside the last looked up cached gap: transition to
// 'SkippingGapV0' state
this.setState(<DelimiterMasterFilterState_SkippingGapV0> {
id: DelimiterMasterFilterStateId.SkippingGapV0,
});
// cut the current gap before skipping, it will be merged or
// chained with the existing one (depending on its weight)
if (this._gapBuilding.state === GapBuildingState.Building) {
// subtract 1 from the weight because we are going to chain this gap,
// which has an overlap of one key.
this._gapBuilding.gap.weight -= 1;
this._cutBuildingGap();
}
return FILTER_SKIP;
}
// as we are past the cached gap, we will need another lookup
this._gapCaching = {
state: GapCachingState.UnknownGap,
gapCache: this._gapCaching.gapCache,
};
}
}
if (this._gapCaching.state === GapCachingState.UnknownGap) {
this._triggerGapLookup(this._gapCaching, key);
}
return FILTER_ACCEPT;
}
filter_onNewMasterKeyV0(key: string, value: string): FilterReturnValue {
// if this master key is a delete marker, accept it without
// adding the version to the contents
if (Version.isDeleteMarker(value)) {
// update the state to start skipping versions of the new master key
this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
masterKey: key,
});
return this._checkGapOnMasterDeleteMarker(key);
}
if (Version.isPHD(value)) {
// master version is a PHD version: wait for the first
// following version, which will be considered the actual
// master key
this.setState(<DelimiterMasterFilterState_WaitVersionAfterPHDV0> {
id: DelimiterMasterFilterStateId.WaitVersionAfterPHDV0,
masterKey: key,
});
return FILTER_ACCEPT;
}
// cut the current gap as soon as a non-deleted entry is seen
this._cutBuildingGap();
if (key.startsWith(DbPrefixes.Replay)) {
// skip internal replay prefix entirely
this.setState(<DelimiterFilterState_SkippingPrefix> {
id: DelimiterFilterStateId.SkippingPrefix,
prefix: DbPrefixes.Replay,
});
return FILTER_SKIP;
}
if (this._reachedMaxKeys()) {
return FILTER_END;
}
const commonPrefix = this.addCommonPrefixOrContents(key, value);
if (commonPrefix) {
// transition into SkippingPrefix state to skip all following keys
// while they start with the same prefix
this.setState(<DelimiterFilterState_SkippingPrefix> {
id: DelimiterFilterStateId.SkippingPrefix,
prefix: commonPrefix,
});
return FILTER_ACCEPT;
}
// update the state to start skipping versions of the new master key
this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
masterKey: key,
});
return FILTER_ACCEPT;
}
keyHandler_NotSkippingPrefixNorVersionsV0(key: string, value: string): FilterReturnValue {
return this.filter_onNewMasterKeyV0(key, value);
}
filter_onNewMasterKeyV1(key: string, value: string): FilterReturnValue {
// if this master key is a delete marker, accept it without
// adding the version to the contents
if (Version.isDeleteMarker(value)) {
return FILTER_ACCEPT;
}
// use base Delimiter's implementation
return this.keyHandler_NotSkipping_Delimiter(key, value);
}
keyHandler_NotSkippingPrefixNorVersionsV1(key: string, value: string): FilterReturnValue {
return this.filter_onNewMasterKeyV1(key, value);
}
keyHandler_SkippingVersionsV0(key: string, value: string): FilterReturnValue {
/* In the SkippingVersionsV0 state, skip all version keys
* (<key><versionIdSeparator><version>) */
const versionIdIndex = key.indexOf(VID_SEP);
if (versionIdIndex !== -1) {
// version keys count in the building gap weight because they must
// also be listed until skipped
if (this._gapBuilding.state === GapBuildingState.Building) {
this._updateBuildingGap(key);
}
return FILTER_SKIP;
}
return this.filter_onNewMasterKeyV0(key, value);
}
keyHandler_WaitVersionAfterPHDV0(key: string, value: string): FilterReturnValue {
// After a PHD key is encountered, the next version key of the
// same object, if it exists, is the new master key, hence
// consider it as such and call 'onNewMasterKeyV0' (the test
// 'masterKey === phdKey' is probably redundant since all objects
// in v0 have a master key, but it is kept out of caution)
const { masterKey: phdKey } = <DelimiterMasterFilterState_WaitVersionAfterPHDV0> this.state;
const versionIdIndex = key.indexOf(VID_SEP);
if (versionIdIndex !== -1) {
const masterKey = key.slice(0, versionIdIndex);
if (masterKey === phdKey) {
return this.filter_onNewMasterKeyV0(masterKey, value);
}
}
return this.filter_onNewMasterKeyV0(key, value);
}
keyHandler_SkippingGapV0(key: string, value: string): FilterReturnValue {
const { gapCache, gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
if (key <= gapCached.lastKey) {
return FILTER_SKIP;
}
this._gapCaching = {
state: GapCachingState.UnknownGap,
gapCache,
};
this.setState(<DelimiterMasterFilterState_SkippingVersionsV0> {
id: DelimiterMasterFilterStateId.SkippingVersionsV0,
});
// Start a gap with weight=0 from the latest skippable key. This
// allows extending the gap just skipped with a chained gap in case
// other delete markers are seen after the existing gap is skipped.
this._createBuildingGap(gapCached.lastKey, 0, gapCached.weight);
return this.handleKey(key, value);
}
skippingBase(): string | undefined {
switch (this.state.id) {
case DelimiterMasterFilterStateId.SkippingVersionsV0:
const { masterKey } = <DelimiterMasterFilterState_SkippingVersionsV0> this.state;
return masterKey + inc(VID_SEP);
case DelimiterMasterFilterStateId.SkippingGapV0:
const { gapCached } = <GapCachingInfo_GapCached> this._gapCaching;
return gapCached.lastKey;
default:
return super.skippingBase();
}
}
result(): ResultObject {
this._cutBuildingGap();
return super.result();
}
_checkRefreshedBuildingParams(params: GapBuildingParams): GapBuildingParams {
if (this._refreshedBuildingParams) {
const newParams = this._refreshedBuildingParams;
this._refreshedBuildingParams = null;
return newParams;
}
return params;
}
/**
* Save the gap being built if allowed (i.e. still within the
* allocated exposure time window).
*
* @return {boolean} - true if the gap was saved, false if we are
* outside the allocated exposure time window.
*/
_saveBuildingGap(): boolean {
const { gapCache, params, gap, gapWeight } =
<GapBuildingInfo_Building> this._gapBuilding;
const totalElapsed = Date.now() - params.initTimestamp;
if (totalElapsed >= gapCache.exposureDelayMs) {
this._gapBuilding = {
state: GapBuildingState.Expired,
};
this._refreshedBuildingParams = null;
return false;
}
const { firstKey, lastKey, weight } = gap;
gapCache.setGap(firstKey, lastKey, weight);
this._gapBuilding = {
state: GapBuildingState.Building,
gapCache,
params: this._checkRefreshedBuildingParams(params),
gap: {
firstKey: gap.lastKey,
lastKey: gap.lastKey,
weight: 0,
},
gapWeight,
};
return true;
}
/**
* Create a new gap to be extended afterwards
*
* @param {string} newKey - gap's first key
* @param {number} startWeight - initial weight of the building gap (usually 0 or 1)
* @param {number} [cachedWeight] - if continuing a cached gap, weight of the existing
* cached portion
* @return {undefined}
*/
_createBuildingGap(newKey: string, startWeight: number, cachedWeight?: number): void {
if (this._gapBuilding.state === GapBuildingState.NotBuilding) {
const { gapCache, params } = <GapBuildingInfo_NotBuilding> this._gapBuilding;
this._gapBuilding = {
state: GapBuildingState.Building,
gapCache,
params: this._checkRefreshedBuildingParams(params),
gap: {
firstKey: newKey,
lastKey: newKey,
weight: startWeight,
},
gapWeight: (cachedWeight || 0) + startWeight,
};
}
}
_updateBuildingGap(newKey: string): void {
const gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
const { params, gap } = gapBuilding;
gap.lastKey = newKey;
gap.weight += 1;
gapBuilding.gapWeight += 1;
// the GapCache API requires updating a gap regularly because it can only split
// it once per update, by the known last key. In practice the default behavior
// is to trigger an update after a number of keys that is half the maximum weight.
// Saving regularly also lets other listings benefit from the cache sooner.
if (gapBuilding.gapWeight >= params.minGapWeight &&
gap.weight >= params.triggerSaveGapWeight) {
this._saveBuildingGap();
}
}
_cutBuildingGap(): void {
if (this._gapBuilding.state === GapBuildingState.Building) {
let gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
let { gapCache, params, gap, gapWeight } = gapBuilding;
// only set gaps that are significant enough in weight and
// with a non-empty extension
if (gapWeight >= params.minGapWeight && gap.weight > 0) {
// we're done if we were not allowed to save the gap
if (!this._saveBuildingGap()) {
return;
}
// params may have been refreshed, reload them
gapBuilding = <GapBuildingInfo_Building> this._gapBuilding;
params = gapBuilding.params;
}
this._gapBuilding = {
state: GapBuildingState.NotBuilding,
gapCache,
params,
};
}
}
}
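
For context, here is a minimal sketch of how a caller could drive the gap-building logic above. The `logger`, the `gapCacheProxy` object (implementing GapCacheInterface) and the `entries` iterable of { key, value } records are assumptions for illustration, not names from this change:

const { DelimiterMaster } = require('./delimiterMaster');
const { FILTER_END } = require('./tools');

// Hypothetical wiring: list a v0-format bucket while exposing the
// delete-marker gaps it discovers to a shared gap cache.
const listing = new DelimiterMaster({ prefix: 'photos/', maxKeys: 1000 }, logger, 'v0');
// Gaps lighter than 100 keys are not cached; the building gap is saved
// every `gapCacheProxy.maxGapWeight / 2` keys by default.
listing.refreshGapCache(gapCacheProxy, 100);
for (const entry of entries) {
    if (listing.filter(entry) === FILTER_END) {
        break; // maxKeys reached, stop iterating
    }
}
const { Contents, CommonPrefixes, IsTruncated } = listing.result();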

View File

@ -0,0 +1,202 @@
const { DelimiterVersions } = require('./delimiterVersions');
const { FILTER_END, FILTER_SKIP } = require('./tools');
const TRIM_METADATA_MIN_BLOB_SIZE = 10000;
/**
* Handle object listing with parameters. This extends the base class DelimiterVersions
* to return the raw non-current version objects.
*/
class DelimiterNonCurrent extends DelimiterVersions {
/**
* Delimiter listing of non-current versions.
* @param {Object} parameters - listing parameters
* @param {String} parameters.keyMarker - key marker
* @param {String} parameters.versionIdMarker - version id marker
* @param {String} parameters.beforeDate - limit the response to keys whose stale date is older than beforeDate;
* the stale date is the date when a version becomes non-current.
* @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
* @param {String} parameters.excludedDataStoreName - exclude dataStoreName matches from the versions
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
this.beforeDate = parameters.beforeDate;
this.excludedDataStoreName = parameters.excludedDataStoreName;
this.maxScannedLifecycleListingEntries = parameters.maxScannedLifecycleListingEntries;
// internal state
this.prevKey = null;
this.staleDate = null;
this.scannedKeys = 0;
}
getLastModified(value) {
let lastModified;
try {
const v = JSON.parse(value);
lastModified = v['last-modified'];
} catch (e) {
this.logger.warn('could not parse Object Metadata while listing',
{
method: 'getLastModified',
err: e.toString(),
});
}
return lastModified;
}
// Override keyHandler_SkippingVersions to include the last version from the previous listing.
// The creation (last-modified) date of this version will be the stale date for the following version.
// eslint-disable-next-line camelcase
keyHandler_SkippingVersions(key, versionId, value) {
if (key === this.keyMarker) {
// since the nonversioned key equals the marker, there is
// necessarily a versionId in this key
const _versionId = versionId;
if (_versionId < this.versionIdMarker) {
// skip all versions until marker
return FILTER_SKIP;
}
}
this.setState({
id: 1 /* NotSkipping */,
});
return this.handleKey(key, versionId, value);
}
filter(obj) {
if (this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries) {
this.IsTruncated = true;
this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
{
maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
scannedKeys: this.scannedKeys,
});
return FILTER_END;
}
++this.scannedKeys;
return super.filter(obj);
}
/**
* NOTE: Each version of a specific key is sorted from the latest to the oldest
* thanks to the way version ids are generated.
* DESCRIPTION: Skip the version if it represents the master key, but keep its last-modified date in memory,
* which will be the stale date of the following version.
* The following version is pushed only:
* - if the "stale date" (picked up from the previous version) is available (JSON.parse has not failed),
* - if "beforeDate" is not specified or if specified and the "stale date" is older.
* - if "excludedDataStoreName" is not specified or if specified and the data store name is different
* The in-memory "stale date" is then updated with the version's last-modified date to be used for
* the following version.
* The process stops and returns the available results if either:
* - no more metadata keys are left to be processed,
* - the listing reaches the maximum number of keys to be returned,
* - or the internal timeout is reached.
* @param {String} key - The key to add
* @param {String} versionId - The version id
* @param {String} value - The value of the key
* @return {undefined}
*/
addVersion(key, versionId, value) {
this.nextKeyMarker = key;
this.nextVersionIdMarker = versionId;
// Skip the version if it represents the current version, but keep its last-modified date,
// which will be the stale date of the following version.
const isCurrentVersion = key !== this.prevKey;
if (isCurrentVersion) {
this.staleDate = this.getLastModified(value);
this.prevKey = key;
return;
}
// The following version is pushed only:
// - if the "stale date" (picked up from the previous version) is available (JSON.parse has not failed),
// - if "beforeDate" is not specified or if specified and the "stale date" is older.
// - if "excludedDataStoreName" is not specified or if specified and the data store name is different
let lastModified;
if (this.staleDate && (!this.beforeDate || this.staleDate < this.beforeDate)) {
const parsedValue = this._parse(value);
// if parsing fails, skip the key.
if (parsedValue) {
const dataStoreName = parsedValue.dataStoreName;
lastModified = parsedValue['last-modified'];
if (!this.excludedDataStoreName || dataStoreName !== this.excludedDataStoreName) {
const s = this._stringify(parsedValue, this.staleDate);
// only push the entry if _stringify succeeded, so every pushed object has a defined staleDate.
if (s) {
this.Versions.push({ key, value: s });
++this.keys;
}
}
}
}
// The in-memory "stale date" is then updated with the version's last-modified date to be used for
// the following version.
this.staleDate = lastModified || this.getLastModified(value);
return;
}
/**
* Parses the stringified entry's value and removes the location property if too large.
* @param {string} s - stringified value
* @return {object} p - undefined if parsing fails, otherwise the parsed value.
*/
_parse(s) {
let p;
try {
p = JSON.parse(s);
if (s.length >= TRIM_METADATA_MIN_BLOB_SIZE) {
delete p.location;
}
} catch (e) {
this.logger.warn('Could not parse Object Metadata while listing', {
method: 'DelimiterNonCurrent._parse',
err: e.toString(),
});
}
return p;
}
_stringify(parsedMD, staleDate) {
const p = parsedMD;
let s = undefined;
p.staleDate = staleDate;
try {
s = JSON.stringify(p);
} catch (e) {
this.logger.warn('could not stringify Object Metadata while listing', {
method: 'DelimiterNonCurrent._stringify',
err: e.toString(),
});
}
return s;
}
result() {
const { Versions, IsTruncated, NextKeyMarker, NextVersionIdMarker } = super.result();
const result = {
Contents: Versions,
IsTruncated,
};
if (NextKeyMarker) {
result.NextKeyMarker = NextKeyMarker;
}
if (NextVersionIdMarker) {
result.NextVersionIdMarker = NextVersionIdMarker;
}
return result;
}
}
module.exports = { DelimiterNonCurrent };
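
A worked example of the stale-date bookkeeping in addVersion() above: versions arrive newest first, so each version's last-modified date becomes the stale date of the next (older) one. The keys, version ids and dates are invented for illustration, and a `logger` is assumed:

const { DelimiterNonCurrent } = require('./delimiterNonCurrent');

// Versions of 'doc' arrive newest first:
//   ('doc', v3, last-modified 2024-03-01)  current: date remembered, nothing pushed
//   ('doc', v2, last-modified 2024-02-01)  non-current: pushed with staleDate 2024-03-01
//   ('doc', v1, last-modified 2024-01-01)  non-current: pushed with staleDate 2024-02-01
const listing = new DelimiterNonCurrent(
    { beforeDate: '2024-02-15T00:00:00.000Z' }, logger, 'v1');
// With this beforeDate, only v1 is returned: its stale date (2024-02-01)
// is older than beforeDate, while v2's stale date (2024-03-01) is not.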

View File

@ -0,0 +1,204 @@
const DelimiterVersions = require('./delimiterVersions').DelimiterVersions;
const { FILTER_END } = require('./tools');
const TRIM_METADATA_MIN_BLOB_SIZE = 10000;
/**
* Handle object listing with parameters. This extends the base class DelimiterVersions
* to return the orphan delete markers. Orphan delete markers are also
* referred to as expired object delete markers.
* They are delete markers with zero noncurrent versions.
*/
class DelimiterOrphanDeleteMarker extends DelimiterVersions {
/**
* Delimiter listing of orphan delete markers.
* @param {Object} parameters - listing parameters
* @param {String} parameters.beforeDate - limit the response to keys older than beforeDate
* @param {Number} parameters.maxScannedLifecycleListingEntries - max number of entries to be scanned
* @param {RequestLogger} logger - The logger of the request
* @param {String} [vFormat] - versioning key format
*/
constructor(parameters, logger, vFormat) {
const {
marker,
maxKeys,
prefix,
beforeDate,
maxScannedLifecycleListingEntries,
} = parameters;
const versionParams = {
// The orphan delete marker logic uses the term 'marker' instead of 'keyMarker',
// as the latter could suggest the presence of a 'versionIdMarker'.
keyMarker: marker,
maxKeys,
prefix,
};
super(versionParams, logger, vFormat);
this.maxScannedLifecycleListingEntries = maxScannedLifecycleListingEntries;
this.beforeDate = beforeDate;
// this.prevKeyName is used as a marker for the next listing when the current one reaches its entry limit.
// We cannot rely on this.keyName, as it contains the name of the current key.
// In the event of a listing interruption due to reaching the maximum scanned entries,
// relying on this.keyName would cause the next listing to skip the current key because S3 starts
// listing after the marker.
this.prevKeyName = null;
this.keyName = null;
this.value = null;
this.scannedKeys = 0;
}
_reachedMaxKeys() {
if (this.keys >= this.maxKeys) {
return true;
}
return false;
}
_addOrphan() {
const parsedValue = this._parse(this.value);
// if parsing fails, skip the key.
if (parsedValue) {
const lastModified = parsedValue['last-modified'];
const isDeleteMarker = parsedValue.isDeleteMarker;
// We then check if the orphan version is a delete marker and if it is older than the "beforeDate"
if ((!this.beforeDate || (lastModified && lastModified < this.beforeDate)) && isDeleteMarker) {
// Prefer returning untrimmed data rather than stopping the service in case of a stringify failure.
const s = this._stringify(parsedValue) || this.value;
this.Versions.push({ key: this.keyName, value: s });
this.nextKeyMarker = this.keyName;
++this.keys;
}
}
}
/**
* Parses the stringified entry's value and removes the location property if too large.
* @param {string} s - stringified value
* @return {object} p - undefined if parsing fails, otherwise the parsed value.
*/
_parse(s) {
let p;
try {
p = JSON.parse(s);
if (s.length >= TRIM_METADATA_MIN_BLOB_SIZE) {
delete p.location;
}
} catch (e) {
this.logger.warn('Could not parse Object Metadata while listing', {
method: 'DelimiterOrphanDeleteMarker._parse',
err: e.toString(),
});
}
return p;
}
_stringify(value) {
const p = value;
let s = undefined;
try {
s = JSON.stringify(p);
} catch (e) {
this.logger.warn('could not stringify Object Metadata while listing',
{
method: 'DelimiterOrphanDeleteMarker._stringify',
err: e.toString(),
});
}
return s;
}
/**
* The purpose of _isMaxScannedEntriesReached is to restrict the number of scanned entries,
* thus controlling resource overhead (CPU...).
* @return {boolean} isMaxScannedEntriesReached - true if the maximum limit on the number
* of entries scanned has been reached, false otherwise.
*/
_isMaxScannedEntriesReached() {
return this.maxScannedLifecycleListingEntries && this.scannedKeys >= this.maxScannedLifecycleListingEntries;
}
filter(obj) {
if (this._isMaxScannedEntriesReached()) {
this.nextKeyMarker = this.prevKeyName;
this.IsTruncated = true;
this.logger.info('listing stopped due to reaching the maximum scanned entries limit',
{
maxScannedLifecycleListingEntries: this.maxScannedLifecycleListingEntries,
scannedKeys: this.scannedKeys,
});
return FILTER_END;
}
++this.scannedKeys;
return super.filter(obj);
}
/**
* NOTE: Each version of a specific key is sorted from the latest to the oldest
* thanks to the way version ids are generated.
* DESCRIPTION: For a given key, the latest version is kept in memory since it is the current version.
* If the following version references a new key, it means that the previous one was an orphan version.
* We then check whether the orphan version is a delete marker and whether it is older than the "beforeDate".
* The process stops and returns the available results if either:
* - no more metadata keys are left to be processed,
* - the listing reaches the maximum number of keys to be returned,
* - or the internal timeout is reached.
* NOTE: we cannot leverage MongoDB to list only keys older than "beforeDate"
* because we would then be unable to determine whether they are orphans.
* @param {String} key - The object key.
* @param {String} versionId - The object version id.
* @param {String} value - The value of the key
* @return {undefined}
*/
addVersion(key, versionId, value) {
// For a given key, the youngest version is kept in memory since it represents the current version.
if (key !== this.keyName) {
// If this.value is defined, it means that <this.keyName, this.value> pair is "allowed" to be an orphan.
if (this.value) {
this._addOrphan();
}
this.prevKeyName = this.keyName;
this.keyName = key;
this.value = value;
return;
}
// If the key is not the current version, we can skip it in the next listing
// in the case where the current listing is interrupted due to reaching the maximum scanned entries.
this.prevKeyName = key;
this.keyName = key;
this.value = null;
return;
}
result() {
// Only check for remaining last orphan delete marker if the listing is not interrupted.
// This will help avoid false positives.
if (!this._isMaxScannedEntriesReached()) {
// The following check makes sure the last orphan delete marker is not forgotten.
if (this.keys < this.maxKeys) {
if (this.value) {
this._addOrphan();
}
// The following makes sure that if maxKeys is reached, IsTruncated is set to true.
// We moved the "IsTruncated" logic out of _reachedMaxKeys to take the last entity into account
// when the listing is truncated right before it and that entity is an orphan delete marker.
} else {
this.IsTruncated = this.maxKeys > 0;
}
}
const result = {
Contents: this.Versions,
IsTruncated: this.IsTruncated,
};
if (this.IsTruncated) {
result.NextMarker = this.nextKeyMarker;
}
return result;
}
}
module.exports = { DelimiterOrphanDeleteMarker };
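
To make the orphan detection in addVersion() concrete: a delete marker is reported only once the next entry switches to a different key, which proves the marker had no noncurrent version behind it. The keys and values below are invented, and a `logger` is assumed:

const { DelimiterOrphanDeleteMarker } = require('./delimiterOrphanDeleteMarker');

const listing = new DelimiterOrphanDeleteMarker({ maxKeys: 1000 }, logger, 'v1');
// Entries arrive newest version first:
//   ('a', v1, delete marker)    sole version of 'a' -> orphan delete marker
//   ('b', v1, delete marker)    latest version of 'b', kept in memory
//   ('b', v2, regular version)  'b' has a noncurrent version -> not an orphan
// After filtering all entries, result().Contents contains only key 'a'.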

View File

@ -1,279 +0,0 @@
'use strict'; // eslint-disable-line strict
const Delimiter = require('./delimiter').Delimiter;
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } =
require('./tools');
const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
/**
* Handle object listing with parameters
*
* @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
* @prop {String[]} Contents - 'files' to list
* @prop {Boolean} IsTruncated - truncated listing flag
* @prop {String|undefined} NextMarker - marker per amazon format
* @prop {Number} keys - count of listed keys
* @prop {String|undefined} delimiter - separator per amazon format
* @prop {String|undefined} prefix - prefix per amazon format
* @prop {Number} maxKeys - number of keys to list
*/
class DelimiterVersions extends Delimiter {
constructor(parameters, logger, vFormat) {
super(parameters, logger, vFormat);
// specific to version listing
this.keyMarker = parameters.keyMarker;
this.versionIdMarker = parameters.versionIdMarker;
// internal state
this.masterKey = undefined;
this.masterVersionId = undefined;
// listing results
this.NextMarker = parameters.keyMarker;
this.NextVersionIdMarker = undefined;
this.inReplayPrefix = false;
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
genMDParams: this.genMDParamsV0,
filter: this.filterV0,
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
genMDParams: this.genMDParamsV1,
filter: this.filterV1,
skipping: this.skippingV1,
},
}[this.vFormat]);
}
genMDParamsV0() {
const params = {};
if (this.parameters.prefix) {
params.gte = this.parameters.prefix;
params.lt = inc(this.parameters.prefix);
}
if (this.parameters.keyMarker) {
if (params.gte && params.gte > this.parameters.keyMarker) {
return params;
}
delete params.gte;
if (this.parameters.versionIdMarker) {
// versionIdMarker should always come with keyMarker
// but may not be the other way around
params.gt = this.parameters.keyMarker
+ VID_SEP
+ this.parameters.versionIdMarker;
} else {
params.gt = inc(this.parameters.keyMarker + VID_SEP);
}
}
return params;
}
genMDParamsV1() {
// return an array of two listing params sets to ask for
// synchronized listing of M and V ranges
const params = [{}, {}];
if (this.parameters.prefix) {
params[0].gte = DbPrefixes.Master + this.parameters.prefix;
params[0].lt = DbPrefixes.Master + inc(this.parameters.prefix);
params[1].gte = DbPrefixes.Version + this.parameters.prefix;
params[1].lt = DbPrefixes.Version + inc(this.parameters.prefix);
} else {
params[0].gte = DbPrefixes.Master;
params[0].lt = inc(DbPrefixes.Master); // stop after the last master key
params[1].gte = DbPrefixes.Version;
params[1].lt = inc(DbPrefixes.Version); // stop after the last version key
}
if (this.parameters.keyMarker) {
if (params[1].gte <= DbPrefixes.Version + this.parameters.keyMarker) {
delete params[0].gte;
delete params[1].gte;
params[0].gt = DbPrefixes.Master + inc(this.parameters.keyMarker + VID_SEP);
if (this.parameters.versionIdMarker) {
// versionIdMarker should always come with keyMarker
// but may not be the other way around
params[1].gt = DbPrefixes.Version
+ this.parameters.keyMarker
+ VID_SEP
+ this.parameters.versionIdMarker;
} else {
params[1].gt = DbPrefixes.Version
+ inc(this.parameters.keyMarker + VID_SEP);
}
}
}
return params;
}
/**
* Used to synchronize listing of M and V prefixes by object key
*
* @param {object} masterObj object listed from first range
* returned by genMDParamsV1() (the master keys range)
* @param {object} versionObj object listed from second range
* returned by genMDParamsV1() (the version keys range)
* @return {number} comparison result:
* * -1 if master key < version key
* * 1 if master key > version key
*/
compareObjects(masterObj, versionObj) {
const masterKey = masterObj.key.slice(DbPrefixes.Master.length);
const versionKey = versionObj.key.slice(DbPrefixes.Version.length);
return masterKey < versionKey ? -1 : 1;
}
/**
* Add a (key, versionId, value) tuple to the listing.
* Set the NextMarker to the current key
* Increment the keys counter
* @param {object} obj - the entry to add to the listing result
* @param {String} obj.key - The key to add
* @param {String} obj.versionId - versionId
* @param {String} obj.value - The value of the key
* @return {Boolean} - indicates if iteration should continue
*/
addContents(obj) {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
this.Contents.push({
key: obj.key,
value: this.trimMetadata(obj.value),
versionId: obj.versionId,
});
this.NextMarker = obj.key;
this.NextVersionIdMarker = obj.versionId;
++this.keys;
return FILTER_ACCEPT;
}
/**
* Filter to apply on each iteration if bucket is in v0
* versioning key format, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filterV0(obj) {
if (obj.key.startsWith(DbPrefixes.Replay)) {
this.inReplayPrefix = true;
return FILTER_SKIP;
}
this.inReplayPrefix = false;
if (Version.isPHD(obj.value)) {
// return accept to avoid skipping the next values in range
return FILTER_ACCEPT;
}
return this.filterCommon(obj.key, obj.value);
}
/**
* Filter to apply on each iteration if bucket is in v1
* versioning key format, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filterV1(obj) {
// this function receives both M and V keys, but their prefix
// length is the same so we can remove their prefix without
// looking at the type of key
return this.filterCommon(obj.key.slice(DbPrefixes.Master.length),
obj.value);
}
filterCommon(key, value) {
if (this.prefix && !key.startsWith(this.prefix)) {
return FILTER_SKIP;
}
let nonversionedKey;
let versionId = undefined;
const versionIdIndex = key.indexOf(VID_SEP);
if (versionIdIndex < 0) {
nonversionedKey = key;
this.masterKey = key;
this.masterVersionId =
Version.from(value).getVersionId() || 'null';
versionId = this.masterVersionId;
} else {
nonversionedKey = key.slice(0, versionIdIndex);
versionId = key.slice(versionIdIndex + 1);
// skip a version key if it is the master version
if (this.masterKey === nonversionedKey && this.masterVersionId === versionId) {
return FILTER_SKIP;
}
this.masterKey = undefined;
this.masterVersionId = undefined;
}
if (this.delimiter) {
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = nonversionedKey.indexOf(this.delimiter, baseIndex);
if (delimiterIndex >= 0) {
return this.addCommonPrefix(nonversionedKey, delimiterIndex);
}
}
return this.addContents({ key: nonversionedKey, value, versionId });
}
skippingV0() {
if (this.inReplayPrefix) {
return DbPrefixes.Replay;
}
if (this.NextMarker) {
const index = this.NextMarker.lastIndexOf(this.delimiter);
if (index === this.NextMarker.length - 1) {
return this.NextMarker;
}
}
return SKIP_NONE;
}
skippingV1() {
const skipV0 = this.skippingV0();
if (skipV0 === SKIP_NONE) {
return SKIP_NONE;
}
// skip to the same object key in both M and V range listings
return [DbPrefixes.Master + skipV0,
DbPrefixes.Version + skipV0];
}
/**
* Return an object containing all mandatory fields to use once the
* iteration is done, doesn't show a NextMarker field if the output
* isn't truncated
* @return {Object} - following amazon format
*/
result() {
/* NextMarker is only provided when delimiter is used.
* specified in v1 listing documentation
* http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
*/
return {
CommonPrefixes: this.CommonPrefixes,
Versions: this.Contents,
IsTruncated: this.IsTruncated,
NextKeyMarker: this.IsTruncated ? this.NextMarker : undefined,
NextVersionIdMarker: this.IsTruncated ?
this.NextVersionIdMarker : undefined,
Delimiter: this.delimiter,
};
}
}
module.exports = { DelimiterVersions };
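
As a worked example of the ranges genMDParamsV0() computes in this (now removed) implementation, with invented key names and an assumed `logger`; VID_SEP is the version-id separator from the versioning constants:

const { DelimiterVersions } = require('./delimiterVersions');

// Resume a listing from a key marker and version-id marker:
const listing = new DelimiterVersions(
    { keyMarker: 'p/a', versionIdMarker: 'v2' }, logger, 'v0');
const params = listing.genMDParamsV0();
// params is { gt: `p/a${VID_SEP}v2` }: start strictly after the marker
// version. Without versionIdMarker it would be { gt: inc(`p/a${VID_SEP}`) },
// skipping every remaining version of 'p/a'.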

View File

@ -0,0 +1,535 @@
'use strict'; // eslint-disable-line strict
const Extension = require('./Extension').default;
import {
FilterState,
FilterReturnValue,
} from './delimiter';
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } =
require('./tools');
const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
export const enum DelimiterVersionsFilterStateId {
NotSkipping = 1,
SkippingPrefix = 2,
SkippingVersions = 3,
};
export interface DelimiterVersionsFilterState_NotSkipping extends FilterState {
id: DelimiterVersionsFilterStateId.NotSkipping,
};
export interface DelimiterVersionsFilterState_SkippingPrefix extends FilterState {
id: DelimiterVersionsFilterStateId.SkippingPrefix,
prefix: string;
};
export interface DelimiterVersionsFilterState_SkippingVersions extends FilterState {
id: DelimiterVersionsFilterStateId.SkippingVersions,
gt: string;
};
type KeyHandler = (key: string, versionId: string | undefined, value: string) => FilterReturnValue;
type ResultObject = {
CommonPrefixes: string[],
Versions: {
key: string;
value: string;
versionId: string;
}[];
IsTruncated: boolean;
Delimiter ?: string;
NextKeyMarker ?: string;
NextVersionIdMarker ?: string;
};
type GenMDParamsItem = {
gt ?: string,
gte ?: string,
lt ?: string,
};
/**
* Handle object listing with parameters
*
* @prop {String[]} CommonPrefixes - 'folders' defined by the delimiter
* @prop {String[]} Contents - 'files' to list
* @prop {Boolean} IsTruncated - truncated listing flag
* @prop {String|undefined} NextMarker - marker per amazon format
* @prop {Number} keys - count of listed keys
* @prop {String|undefined} delimiter - separator per amazon format
* @prop {String|undefined} prefix - prefix per amazon format
* @prop {Number} maxKeys - number of keys to list
*/
export class DelimiterVersions extends Extension {
state: FilterState;
keyHandlers: { [id: number]: KeyHandler };
constructor(parameters, logger, vFormat) {
super(parameters, logger);
// original listing parameters
this.delimiter = parameters.delimiter;
this.prefix = parameters.prefix;
this.maxKeys = parameters.maxKeys || 1000;
// specific to version listing
this.keyMarker = parameters.keyMarker;
this.versionIdMarker = parameters.versionIdMarker;
// internal state
this.masterKey = undefined;
this.masterVersionId = undefined;
this.nullKey = null;
this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
// listing results
this.CommonPrefixes = [];
this.Versions = [];
this.IsTruncated = false;
this.nextKeyMarker = parameters.keyMarker;
this.nextVersionIdMarker = undefined;
this.keyHandlers = {};
Object.assign(this, {
[BucketVersioningKeyFormat.v0]: {
genMDParams: this.genMDParamsV0,
getObjectKey: this.getObjectKeyV0,
skipping: this.skippingV0,
},
[BucketVersioningKeyFormat.v1]: {
genMDParams: this.genMDParamsV1,
getObjectKey: this.getObjectKeyV1,
skipping: this.skippingV1,
},
}[this.vFormat]);
if (this.vFormat === BucketVersioningKeyFormat.v0) {
this.setKeyHandler(
DelimiterVersionsFilterStateId.NotSkipping,
this.keyHandler_NotSkippingV0.bind(this));
} else {
this.setKeyHandler(
DelimiterVersionsFilterStateId.NotSkipping,
this.keyHandler_NotSkippingV1.bind(this));
}
this.setKeyHandler(
DelimiterVersionsFilterStateId.SkippingPrefix,
this.keyHandler_SkippingPrefix.bind(this));
this.setKeyHandler(
DelimiterVersionsFilterStateId.SkippingVersions,
this.keyHandler_SkippingVersions.bind(this));
if (this.versionIdMarker) {
this.state = <DelimiterVersionsFilterState_SkippingVersions> {
id: DelimiterVersionsFilterStateId.SkippingVersions,
gt: `${this.keyMarker}${VID_SEP}${this.versionIdMarker}`,
};
} else {
this.state = <DelimiterVersionsFilterState_NotSkipping> {
id: DelimiterVersionsFilterStateId.NotSkipping,
};
}
}
genMDParamsV0() {
const params: GenMDParamsItem = {};
if (this.prefix) {
params.gte = this.prefix;
params.lt = inc(this.prefix);
}
if (this.keyMarker && this.delimiter) {
const commonPrefix = this.getCommonPrefix(this.keyMarker);
if (commonPrefix) {
const afterPrefix = inc(commonPrefix);
if (!params.gte || afterPrefix > params.gte) {
params.gte = afterPrefix;
}
}
}
if (this.keyMarker && (!params.gte || this.keyMarker >= params.gte)) {
delete params.gte;
if (this.versionIdMarker) {
// start from the beginning of versions so we can
// check if there's a null key and fetch it
// (afterwards, we can skip the rest of versions until
// we reach versionIdMarker)
params.gte = `${this.keyMarker}${VID_SEP}`;
} else {
params.gt = `${this.keyMarker}${inc(VID_SEP)}`;
}
}
return params;
}
genMDParamsV1() {
// return an array of two listing params sets to ask for
// synchronized listing of M and V ranges
const v0Params: GenMDParamsItem = this.genMDParamsV0();
const mParams: GenMDParamsItem = {};
const vParams: GenMDParamsItem = {};
if (v0Params.gt) {
mParams.gt = `${DbPrefixes.Master}${v0Params.gt}`;
vParams.gt = `${DbPrefixes.Version}${v0Params.gt}`;
} else if (v0Params.gte) {
mParams.gte = `${DbPrefixes.Master}${v0Params.gte}`;
vParams.gte = `${DbPrefixes.Version}${v0Params.gte}`;
} else {
mParams.gte = DbPrefixes.Master;
vParams.gte = DbPrefixes.Version;
}
if (v0Params.lt) {
mParams.lt = `${DbPrefixes.Master}${v0Params.lt}`;
vParams.lt = `${DbPrefixes.Version}${v0Params.lt}`;
} else {
mParams.lt = inc(DbPrefixes.Master);
vParams.lt = inc(DbPrefixes.Version);
}
return [mParams, vParams];
}
/**
* check if the max keys count has been reached and set the
* final state of the result if it is the case
* @return {Boolean} - indicates if the iteration has to stop
*/
_reachedMaxKeys(): boolean {
if (this.keys >= this.maxKeys) {
// In cases of maxKeys <= 0 -> IsTruncated = false
this.IsTruncated = this.maxKeys > 0;
return true;
}
return false;
}
/**
* Used to synchronize listing of M and V prefixes by object key
*
* @param {object} masterObj object listed from first range
* returned by genMDParamsV1() (the master keys range)
* @param {object} versionObj object listed from second range
* returned by genMDParamsV1() (the version keys range)
* @return {number} comparison result:
* * -1 if master key < version key
* * 1 if master key > version key
*/
compareObjects(masterObj, versionObj) {
const masterKey = masterObj.key.slice(DbPrefixes.Master.length);
const versionKey = versionObj.key.slice(DbPrefixes.Version.length);
return masterKey < versionKey ? -1 : 1;
}
/**
* Parse a listing key into its nonversioned key and version ID components
*
* @param {string} key - full listing key
* @return {object} obj
* @return {string} obj.key - nonversioned part of key
* @return {string} [obj.versionId] - version ID in the key
*/
parseKey(fullKey: string): { key: string, versionId ?: string } {
const versionIdIndex = fullKey.indexOf(VID_SEP);
if (versionIdIndex === -1) {
return { key: fullKey };
}
const nonversionedKey: string = fullKey.slice(0, versionIdIndex);
let versionId: string = fullKey.slice(versionIdIndex + 1);
return { key: nonversionedKey, versionId };
}
/**
* Include a key in the listing output, in the Versions or CommonPrefix result
*
* @param {string} key - key (without version ID)
* @param {string} versionId - version ID
* @param {string} value - metadata value
* @return {undefined}
*/
addKey(key: string, versionId: string, value: string) {
// add the subprefix to the common prefixes if the key has the delimiter
const commonPrefix = this.getCommonPrefix(key);
if (commonPrefix) {
this.addCommonPrefix(commonPrefix);
// transition into SkippingPrefix state to skip all following keys
// while they start with the same prefix
this.setState(<DelimiterVersionsFilterState_SkippingPrefix> {
id: DelimiterVersionsFilterStateId.SkippingPrefix,
prefix: commonPrefix,
});
} else {
this.addVersion(key, versionId, value);
}
}
/**
* Add a (key, versionId, value) tuple to the listing.
* Set the NextMarker to the current key
* Increment the keys counter
* @param {String} key - The key to add
* @param {String} versionId - versionId
* @param {String} value - The value of the key
* @return {undefined}
*/
addVersion(key: string, versionId: string, value: string) {
this.Versions.push({
key,
versionId,
value: this.trimMetadata(value),
});
this.nextKeyMarker = key;
this.nextVersionIdMarker = versionId;
++this.keys;
}
getCommonPrefix(key: string): string | undefined {
if (!this.delimiter) {
return undefined;
}
const baseIndex = this.prefix ? this.prefix.length : 0;
const delimiterIndex = key.indexOf(this.delimiter, baseIndex);
if (delimiterIndex === -1) {
return undefined;
}
return key.substring(0, delimiterIndex + this.delimiter.length);
}
/**
* Add a Common Prefix in the list
* @param {String} commonPrefix - common prefix to add
* @return {undefined}
*/
addCommonPrefix(commonPrefix: string): void {
// add the new prefix to the list
this.CommonPrefixes.push(commonPrefix);
++this.keys;
this.nextKeyMarker = commonPrefix;
this.nextVersionIdMarker = undefined;
}
/**
* Cache the current null key, so it can be output later at
* the correct position
*
* @param {String} key - nonversioned key of the null key
* @param {String} versionId - real version ID of the null key
* @param {String} value - value of the null key
* @return {undefined}
*/
cacheNullKey(key: string, versionId: string, value: string): void {
this.nullKey = { key, versionId, value };
}
getObjectKeyV0(obj: { key: string }): string {
return obj.key;
}
getObjectKeyV1(obj: { key: string }): string {
return obj.key.slice(DbPrefixes.Master.length);
}
/**
* Filter to apply on each iteration, based on:
* - prefix
* - delimiter
* - maxKeys
* The marker is being handled directly by levelDB
* @param {Object} obj - The key and value of the element
* @param {String} obj.key - The key of the element
* @param {String} obj.value - The value of the element
* @return {number} - indicates if iteration should continue
*/
filter(obj: { key: string, value: string }): FilterReturnValue {
const key = this.getObjectKey(obj);
const value = obj.value;
const { key: nonversionedKey, versionId: keyVersionId } = this.parseKey(key);
if (this.nullKey) {
if (this.nullKey.key !== nonversionedKey
|| this.nullKey.versionId < <string> keyVersionId) {
this.handleKey(
this.nullKey.key, this.nullKey.versionId, this.nullKey.value);
this.nullKey = null;
}
}
if (keyVersionId === '') {
// null key
this.cacheNullKey(nonversionedKey, Version.from(value).getVersionId(), value);
if (this.state.id === DelimiterVersionsFilterStateId.SkippingVersions) {
return FILTER_SKIP;
}
return FILTER_ACCEPT;
}
return this.handleKey(nonversionedKey, keyVersionId, value);
}
setState(state: FilterState): void {
this.state = state;
}
setKeyHandler(stateId: number, keyHandler: KeyHandler): void {
this.keyHandlers[stateId] = keyHandler;
}
handleKey(key: string, versionId: string | undefined, value: string): FilterReturnValue {
return this.keyHandlers[this.state.id](key, versionId, value);
}
keyHandler_NotSkippingV0(key: string, versionId: string | undefined, value: string): FilterReturnValue {
if (key.startsWith(DbPrefixes.Replay)) {
// skip internal replay prefix entirely
this.setState(<DelimiterVersionsFilterState_SkippingPrefix> {
id: DelimiterVersionsFilterStateId.SkippingPrefix,
prefix: DbPrefixes.Replay,
});
return FILTER_SKIP;
}
if (Version.isPHD(value)) {
return FILTER_ACCEPT;
}
return this.filter_onNewKey(key, versionId, value);
}
keyHandler_NotSkippingV1(key: string, versionId: string | undefined, value: string): FilterReturnValue {
// NOTE: this check on PHD is only useful for Artesca, S3C
// does not use PHDs in V1 format
if (Version.isPHD(value)) {
return FILTER_ACCEPT;
}
return this.filter_onNewKey(key, versionId, value);
}
filter_onNewKey(key: string, versionId: string | undefined, value: string): FilterReturnValue {
if (this._reachedMaxKeys()) {
return FILTER_END;
}
if (versionId === undefined) {
this.masterKey = key;
this.masterVersionId = Version.from(value).getVersionId() || 'null';
this.addKey(this.masterKey, this.masterVersionId, value);
} else {
if (this.masterKey === key && this.masterVersionId === versionId) {
// do not add a version key if it is the master version
return FILTER_ACCEPT;
}
this.addKey(key, versionId, value);
}
return FILTER_ACCEPT;
}
keyHandler_SkippingPrefix(key: string, versionId: string | undefined, value: string): FilterReturnValue {
const { prefix } = <DelimiterVersionsFilterState_SkippingPrefix> this.state;
if (key.startsWith(prefix)) {
return FILTER_SKIP;
}
this.setState(<DelimiterVersionsFilterState_NotSkipping> {
id: DelimiterVersionsFilterStateId.NotSkipping,
});
return this.handleKey(key, versionId, value);
}
keyHandler_SkippingVersions(key: string, versionId: string | undefined, value: string): FilterReturnValue {
if (key === this.keyMarker) {
// since the nonversioned key equals the marker, there is
// necessarily a versionId in this key
const _versionId = <string> versionId;
if (_versionId < this.versionIdMarker) {
// skip all versions until marker
return FILTER_SKIP;
}
if (_versionId === this.versionIdMarker) {
// nothing left to skip, so return ACCEPT, but don't add this version
return FILTER_ACCEPT;
}
}
this.setState(<DelimiterVersionsFilterState_NotSkipping> {
id: DelimiterVersionsFilterStateId.NotSkipping,
});
return this.handleKey(key, versionId, value);
}
skippingBase(): string | undefined {
switch (this.state.id) {
case DelimiterVersionsFilterStateId.SkippingPrefix:
const { prefix } = <DelimiterVersionsFilterState_SkippingPrefix> this.state;
return inc(prefix);
case DelimiterVersionsFilterStateId.SkippingVersions:
const { gt } = <DelimiterVersionsFilterState_SkippingVersions> this.state;
// the contract of skipping() is to return the first key
// that can be skipped to, so we append a null byte to skip
// over the existing versioned key set in 'gt'
return `${gt}\0`;
default:
return SKIP_NONE;
}
}
skippingV0() {
return this.skippingBase();
}
skippingV1() {
const skipTo = this.skippingBase();
if (skipTo === SKIP_NONE) {
return SKIP_NONE;
}
// skip to the same object key in both M and V range listings
return [
`${DbPrefixes.Master}${skipTo}`,
`${DbPrefixes.Version}${skipTo}`,
];
}
/**
* Return an object containing all mandatory fields to use once the
* iteration is done; no NextMarker field is shown if the output
* isn't truncated
* @return {Object} - following amazon format
*/
result() {
// Add the last null key if still in cache (when it is the
// last version of the last key)
//
// NOTE: _reachedMaxKeys sets IsTruncated to true when it
// returns true. Here we want this because either:
//
// - we did not reach the max keys yet so the result is not
//   truncated, and there is still room for the null key in
//   the results
//
// - OR we reached it already while having to process a new
// key (so the result is truncated even without the null key)
//
// - OR we are *just* below the limit but the null key to add
// does not fit, so we know the result is now truncated
// because there remains the null key to be output.
//
if (this.nullKey) {
this.handleKey(this.nullKey.key, this.nullKey.versionId, this.nullKey.value);
}
const result: ResultObject = {
CommonPrefixes: this.CommonPrefixes,
Versions: this.Versions,
IsTruncated: this.IsTruncated,
};
if (this.delimiter) {
result.Delimiter = this.delimiter;
}
if (this.IsTruncated) {
result.NextKeyMarker = this.nextKeyMarker;
if (this.nextVersionIdMarker) {
result.NextVersionIdMarker = this.nextVersionIdMarker;
}
}
return result;
}
}
module.exports = { DelimiterVersions };
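
A short sketch of the SkippingVersions behavior implemented above, with invented parameter values and an assumed `logger`. With a versionIdMarker, the listing starts at the beginning of the marker key's versions (so a possible null key can be fetched) and skips entries until the marker; skippingBase() then appends a null byte so the caller seeks strictly past the stored 'gt' key:

const { DelimiterVersions } = require('./delimiterVersions');

const listing = new DelimiterVersions(
    { keyMarker: 'doc', versionIdMarker: 'v2', maxKeys: 1000 }, logger, 'v0');
// Initial state: SkippingVersions with gt = `doc${VID_SEP}v2`.
// While in that state, skippingBase() returns `doc${VID_SEP}v2\0`; in v1
// format, skippingV1() duplicates the same target across both ranges:
//   [`${DbPrefixes.Master}doc${VID_SEP}v2\0`, `${DbPrefixes.Version}doc${VID_SEP}v2\0`]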

View File

@ -6,4 +6,7 @@ module.exports = {
DelimiterMaster: require('./delimiterMaster')
.DelimiterMaster,
MPU: require('./MPU').MultipartUploads,
DelimiterCurrent: require('./delimiterCurrent').DelimiterCurrent,
DelimiterNonCurrent: require('./delimiterNonCurrent').DelimiterNonCurrent,
DelimiterOrphanDeleteMarker: require('./delimiterOrphanDeleteMarker').DelimiterOrphanDeleteMarker,
};
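
A consumer picks the new lifecycle listing extensions by name from this map; the import path below is an assumption for illustration, not part of this change:

// Hypothetical consumer code (module path and parameters assumed):
const algos = require('arsenal').algorithms.list;
const lister = new algos.DelimiterNonCurrent(
    { beforeDate: '2024-01-01T00:00:00.000Z' }, logger, 'v0');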

View File

@ -52,15 +52,22 @@ class Skip {
assert(this.skipRangeCb);
const filteringResult = this.extension.filter(entry);
const skippingRange = this.extension.skipping();
const skipTo = this.extension.skipping();
if (filteringResult === FILTER_END) {
this.listingEndCb();
} else if (filteringResult === FILTER_SKIP
&& skippingRange !== SKIP_NONE) {
&& skipTo !== SKIP_NONE) {
if (++this.streakLength >= MAX_STREAK_LENGTH) {
const newRange = this._inc(skippingRange);
let newRange;
if (Array.isArray(skipTo)) {
newRange = [];
for (let i = 0; i < skipTo.length; ++i) {
newRange.push(skipTo[i]);
}
} else {
newRange = skipTo;
}
/* Avoid looping on the same range again and again. */
if (newRange === this.gteParams) {
this.streakLength = 1;
@ -72,16 +79,6 @@ class Skip {
this.streakLength = 0;
}
}
_inc(str) {
if (!str) {
return str;
}
const lastCharValue = str.charCodeAt(str.length - 1);
const lastCharNewValue = String.fromCharCode(lastCharValue + 1);
return `${str.slice(0, str.length - 1)}${lastCharNewValue}`;
}
}
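
A sketch of the streak behavior this change preserves (MAX_STREAK_LENGTH and the callbacks come from the surrounding class; the listing scenario is invented). After enough consecutive skips, Skip stops filtering entry by entry and seeks directly to the range returned by extension.skipping(), which may now be an array of per-range targets for the v1 key format:

// Listing with delimiter '/' over keys p/a/1 ... p/a/500 (invented):
//   'p/a/' is emitted once as a common prefix, then every following 'p/a/*'
//   entry returns FILTER_SKIP with skipping() pointing past the prefix.
// After MAX_STREAK_LENGTH consecutive skips, skipRangeCb(newRange) re-creates
// the database stream at that range, seeking past the whole prefix instead of
// filtering the remaining entries one by one. For v1 buckets, newRange is an
// array and both the M and V range streams are re-created.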

View File

@ -1,6 +1,4 @@
'use strict'; // eslint-disable-line strict
const constants = require('../constants');
import * as constants from '../constants';
/**
* Class containing requester's information received from Vault
@ -8,9 +6,15 @@ const constants = require('../constants');
* shortid, email, accountDisplayName and IAMdisplayName (if applicable)
* @return {AuthInfo} an AuthInfo instance
*/
export default class AuthInfo {
arn: string;
canonicalID: string;
shortid: string;
email: string;
accountDisplayName: string;
IAMdisplayName: string;
class AuthInfo {
constructor(objectFromVault) {
constructor(objectFromVault: any) {
// amazon resource name for IAM user (if applicable)
this.arn = objectFromVault.arn;
// account canonicalID
@ -53,10 +57,8 @@ class AuthInfo {
return this.canonicalID.startsWith(
`${constants.zenkoServiceAccount}/`);
}
isRequesterThisServiceAccount(serviceName) {
return this.canonicalID ===
`${constants.zenkoServiceAccount}/${serviceName}`;
isRequesterThisServiceAccount(serviceName: string) {
const computedCanonicalID = `${constants.zenkoServiceAccount}/${serviceName}`;
return this.canonicalID === computedCanonicalID;
}
}
module.exports = AuthInfo;
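
A small usage sketch of the now-typed AuthInfo; the payload values are invented, and `constants` is the import at the top of this file:

// Hypothetical Vault payload:
const info = new AuthInfo({
    arn: 'arn:aws:iam::123456789012:user/alice',
    canonicalID: `${constants.zenkoServiceAccount}/backbeat`,
});
info.isRequesterThisServiceAccount('backbeat'); // true: exact canonicalID match
info.isRequesterThisServiceAccount('clueso');   // false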

View File

@ -1,16 +1,22 @@
const errors = require('../errors');
const AuthInfo = require('./AuthInfo');
import { Logger } from 'werelogs';
import errors from '../errors';
import AuthInfo from './AuthInfo';
/** vaultSignatureCb parses the message from Vault and instantiates an AuthInfo
* @param {object} err - error from vault
* @param {object} authInfo - info from vault
* @param {object} log - log for request
* @param {function} callback - callback to authCheck functions
* @param {object} [streamingV4Params] - present if v4 signature;
* @param err - error from vault
* @param authInfo - info from vault
* @param log - log for request
* @param callback - callback to authCheck functions
* @param [streamingV4Params] - present if v4 signature;
* items used to calculate signature on chunks if streaming auth
* @return {undefined}
*/
function vaultSignatureCb(err, authInfo, log, callback, streamingV4Params) {
function vaultSignatureCb(
err: Error | null,
authInfo: { message: { body: any } },
log: Logger,
callback: (err: Error | null, data?: any, results?: any, params?: any, infos?: any) => void,
streamingV4Params?: any
) {
// vaultclient API guarantees that it returns:
// - either `err`, an Error object with `code` and `message` properties set
// - or `err == null` and `info` is an object with `message.code` and
@ -24,58 +30,101 @@ function vaultSignatureCb(err, authInfo, log, callback, streamingV4Params) {
const info = authInfo.message.body;
const userInfo = new AuthInfo(info.userInfo);
const authorizationResults = info.authorizationResults;
const auditLog = { accountDisplayName: userInfo.getAccountDisplayName() };
const auditLog: { accountDisplayName: string, IAMdisplayName?: string } =
{ accountDisplayName: userInfo.getAccountDisplayName() };
const iamDisplayName = userInfo.getIAMdisplayName();
if (iamDisplayName) {
auditLog.IAMdisplayName = iamDisplayName;
}
// @ts-ignore
log.addDefaultFields(auditLog);
return callback(null, userInfo, authorizationResults, streamingV4Params);
return callback(null, userInfo, authorizationResults, streamingV4Params, {
accountQuota: info.accountQuota || {},
});
}
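// Worked example of the success path above (shapes taken from this function,
// values invented): with info.message.body = { userInfo, authorizationResults,
// accountQuota }, the original caller now receives a fifth argument:
//   callback(null, new AuthInfo(userInfo), authorizationResults,
//            streamingV4Params, { accountQuota: accountQuota || {} })
// i.e. the quota object is always present, defaulting to {}.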
export type AuthV4RequestParams = {
version: 4;
log: Logger;
data: {
accessKey: string;
signatureFromRequest: string;
region: string;
stringToSign: string;
scopeDate: string;
authType: 'query' | 'header';
signatureVersion: string;
signatureAge?: number;
timestamp: number;
credentialScope: string;
securityToken: string;
algo: string;
log: Logger;
};
};
/**
* Class that provides common authentication methods against different
* authentication backends.
* @class Vault
*/
class Vault {
export default class Vault {
client: any;
implName: string;
/**
* @constructor
* @param {object} client - authentication backend or vault client
* @param {string} implName - implementation name for auth backend
*/
constructor(client, implName) {
constructor(client: any, implName: string) {
this.client = client;
this.implName = implName;
}
/**
* authenticateV2Request
*
* @param {string} params - the authentication parameters as returned by
* @param params - the authentication parameters as returned by
* auth.extractParams
* @param {number} params.version - shall equal 2
* @param {string} params.data.accessKey - the user's accessKey
* @param {string} params.data.signatureFromRequest - the signature read
* @param params.version - shall equal 2
* @param params.data.accessKey - the user's accessKey
* @param params.data.signatureFromRequest - the signature read
* from the request
* @param {string} params.data.stringToSign - the stringToSign
* @param {string} params.data.algo - the hashing algorithm used for the
* @param params.data.stringToSign - the stringToSign
* @param params.data.algo - the hashing algorithm used for the
* signature
* @param {string} params.data.authType - the type of authentication (query
* @param params.data.authType - the type of authentication (query
* or header)
* @param {string} params.data.signatureVersion - the version of the
* @param params.data.signatureVersion - the version of the
* signature (AWS or AWS4)
* @param {number} [params.data.signatureAge] - the age of the signature in
* @param [params.data.signatureAge] - the age of the signature in
* ms
* @param {string} params.data.log - the logger object
* @param params.data.log - the logger object
* @param {RequestContext []} requestContexts - an array of RequestContext
* instances which contain information for policy authorization check
* @param {function} callback - callback with either error or user info
* @returns {undefined}
* @param callback - callback with either error or user info
*/
authenticateV2Request(params, requestContexts, callback) {
authenticateV2Request(
params: {
version: 2;
log: Logger;
data: {
securityToken: string;
accessKey: string;
signatureFromRequest: string;
stringToSign: string;
algo: string;
authType: 'query' | 'header';
signatureVersion: string;
signatureAge?: number;
log: Logger;
};
},
requestContexts: any[],
callback: (err: Error | null, data?: any) => void
) {
params.log.debug('authenticating V2 request');
let serializedRCsArr;
let serializedRCsArr: any;
if (requestContexts) {
serializedRCsArr = requestContexts.map(rc => rc.serialize());
}
@ -85,44 +134,48 @@ class Vault {
params.data.accessKey,
{
algo: params.data.algo,
// @ts-ignore
reqUid: params.log.getSerializedUids(),
logger: params.log,
securityToken: params.data.securityToken,
requestContext: serializedRCsArr,
},
(err, userInfo) => vaultSignatureCb(err, userInfo,
(err: Error | null, userInfo?: any) => vaultSignatureCb(err, userInfo,
params.log, callback),
);
}
/** authenticateV4Request
* @param {object} params - the authentication parameters as returned by
* @param params - the authentication parameters as returned by
* auth.extractParams
* @param {number} params.version - shall equal 4
* @param {string} params.data.log - the logger object
* @param {string} params.data.accessKey - the user's accessKey
* @param {string} params.data.signatureFromRequest - the signature read
* @param params.version - shall equal 4
* @param params.data.log - the logger object
* @param params.data.accessKey - the user's accessKey
* @param params.data.signatureFromRequest - the signature read
* from the request
* @param {string} params.data.region - the AWS region
* @param {string} params.data.stringToSign - the stringToSign
* @param {string} params.data.scopeDate - the timespan to allow the request
* @param {string} params.data.authType - the type of authentication (query
* @param params.data.region - the AWS region
* @param params.data.stringToSign - the stringToSign
* @param params.data.scopeDate - the timespan to allow the request
* @param params.data.authType - the type of authentication (query
* or header)
* @param {string} params.data.signatureVersion - the version of the
* @param params.data.signatureVersion - the version of the
* signature (AWS or AWS4)
* @param {number} params.data.signatureAge - the age of the signature in ms
* @param {number} params.data.timestamp - signature timestamp
* @param {string} params.credentialScope - credentialScope for signature
* @param params.data.signatureAge - the age of the signature in ms
* @param params.data.timestamp - signature timestamp
* @param params.credentialScope - credentialScope for signature
* @param {RequestContext[] | null} requestContexts -
* an array of RequestContext instances which contain information
* for the policy authorization check, or null when authenticating
* a chunk in streaming v4 auth
* @param {function} callback - callback with either error or user info
* @return {undefined}
* @param callback - callback with either error or user info
*/
authenticateV4Request(params, requestContexts, callback) {
authenticateV4Request(
params: AuthV4RequestParams,
requestContexts: any[] | null,
callback: (err: Error | null, data?: any) => void
) {
params.log.debug('authenticating V4 request');
let serializedRCs;
let serializedRCs: any;
if (requestContexts) {
serializedRCs = requestContexts.map(rc => rc.serialize());
}
@ -140,31 +193,39 @@ class Vault {
params.data.region,
params.data.scopeDate,
{
// @ts-ignore
reqUid: params.log.getSerializedUids(),
logger: params.log,
securityToken: params.data.securityToken,
requestContext: serializedRCs,
},
(err, userInfo) => vaultSignatureCb(err, userInfo,
(err: Error | null, userInfo?: any) => vaultSignatureCb(err, userInfo,
params.log, callback, streamingV4Params),
);
}
/** getCanonicalIds -- call Vault to get canonicalIDs based on email
* addresses
* @param {array} emailAddresses - list of emailAddresses
* @param {object} log - log object
* @param {function} callback - callback with either error or an array
* @param emailAddresses - list of emailAddresses
* @param log - log object
* @param callback - callback with either error or an array
* of objects with each object containing the canonicalID and emailAddress
* of an account as properties
* @return {undefined}
*/
getCanonicalIds(emailAddresses, log, callback) {
getCanonicalIds(
emailAddresses: string[],
log: Logger,
callback: (
err: Error | null,
data?: { canonicalID: string; email: string }[]
) => void
) {
log.trace('getting canonicalIDs from Vault based on emailAddresses',
{ emailAddresses });
this.client.getCanonicalIds(emailAddresses,
// @ts-ignore
{ reqUid: log.getSerializedUids() },
(err, info) => {
(err: Error | null, info?: any) => {
if (err) {
log.debug('received error message from auth provider',
{ errorMessage: err });
@ -172,17 +233,17 @@ class Vault {
}
const infoFromVault = info.message.body;
log.trace('info received from vault', { infoFromVault });
const foundIds = [];
const foundIds: { canonicalID: string; email: string }[] = [];
for (let i = 0; i < Object.keys(infoFromVault).length; i++) {
const key = Object.keys(infoFromVault)[i];
if (infoFromVault[key] === 'WrongFormat'
|| infoFromVault[key] === 'NotFound') {
return callback(errors.UnresolvableGrantByEmailAddress);
}
const obj = {};
obj.email = key;
obj.canonicalID = infoFromVault[key];
foundIds.push(obj);
foundIds.push({
email: key,
canonicalID: infoFromVault[key],
})
}
return callback(null, foundIds);
});
@ -190,18 +251,22 @@ class Vault {
/** getEmailAddresses -- call Vault to get email addresses based on
* canonicalIDs
* @param {array} canonicalIDs - list of canonicalIDs
* @param {object} log - log object
* @param {function} callback - callback with either error or an object
* @param canonicalIDs - list of canonicalIDs
* @param log - log object
* @param callback - callback with either error or an object
* with canonicalID keys and email address values
* @return {undefined}
*/
getEmailAddresses(canonicalIDs, log, callback) {
getEmailAddresses(
canonicalIDs: string[],
log: Logger,
callback: (err: Error | null, data?: { [key: string]: any }) => void
) {
log.trace('getting emailAddresses from Vault based on canonicalIDs',
{ canonicalIDs });
this.client.getEmailAddresses(canonicalIDs,
// @ts-ignore
{ reqUid: log.getSerializedUids() },
(err, info) => {
(err: Error | null, info?: any) => {
if (err) {
log.debug('received error message from vault',
{ errorMessage: err });
@ -224,18 +289,22 @@ class Vault {
/** getAccountIds -- call Vault to get accountIds based on
* canonicalIDs
* @param {array} canonicalIDs - list of canonicalIDs
* @param {object} log - log object
* @param {function} callback - callback with either error or an object
* @param canonicalIDs - list of canonicalIDs
* @param log - log object
* @param callback - callback with either error or an object
* with canonicalID keys and accountId values
* @return {undefined}
*/
getAccountIds(canonicalIDs, log, callback) {
getAccountIds(
canonicalIDs: string[],
log: Logger,
callback: (err: Error | null, data?: { [key: string]: string }) => void
) {
log.trace('getting accountIds from Vault based on canonicalIDs',
{ canonicalIDs });
this.client.getAccountIds(canonicalIDs,
// @ts-expect-error
{ reqUid: log.getSerializedUids() },
(err, info) => {
(err: Error | null, info?: any) => {
if (err) {
log.debug('received error message from vault',
{ errorMessage: err });
@ -268,14 +337,19 @@ class Vault {
* @param {object} log - log object
* @param {function} callback - callback with either error or an array
* of authorization results
* @return {undefined}
*/
checkPolicies(requestContextParams, userArn, log, callback) {
checkPolicies(
requestContextParams: any[],
userArn: string,
log: Logger,
callback: (err: Error | null, data?: any[]) => void
) {
log.trace('sending request context params to vault to evaluate' +
'policies');
this.client.checkPolicies(requestContextParams, userArn, {
// @ts-ignore
reqUid: log.getSerializedUids(),
}, (err, info) => {
}, (err: Error | null, info?: any) => {
if (err) {
log.debug('received error message from auth provider',
{ error: err });
@ -286,13 +360,14 @@ class Vault {
});
}
checkHealth(log, callback) {
checkHealth(log: Logger, callback: (err: Error | null, data?: any) => void) {
if (!this.client.healthcheck) {
const defResp = {};
defResp[this.implName] = { code: 200, message: 'OK' };
return callback(null, defResp);
}
return this.client.healthcheck(log.getSerializedUids(), (err, obj) => {
// @ts-ignore
return this.client.healthcheck(log.getSerializedUids(), (err: Error | null, obj?: any) => {
const respBody = {};
if (err) {
log.debug(`error from ${this.implName}`, { error: err });
@ -311,6 +386,19 @@ class Vault {
return callback(null, respBody);
});
}
}
module.exports = Vault;
report(log: Logger, callback: (err: Error | null, data?: any) => void) {
// call the report function of the client
if (!this.client.report) {
return callback(null, {});
}
// @ts-ignore
return this.client.report(log.getSerializedUids(), (err: Error | null, obj?: any) => {
if (err) {
log.debug(`error from ${this.implName}`, { error: err });
return callback(err);
}
return callback(null, obj);
});
}
}


@ -1,24 +1,23 @@
'use strict'; // eslint-disable-line strict
import * as crypto from 'crypto';
import { Logger } from 'werelogs';
import errors from '../errors';
import * as queryString from 'querystring';
import AuthInfo from './AuthInfo';
import * as v2 from './v2/authV2';
import * as v4 from './v4/authV4';
import * as constants from '../constants';
import constructStringToSignV2 from './v2/constructStringToSign';
import constructStringToSignV4 from './v4/constructStringToSign';
import { convertUTCtoISO8601 } from './v4/timeUtils';
import * as vaultUtilities from './backends/in_memory/vaultUtilities';
import * as inMemoryBackend from './backends/in_memory/Backend';
import baseBackend from './backends/base';
import chainBackend from './backends/ChainBackend';
import validateAuthConfig from './backends/in_memory/validateAuthConfig';
import AuthLoader from './backends/in_memory/AuthLoader';
import Vault from './Vault';
const crypto = require('crypto');
const errors = require('../errors');
const queryString = require('querystring');
const AuthInfo = require('./AuthInfo');
const v2 = require('./v2/authV2');
const v4 = require('./v4/authV4');
const constants = require('../constants');
const constructStringToSignV2 = require('./v2/constructStringToSign');
const constructStringToSignV4 = require('./v4/constructStringToSign');
const convertUTCtoISO8601 = require('./v4/timeUtils').convertUTCtoISO8601;
const vaultUtilities = require('./backends/in_memory/vaultUtilities');
const inMemoryBackend = require('./backends/in_memory/Backend');
const validateAuthConfig = require('./backends/in_memory/validateAuthConfig');
const AuthLoader = require('./backends/in_memory/AuthLoader');
const Vault = require('./Vault');
const baseBackend = require('./backends/base');
const chainBackend = require('./backends/ChainBackend');
let vault = null;
let vault: Vault | null = null;
const auth = {};
const checkFunctions = {
v2: {
@ -35,7 +34,7 @@ const checkFunctions = {
// 'All Users Group' so use this group as the canonicalID for the publicUser
const publicUserInfo = new AuthInfo({ canonicalID: constants.publicId });
function setAuthHandler(handler) {
function setAuthHandler(handler: Vault) {
vault = handler;
return auth;
}
@ -43,25 +42,30 @@ function setAuthHandler(handler) {
/**
* This function will check validity of request parameters to authenticate
*
* @param {Http.Request} request - Http request object
* @param {object} log - Logger object
* @param {string} awsService - Aws service related
* @param {object} data - Parameters from queryString parsing or body of
* @param request - Http request object
* @param log - Logger object
* @param awsService - Aws service related
* @param data - Parameters from queryString parsing or body of
* POST request
*
* @return {object} ret
* @return {object} ret.err - arsenal.errors object if any error was found
* @return {object} ret.params - auth parameters to use later on for signature
* @return ret
* @return ret.err - arsenal.errors object if any error was found
* @return ret.params - auth parameters to use later on for signature
* computation and check
* @return {object} ret.params.version - the auth scheme version
* @return ret.params.version - the auth scheme version
* (undefined, 2, 4)
* @return {object} ret.params.data - the auth scheme's specific data
* @return ret.params.data - the auth scheme's specific data
*/
function extractParams(request, log, awsService, data) {
function extractParams(
request: any,
log: Logger,
awsService: string,
data: { [key: string]: string }
) {
log.trace('entered', { method: 'Arsenal.auth.server.extractParams' });
const authHeader = request.headers.authorization;
let version = null;
let method = null;
let version: 'v2' |'v4' | null = null;
let method: 'query' | 'headers' | null = null;
// Identify auth version and method to dispatch to the right check function
if (authHeader) {
@ -104,16 +108,21 @@ function extractParams(request, log, awsService, data) {
/**
* This function will check validity of request parameters to authenticate
*
* @param {Http.Request} request - Http request object
* @param {object} log - Logger object
* @param {function} cb - the callback
* @param {string} awsService - Aws service related
* @param request - Http request object
* @param log - Logger object
* @param cb - the callback
* @param awsService - Aws service related
* @param {RequestContext[] | null} requestContexts - array of RequestContext
* or null if no requestContexts to be sent to Vault (for instance,
* in multi-object delete request)
* @return {undefined}
*/
function doAuth(request, log, cb, awsService, requestContexts) {
function doAuth(
request: any,
log: Logger,
cb: (err: Error | null, data?: any) => void,
awsService: string,
requestContexts: any[] | null
) {
const res = extractParams(request, log, awsService, request.query);
if (res.err) {
return cb(res.err);
@ -121,23 +130,31 @@ function doAuth(request, log, cb, awsService, requestContexts) {
return cb(null, res.params);
}
if (requestContexts) {
requestContexts.forEach(requestContext => {
requestContext.setAuthType(res.params.data.authType);
requestContext.setSignatureVersion(res.params
.data.signatureVersion);
requestContext.setSignatureAge(res.params.data.signatureAge);
requestContext.setSecurityToken(res.params.data.securityToken);
requestContexts.forEach((requestContext) => {
const { params } = res
if ('data' in params) {
const { data } = params
requestContext.setAuthType(data.authType);
requestContext.setSignatureVersion(data.signatureVersion);
requestContext.setSecurityToken(data.securityToken);
if ('signatureAge' in data) {
requestContext.setSignatureAge(data.signatureAge);
}
}
});
}
// Corner cases managed, we're left with normal auth
// TODO What's happening here?
// @ts-ignore
res.params.log = log;
if (res.params.version === 2) {
return vault.authenticateV2Request(res.params, requestContexts, cb);
// @ts-ignore
return vault!.authenticateV2Request(res.params, requestContexts, cb);
}
if (res.params.version === 4) {
return vault.authenticateV4Request(res.params, requestContexts, cb,
awsService);
// @ts-ignore
return vault!.authenticateV4Request(res.params, requestContexts, cb);
}
log.error('authentication method not found', {
@ -146,22 +163,44 @@ function doAuth(request, log, cb, awsService, requestContexts) {
return cb(errors.InternalError);
}
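A minimal caller sketch for the dispatch above (the request shape and the two response helpers are assumptions for illustration, not part of this module):

import { Logger } from 'werelogs';

declare function sendError(err: Error): void;          // hypothetical helper
declare function handle(request: any, info: any): void; // hypothetical helper

// Hypothetical entry point: authenticate an incoming S3 request.
// `request` must expose `headers` and a parsed `query` object.
function onRequest(request: any, log: Logger, requestContexts: any[] | null) {
    doAuth(request, log, (err, authInfo) => {
        if (err) {
            // err is an arsenal errors object, e.g. errors.AccessDenied
            return sendError(err);
        }
        return handle(request, authInfo);
    }, 's3', requestContexts);
}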
/**
* This function will generate a content-md5 header for the V4 signing flow
* It looks at the request path to determine what kind of header encoding is required
*
* @param path - the request path
* @param payload - the request payload to hash
*/
function generateContentMD5Header(
path: string,
payload: string,
) {
const encoding = path && path.startsWith('/_/backbeat/') ? 'hex' : 'base64';
return crypto.createHash('md5').update(payload, 'binary').digest(encoding);
}
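A quick illustration of the encoding switch above: only paths under /_/backbeat/ get a hex digest, everything else stays base64 (the payload value is made up):

// Same MD5 bytes, two encodings depending on the request path:
const payload = '{"hello":"world"}';
const b64 = crypto.createHash('md5').update(payload, 'binary').digest('base64');
const hex = crypto.createHash('md5').update(payload, 'binary').digest('hex');
// generateContentMD5Header('/bucket/key', payload) === b64
// generateContentMD5Header('/_/backbeat/metadata', payload) === hex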
/**
* This function will generate a version 4 header
*
* @param {Http.Request} request - Http request object
* @param {object} data - Parameters from queryString parsing or body of
* @param request - Http request object
* @param data - Parameters from queryString parsing or body of
* POST request
* @param {string} accessKey - the accessKey
* @param {string} secretKeyValue - the secretKey
* @param {string} awsService - Aws service related
* @param {string} [proxyPath] - path that gets proxied by reverse proxy
* @param {string} [sessionToken] - security token if the access/secret keys
* @param accessKey - the accessKey
* @param secretKeyValue - the secretKey
* @param awsService - Aws service related
* @param [proxyPath] - path that gets proxied by reverse proxy
* @param [sessionToken] - security token if the access/secret keys
* are temporary credentials from STS
* @return {undefined}
* @param [payload] - body of the request if any
*/
function generateV4Headers(request, data, accessKey, secretKeyValue,
awsService, proxyPath, sessionToken) {
function generateV4Headers(
request: any,
data: { [key: string]: string },
accessKey: string,
secretKeyValue: string,
awsService: string,
proxyPath?: string,
sessionToken?: string,
payload?: string,
) {
Object.assign(request, { headers: {} });
const amzDate = convertUTCtoISO8601(Date.now());
// get date without time
@ -173,9 +212,9 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
const timestamp = amzDate;
const algorithm = 'AWS4-HMAC-SHA256';
let payload = '';
payload = payload || '';
if (request.method === 'POST') {
payload = queryString.stringify(data, null, null, {
payload = queryString.stringify(data, undefined, undefined, {
encodeURIComponent,
});
}
@ -184,6 +223,7 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
request.setHeader('host', request._headers.host);
request.setHeader('x-amz-date', amzDate);
request.setHeader('x-amz-content-sha256', payloadChecksum);
request.setHeader('content-md5', generateContentMD5Header(request.path, payload));
if (sessionToken) {
request.setHeader('x-amz-security-token', sessionToken);
@ -194,6 +234,7 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
.filter(headerName =>
headerName.startsWith('x-amz-')
|| headerName.startsWith('x-scal-')
|| headerName === 'content-md5'
|| headerName === 'host',
).sort().join(';');
const params = { request, signedHeaders, payloadChecksum,
@ -205,7 +246,7 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
scopeDate,
service);
const signature = crypto.createHmac('sha256', signingKey)
.update(stringToSign, 'binary').digest('hex');
.update(stringToSign as string, 'binary').digest('hex');
const authorizationHeader = `${algorithm} Credential=${accessKey}` +
`/${credentialScope}, SignedHeaders=${signedHeaders}, ` +
`Signature=${signature}`;
@ -213,25 +254,12 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
Object.assign(request, { headers: {} });
}
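A client-side sketch of driving this signer (host, path, and both keys are placeholders; http.request returns the mutable request object the function fills in):

import * as http from 'http';

const req = http.request({ method: 'GET', host: 's3.example.com', path: '/mybucket' });
// Placeholder credentials, not real keys:
generateV4Headers(req, {}, 'AKIDPLACEHOLDER', 'secretPlaceholder', 's3');
// req now carries host, x-amz-date, x-amz-content-sha256, content-md5,
// and an Authorization header with the computed V4 signature.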
module.exports = {
setHandler: setAuthHandler,
server: {
extractParams,
doAuth,
},
client: {
generateV4Headers,
constructStringToSignV2,
},
inMemory: {
backend: inMemoryBackend,
validateAuthConfig,
AuthLoader,
},
backends: {
baseBackend,
chainBackend,
},
export const server = { extractParams, doAuth }
export const client = { generateV4Headers, constructStringToSignV2 }
export const inMemory = { backend: inMemoryBackend, validateAuthConfig, AuthLoader }
export const backends = { baseBackend, chainBackend }
export {
setAuthHandler as setHandler,
AuthInfo,
Vault,
};
Vault
}


@ -1,10 +1,7 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const async = require('async');
const errors = require('../../errors');
const BaseBackend = require('./base');
import assert from 'assert';
import async from 'async';
import errors from '../../errors';
import BaseBackend from './base';
/**
* Class that provides an authentication backend that will verify signatures
@ -13,13 +10,15 @@ const BaseBackend = require('./base');
*
* @class ChainBackend
*/
class ChainBackend extends BaseBackend {
export default class ChainBackend extends BaseBackend {
_clients: any[];
/**
* @constructor
* @param {string} service - service id
* @param {object[]} clients - list of authentication backends or vault clients
*/
constructor(service, clients) {
constructor(service: string, clients: any[]) {
super(service);
assert(Array.isArray(clients) && clients.length > 0, 'invalid client list');
@ -38,18 +37,25 @@ class ChainBackend extends BaseBackend {
/*
* try the task against each client until one succeeds
*/
_tryEachClient(task, cb) {
_tryEachClient(task: any, cb: any) {
// @ts-ignore
async.tryEach(this._clients.map(client => done => task(client, done)), cb);
}
/*
* apply task to all clients
*/
_forEachClient(task, cb) {
_forEachClient(task: any, cb: any) {
async.map(this._clients, task, cb);
}
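The two helpers encode the chain's two dispatch modes; roughly (a semantics sketch with placeholder values, not the library code):

import async from 'async';

declare const clients: any[];
declare const task: (client: any, done: any) => void;
declare const callback: (err: Error | null, res?: any) => void;

// _tryEachClient: the first client to succeed wins (signature checks).
async.tryEach(clients.map(client => (done: any) => task(client, done)), callback);

// _forEachClient: fan out to every client and collect every result,
// which the callers below then merge (lookups, policies, healthchecks).
async.map(clients, task, callback);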
verifySignatureV2(stringToSign, signatureFromRequest, accessKey, options, callback) {
verifySignatureV2(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
options: any,
callback: any,
) {
this._tryEachClient((client, done) => client.verifySignatureV2(
stringToSign,
signatureFromRequest,
@ -59,7 +65,15 @@ class ChainBackend extends BaseBackend {
), callback);
}
verifySignatureV4(stringToSign, signatureFromRequest, accessKey, region, scopeDate, options, callback) {
verifySignatureV4(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
region: string,
scopeDate: string,
options: any,
callback: any,
) {
this._tryEachClient((client, done) => client.verifySignatureV4(
stringToSign,
signatureFromRequest,
@ -71,13 +85,13 @@ class ChainBackend extends BaseBackend {
), callback);
}
static _mergeObjects(objectResponses) {
static _mergeObjects(objectResponses: any) {
return objectResponses.reduce(
(retObj, resObj) => Object.assign(retObj, resObj.message.body),
{});
}
getCanonicalIds(emailAddresses, options, callback) {
getCanonicalIds(emailAddresses: string[], options: any, callback: any) {
this._forEachClient(
(client, done) => client.getCanonicalIds(emailAddresses, options, done),
(err, res) => {
@ -93,7 +107,7 @@ class ChainBackend extends BaseBackend {
});
}
getEmailAddresses(canonicalIDs, options, callback) {
getEmailAddresses(canonicalIDs: string[], options: any, callback: any) {
this._forEachClient(
(client, done) => client.getEmailAddresses(canonicalIDs, options, done),
(err, res) => {
@ -111,31 +125,45 @@ class ChainBackend extends BaseBackend {
/*
* merge policy responses into a single message
*/
static _mergePolicies(policyResponses) {
const policyMap = {};
static _mergePolicies(policyResponses: any) {
const policyMap: any = {};
policyResponses.forEach(resp => {
if (!resp.message || !Array.isArray(resp.message.body)) {
return;
}
resp.message.body.forEach(policy => {
const key = (policy.arn || '') + (policy.versionId || '');
const check = (policy) => {
const key = (policy.arn || '') + (policy.versionId || '') + (policy.action || '');
if (!policyMap[key] || !policyMap[key].isAllowed) {
policyMap[key] = policy;
}
// else is duplicate policy
};
resp.message.body.forEach(policy => {
if (Array.isArray(policy)) {
policy.forEach(authResult => check(authResult));
} else {
check(policy);
}
});
});
return Object.keys(policyMap).map(key => {
const policyRes = { isAllowed: policyMap[key].isAllowed };
const policyRes: any = { isAllowed: policyMap[key].isAllowed };
if (policyMap[key].arn !== '') {
policyRes.arn = policyMap[key].arn;
}
if (policyMap[key].versionId) {
policyRes.versionId = policyMap[key].versionId;
}
if (policyMap[key].isImplicit !== undefined) {
policyRes.isImplicit = policyMap[key].isImplicit;
}
if (policyMap[key].action) {
policyRes.action = policyMap[key].action;
}
return policyRes;
});
}
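A worked example of the merge (hypothetical responses; the allowed verdict for the same arn/action pair wins, and the nested-array form is flattened by the check helper):

const responses = [
    { message: { body: [
        { isAllowed: false, arn: 'arn:aws:s3:::bkt', action: 's3:GetObject', isImplicit: true },
    ] } },
    { message: { body: [[
        { isAllowed: true, arn: 'arn:aws:s3:::bkt', action: 's3:GetObject', isImplicit: false },
    ]] } },
];
// ChainBackend._mergePolicies(responses) yields a single entry:
// [{ isAllowed: true, arn: 'arn:aws:s3:::bkt', isImplicit: false, action: 's3:GetObject' }]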
@ -148,7 +176,7 @@ class ChainBackend extends BaseBackend {
message: string,
} }
*/
checkPolicies(requestContextParams, userArn, options, callback) {
checkPolicies(requestContextParams: any, userArn: string, options: any, callback: any) {
this._forEachClient((client, done) => client.checkPolicies(
requestContextParams,
userArn,
@ -166,7 +194,7 @@ class ChainBackend extends BaseBackend {
});
}
healthcheck(reqUid, callback) {
healthcheck(reqUid: string, callback: any) {
this._forEachClient((client, done) =>
client.healthcheck(reqUid, (err, res) => done(null, {
error: !!err ? err : null,
@ -184,6 +212,22 @@ class ChainBackend extends BaseBackend {
return callback(null, res);
});
}
}
module.exports = ChainBackend;
report(reqUid: string, callback: any) {
this._forEachClient((client, done) =>
client.report(reqUid, done),
(err, res) => {
if (err) {
return callback(err);
}
const mergedRes = res.reduce((acc, val) => {
Object.keys(val).forEach(k => {
acc[k] = val[k];
});
return acc;
}, {});
return callback(null, mergedRes);
});
}
}


@ -1,86 +0,0 @@
'use strict'; // eslint-disable-line strict
const errors = require('../../errors');
/**
* Base backend class
*
* @class BaseBackend
*/
class BaseBackend {
/**
* @constructor
* @param {string} service - service identifier for constructing the ARN
*/
constructor(service) {
this.service = service;
}
/** verifySignatureV2
* @param {string} stringToSign - string to sign built per AWS rules
* @param {string} signatureFromRequest - signature sent with request
* @param {string} accessKey - account accessKey
* @param {object} options - contains algorithm (SHA1 or SHA256)
* @param {function} callback - callback with either error or user info
* @return {function} calls callback
*/
verifySignatureV2(stringToSign, signatureFromRequest,
accessKey, options, callback) {
return callback(errors.AuthMethodNotImplemented);
}
/** verifySignatureV4
* @param {string} stringToSign - string to sign built per AWS rules
* @param {string} signatureFromRequest - signature sent with request
* @param {string} accessKey - account accessKey
* @param {string} region - region specified in request credential
* @param {string} scopeDate - date specified in request credential
* @param {object} options - options to send to Vault
* (just contains reqUid for logging in Vault)
* @param {function} callback - callback with either error or user info
* @return {function} calls callback
*/
verifySignatureV4(stringToSign, signatureFromRequest, accessKey,
region, scopeDate, options, callback) {
return callback(errors.AuthMethodNotImplemented);
}
/**
* Gets canonical ID's for a list of accounts
* based on email associated with account
* @param {array} emails - list of email addresses
* @param {object} options - to send log id to vault
* @param {function} callback - callback to calling function
* @returns {function} callback with either error or
* object with email addresses as keys and canonical IDs
* as values
*/
getCanonicalIds(emails, options, callback) {
return callback(errors.AuthMethodNotImplemented);
}
/**
* Gets email addresses (referred to as display names for getACLs)
* for a list of accounts based on canonical IDs associated with account
* @param {array} canonicalIDs - list of canonicalIDs
* @param {object} options - to send log id to vault
* @param {function} callback - callback to calling function
* @returns {function} callback with either error or
* an object from Vault containing account canonicalID
* as each object key and an email address as the value (or "NotFound")
*/
getEmailAddresses(canonicalIDs, options, callback) {
return callback(errors.AuthMethodNotImplemented);
}
checkPolicies(requestContextParams, userArn, options, callback) {
return callback(null, { message: { body: [] } });
}
healthcheck(reqUid, callback) {
return callback(null, { code: 200, message: 'OK' });
}
}
module.exports = BaseBackend;

lib/auth/backends/base.ts

@ -0,0 +1,96 @@
import errors from '../../errors';
/**
* Base backend class
*
* @class BaseBackend
*/
export default class BaseBackend {
service: string;
/**
* @constructor
* @param {string} service - service identifier for constructing the ARN
*/
constructor(service: string) {
this.service = service;
}
/** verifySignatureV2
* @param stringToSign - string to sign built per AWS rules
* @param signatureFromRequest - signature sent with request
* @param accessKey - account accessKey
* @param options - contains algorithm (SHA1 or SHA256)
* @param callback - callback with either error or user info
* @return calls callback
*/
verifySignatureV2(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
options: any,
callback: any
) {
return callback(errors.AuthMethodNotImplemented);
}
/** verifySignatureV4
* @param stringToSign - string to sign built per AWS rules
* @param signatureFromRequest - signature sent with request
* @param accessKey - account accessKey
* @param region - region specified in request credential
* @param scopeDate - date specified in request credential
* @param options - options to send to Vault
* (just contains reqUid for logging in Vault)
* @param callback - callback with either error or user info
* @return calls callback
*/
verifySignatureV4(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
region: string,
scopeDate: string,
options: any,
callback: any
) {
return callback(errors.AuthMethodNotImplemented);
}
/**
* Gets canonical ID's for a list of accounts
* based on email associated with account
* @param emails - list of email addresses
* @param options - to send log id to vault
* @param callback - callback to calling function
* @returns callback with either error or
* object with email addresses as keys and canonical IDs
* as values
*/
getCanonicalIds(emails: string[], options: any, callback: any) {
return callback(errors.AuthMethodNotImplemented);
}
/**
* Gets email addresses (referred to as display names for getACLs)
* for a list of accounts based on canonical IDs associated with account
* @param canonicalIDs - list of canonicalIDs
* @param options - to send log id to vault
* @param callback - callback to calling function
* @returns callback with either error or
* an object from Vault containing account canonicalID
* as each object key and an email address as the value (or "NotFound")
*/
getEmailAddresses(canonicalIDs: string[], options: any, callback: any) {
return callback(errors.AuthMethodNotImplemented);
}
checkPolicies(requestContextParams: any, userArn: string, options: any, callback: any) {
return callback(null, { message: { body: [] } });
}
healthcheck(reqUid: string, callback: any) {
return callback(null, { code: 200, message: 'OK' });
}
}


@ -1,223 +0,0 @@
const fs = require('fs');
const glob = require('simple-glob');
const joi = require('@hapi/joi');
const werelogs = require('werelogs');
const ARN = require('../../../models/ARN');
/**
* Load authentication information from files or pre-loaded account
* objects
*
* @class AuthLoader
*/
class AuthLoader {
constructor(logApi) {
this._log = new (logApi || werelogs).Logger('S3');
this._authData = { accounts: [] };
// null: unknown validity, true/false: valid or invalid
this._isValid = null;
this._joiKeysValidator = joi.array()
.items({
access: joi.string().required(),
secret: joi.string().required(),
})
.required();
const accountsJoi = joi.array()
.items({
name: joi.string().required(),
email: joi.string().email().required(),
arn: joi.string().required(),
canonicalID: joi.string().required(),
shortid: joi.string().regex(/^[0-9]{12}$/).required(),
keys: this._joiKeysValidator,
// backward-compat
users: joi.array(),
})
.required()
.unique('arn')
.unique('email')
.unique('canonicalID');
this._joiValidator = joi.object({ accounts: accountsJoi });
}
/**
* add one or more accounts to the authentication info
*
* @param {object} authData - authentication data
* @param {object[]} authData.accounts - array of account data
* @param {string} authData.accounts[].name - account name
* @param {string} authData.accounts[].email - email address
* @param {string} authData.accounts[].arn - account ARN,
* e.g. 'arn:aws:iam::123456789012:root'
* @param {string} authData.accounts[].canonicalID - account
* canonical ID
* @param {string} authData.accounts[].shortid - account ID number,
* e.g. '123456789012'
* @param {object[]} authData.accounts[].keys - array of
* access/secret keys
* @param {object[]} authData.accounts[].keys[].access - access key
* @param {object[]} authData.accounts[].keys[].secret - secret key
* @param {string} [filePath] - optional file path info for
* logging purpose
* @return {undefined}
*/
addAccounts(authData, filePath) {
const isValid = this._validateData(authData, filePath);
if (isValid) {
this._authData.accounts =
this._authData.accounts.concat(authData.accounts);
// defer validity checking until the data is read, to avoid
// logging the errors multiple times (we need to validate
// all accounts at once to detect duplicate values)
if (this._isValid) {
this._isValid = null;
}
} else {
this._isValid = false;
}
}
/**
* add account information from a file
*
* @param {string} filePath - file path containing JSON
* authentication info (see {@link addAccounts()} for format)
* @return {undefined}
*/
addFile(filePath) {
const authData = JSON.parse(fs.readFileSync(filePath));
this.addAccounts(authData, filePath);
}
/**
* add account information from a filesystem path
*
* @param {string|string[]} globPattern - filesystem glob pattern,
* can be a single string or an array of glob patterns. Globs
* can be simple file paths or can contain glob matching
* characters, like '/a/b/*.json'. The matching files are
* individually loaded as JSON and accounts are added. See
* {@link addAccounts()} for JSON format.
* @return {undefined}
*/
addFilesByGlob(globPattern) {
const files = glob(globPattern);
files.forEach(filePath => this.addFile(filePath));
}
/**
* perform validation on authentication info previously
* loaded. Note that it has to be done on the entire set after an
* update to catch duplicate account IDs or access keys.
*
* @return {boolean} true if authentication info is valid
* false otherwise
*/
validate() {
if (this._isValid === null) {
this._isValid = this._validateData(this._authData);
}
return this._isValid;
}
/**
* get authentication info as a plain JS object containing all accounts
* under the "accounts" attribute, with validation.
*
* @return {object|null} the validated authentication data
* null if invalid
*/
getData() {
return this.validate() ? this._authData : null;
}
_validateData(authData, filePath) {
const res = joi.validate(authData, this._joiValidator,
{ abortEarly: false });
if (res.error) {
this._dumpJoiErrors(res.error.details, filePath);
return false;
}
let allKeys = [];
let arnError = false;
const validatedAuth = res.value;
validatedAuth.accounts.forEach(account => {
// backward-compat: ignore arn if starts with 'aws:' and log a
// warning
if (account.arn.startsWith('aws:')) {
this._log.error(
'account must have a valid AWS ARN, legacy examples ' +
'starting with \'aws:\' are not supported anymore. ' +
'Please convert to a proper account entry (see ' +
'examples at https://github.com/scality/S3/blob/' +
'master/conf/authdata.json). Also note that support ' +
'for account users has been dropped.',
{ accountName: account.name, accountArn: account.arn,
filePath });
arnError = true;
return;
}
if (account.users) {
this._log.error(
'support for account users has been dropped, consider ' +
'turning users into account entries (see examples at ' +
'https://github.com/scality/S3/blob/master/conf/' +
'authdata.json)',
{ accountName: account.name, accountArn: account.arn,
filePath });
arnError = true;
return;
}
const arnObj = ARN.createFromString(account.arn);
if (arnObj.error) {
this._log.error(
'authentication config validation error',
{ reason: arnObj.error.description,
accountName: account.name, accountArn: account.arn,
filePath });
arnError = true;
return;
}
if (!arnObj.isIAMAccount()) {
this._log.error(
'authentication config validation error',
{ reason: 'not an IAM account ARN',
accountName: account.name, accountArn: account.arn,
filePath });
arnError = true;
return;
}
allKeys = allKeys.concat(account.keys);
});
if (arnError) {
return false;
}
const uniqueKeysRes = joi.validate(
allKeys, this._joiKeysValidator.unique('access'));
if (uniqueKeysRes.error) {
this._dumpJoiErrors(uniqueKeysRes.error.details, filePath);
return false;
}
return true;
}
_dumpJoiErrors(errors, filePath) {
errors.forEach(err => {
const logInfo = { item: err.path, filePath };
if (err.type === 'array.unique') {
logInfo.reason = `duplicate value '${err.context.path}'`;
logInfo.dupValue = err.context.value[err.context.path];
} else {
logInfo.reason = err.message;
logInfo.context = err.context;
}
this._log.error('authentication config validation error',
logInfo);
});
}
}
module.exports = AuthLoader;


@ -0,0 +1,204 @@
import * as fs from 'fs';
import glob from 'simple-glob';
import joi from 'joi';
import werelogs from 'werelogs';
import * as types from './types';
import { Account, Accounts } from './types';
import ARN from '../../../models/ARN';
/** Load authentication information from files or pre-loaded account objects */
export default class AuthLoader {
#log: werelogs.Logger;
#authData: Accounts;
#isValid: 'waiting-for-validation' | 'valid' | 'invalid';
constructor(logApi: { Logger: typeof werelogs.Logger } = werelogs) {
this.#log = new logApi.Logger('S3');
this.#authData = { accounts: [] };
this.#isValid = 'waiting-for-validation';
}
/** Add one or more accounts to the authentication info */
addAccounts(authData: Accounts, filePath?: string) {
const isValid = this.#isAuthDataValid(authData, filePath);
if (isValid) {
this.#authData.accounts = [
...this.#authData.accounts,
...authData.accounts,
];
// defer validity checking until the data is read, to avoid
// logging the errors multiple times (we need to validate
// all accounts at once to detect duplicate values)
if (this.#isValid === 'valid') {
this.#isValid = 'waiting-for-validation';
}
} else {
this.#isValid = 'invalid';
}
}
/**
* Add account information from a file. Use { legacy: false } as an option
* to use the new, Promise-based version.
*
* @param filePath - file path containing JSON
* authentication info (see {@link addAccounts()} for format)
*/
addFile(filePath: string, options: { legacy: false }): Promise<void>;
/** @deprecated Please use Promise-version instead. */
addFile(filePath: string, options?: { legacy: true }): void;
addFile(filePath: string, options = { legacy: true }) {
// On deprecation, remove the legacy part and keep the promises.
const readFunc: any = options.legacy ? fs.readFileSync : fs.promises.readFile;
const readResult = readFunc(filePath, 'utf8') as Promise<string> | string;
const prom = Promise.resolve(readResult).then((data) => {
const authData = JSON.parse(data);
this.addAccounts(authData, filePath);
});
return options.legacy ? undefined : prom;
}
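During the deprecation window both call styles coexist; a sketch (the file path is illustrative):

const loader = new AuthLoader();
// Legacy, synchronous (fs.readFileSync under the hood):
loader.addFile('conf/authdata.json');
// Promise-based (fs.promises.readFile under the hood), inside an async context:
await loader.addFile('conf/authdata.json', { legacy: false });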
/**
* Add account information from a filesystem path
*
* @param globPattern - filesystem glob pattern,
* can be a single string or an array of glob patterns. Globs
* can be simple file paths or can contain glob matching
* characters, like '/a/b/*.json'. The matching files are
* individually loaded as JSON and accounts are added. See
* {@link addAccounts()} for JSON format.
*/
addFilesByGlob(globPattern: string | string[]) {
// FIXME switch glob to async version
const files = glob(globPattern);
files.forEach((filePath) => this.addFile(filePath));
}
/**
* Perform validation on authentication info previously
* loaded. Note that it has to be done on the entire set after an
* update to catch duplicate account IDs or access keys.
*/
validate() {
if (this.#isValid === 'waiting-for-validation') {
const isValid = this.#isAuthDataValid(this.#authData);
this.#isValid = isValid ? 'valid' : 'invalid';
}
return this.#isValid === 'valid';
}
/**
* Get authentication info as a plain JS object containing all accounts
* under the "accounts" attribute, with validation.
*/
get data() {
return this.validate() ? this.#authData : null;
}
/** backward-compat: ignore arn if starts with 'aws:' and log a warning */
#isNotLegacyAWSARN(account: Account, filePath?: string) {
if (account.arn.startsWith('aws:')) {
const { name: accountName, arn: accountArn } = account;
this.#log.error(
'account must have a valid AWS ARN, legacy examples ' +
"starting with 'aws:' are not supported anymore. " +
'Please convert to a proper account entry (see ' +
'examples at https://github.com/scality/S3/blob/' +
'master/conf/authdata.json). Also note that support ' +
'for account users has been dropped.',
{ accountName, accountArn, filePath }
);
return false;
}
return true;
}
#isValidUsers(account: Account, filePath?: string) {
if (account.users) {
const { name: accountName, arn: accountArn } = account;
this.#log.error(
'support for account users has been dropped, consider ' +
'turning users into account entries (see examples at ' +
'https://github.com/scality/S3/blob/master/conf/' +
'authdata.json)',
{ accountName, accountArn, filePath }
);
return false;
}
return true;
}
#isValidARN(account: Account, filePath?: string) {
const arnObj = ARN.createFromString(account.arn);
const { name: accountName, arn: accountArn } = account;
if (arnObj instanceof ARN) {
if (!arnObj.isIAMAccount()) {
this.#log.error('authentication config validation error', {
reason: 'not an IAM account ARN',
accountName,
accountArn,
filePath,
});
return false;
}
} else {
this.#log.error('authentication config validation error', {
reason: arnObj.error.description,
accountName,
accountArn,
filePath,
});
return false;
}
return true;
}
#isAuthDataValid(authData: any, filePath?: string) {
const options = { abortEarly: true };
const response = types.validators.accounts.validate(authData, options);
if (response.error) {
this.#dumpJoiErrors(response.error.details, filePath);
return false;
}
const validAccounts = response.value.accounts.filter(
(account: Account) =>
this.#isNotLegacyAWSARN(account, filePath) &&
this.#isValidUsers(account, filePath) &&
this.#isValidARN(account, filePath)
);
const areSomeInvalidAccounts =
validAccounts.length !== response.value.accounts.length;
if (areSomeInvalidAccounts) {
return false;
}
const keys = validAccounts.flatMap((account) => account.keys);
const uniqueKeysValidator = types.validators.keys.unique('access');
const areKeysUnique = uniqueKeysValidator.validate(keys);
if (areKeysUnique.error) {
this.#dumpJoiErrors(areKeysUnique.error.details, filePath);
return false;
}
return true;
}
#dumpJoiErrors(errors: joi.ValidationErrorItem[], filePath?: string) {
errors.forEach((err) => {
const baseLogInfo = { item: err.path, filePath };
const logInfo = () => {
if (err.type === 'array.unique') {
const reason = `duplicate value '${err.context?.path}'`;
const dupValue = err.context?.value[err.context.path];
return { ...baseLogInfo, reason, dupValue };
} else {
const reason = err.message;
const context = err.context;
return { ...baseLogInfo, reason, context };
}
};
this.#log.error(
'authentication config validation error',
logInfo()
);
});
}
}


@ -1,14 +1,12 @@
'use strict'; // eslint-disable-line strict
import crypto from 'crypto';
import { Logger } from 'werelogs';
import errors from '../../../errors';
import { calculateSigningKey, hashSignature } from './vaultUtilities';
import Indexer from './Indexer';
import BaseBackend from '../base';
import { Accounts } from './types';
const crypto = require('crypto');
const errors = require('../../../errors');
const calculateSigningKey = require('./vaultUtilities').calculateSigningKey;
const hashSignature = require('./vaultUtilities').hashSignature;
const Indexer = require('./Indexer');
const BaseBackend = require('../base');
function _formatResponse(userInfoToSend) {
function _formatResponse(userInfoToSend: any) {
return {
message: {
body: { userInfo: userInfoToSend },
@ -23,21 +21,29 @@ function _formatResponse(userInfoToSend) {
* @class InMemoryBackend
*/
class InMemoryBackend extends BaseBackend {
indexer: Indexer;
formatResponse: any;
/**
* @constructor
* @param {string} service - service identifier for constructing the ARN
* @param {Indexer} indexer - indexer instance for retrieving account info
* @param {function} formatter - function which accepts user info to send
* @param service - service identifier for constructing the ARN
* @param indexer - indexer instance for retrieving account info
* @param formatter - function which accepts user info to send
* back and returns it in an object
*/
constructor(service, indexer, formatter) {
constructor(service: string, indexer: Indexer, formatter: typeof _formatResponse) {
super(service);
this.indexer = indexer;
this.formatResponse = formatter;
}
verifySignatureV2(stringToSign, signatureFromRequest,
accessKey, options, callback) {
verifySignatureV2(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
options: any,
callback: any,
) {
const entity = this.indexer.getEntityByKey(accessKey);
if (!entity) {
return callback(errors.InvalidAccessKeyId);
@ -52,14 +58,22 @@ class InMemoryBackend extends BaseBackend {
accountDisplayName: this.indexer.getAcctDisplayName(entity),
canonicalID: entity.canonicalID,
arn: entity.arn,
// @ts-ignore
IAMdisplayName: entity.IAMdisplayName,
};
const vaultReturnObject = this.formatResponse(userInfoToSend);
return callback(null, vaultReturnObject);
}
verifySignatureV4(stringToSign, signatureFromRequest, accessKey,
region, scopeDate, options, callback) {
verifySignatureV4(
stringToSign: string,
signatureFromRequest: string,
accessKey: string,
region: string,
scopeDate: string,
options: any,
callback: any,
) {
const entity = this.indexer.getEntityByKey(accessKey);
if (!entity) {
return callback(errors.InvalidAccessKeyId);
@ -75,13 +89,14 @@ class InMemoryBackend extends BaseBackend {
accountDisplayName: this.indexer.getAcctDisplayName(entity),
canonicalID: entity.canonicalID,
arn: entity.arn,
// @ts-ignore
IAMdisplayName: entity.IAMdisplayName,
};
const vaultReturnObject = this.formatResponse(userInfoToSend);
return callback(null, vaultReturnObject);
}
getCanonicalIds(emails, log, cb) {
getCanonicalIds(emails: string[], log: Logger, cb: any) {
const results = {};
emails.forEach(email => {
const lowercasedEmail = email.toLowerCase();
@ -101,7 +116,7 @@ class InMemoryBackend extends BaseBackend {
return cb(null, vaultReturnObject);
}
getEmailAddresses(canonicalIDs, options, cb) {
getEmailAddresses(canonicalIDs: string[], options: any, cb: any) {
const results = {};
canonicalIDs.forEach(canonicalId => {
const foundEntity = this.indexer.getEntityByCanId(canonicalId);
@ -122,14 +137,14 @@ class InMemoryBackend extends BaseBackend {
/**
* Gets accountIds for a list of accounts based on
* the canonical IDs associated with the account
* @param {array} canonicalIDs - list of canonicalIDs
* @param {object} options - to send log id to vault
* @param {function} cb - callback to calling function
* @returns {function} callback with either error or
* @param canonicalIDs - list of canonicalIDs
* @param options - to send log id to vault
* @param cb - callback to calling function
* @returns callback with either error or
* an object from Vault containing account canonicalID
* as each object key and an accountId as the value (or "NotFound")
*/
getAccountIds(canonicalIDs, options, cb) {
getAccountIds(canonicalIDs: string[], options: any, cb: any) {
const results = {};
canonicalIDs.forEach(canonicalID => {
const foundEntity = this.indexer.getEntityByCanId(canonicalID);
@ -146,33 +161,34 @@ class InMemoryBackend extends BaseBackend {
};
return cb(null, vaultReturnObject);
}
report(log: Logger, callback: any) {
return callback(null, {});
}
}
class S3AuthBackend extends InMemoryBackend {
/**
* @constructor
* @param {object} authdata - the authentication config file's data
* @param {object[]} authdata.accounts - array of account objects
* @param {string=} authdata.accounts[].name - account name
* @param {string} authdata.accounts[].email - account email
* @param {string} authdata.accounts[].arn - IAM resource name
* @param {string} authdata.accounts[].canonicalID - account canonical ID
* @param {string} authdata.accounts[].shortid - short account ID
* @param {object[]=} authdata.accounts[].keys - array of key objects
* @param {string} authdata.accounts[].keys[].access - access key
* @param {string} authdata.accounts[].keys[].secret - secret key
* @return {undefined}
* @param authdata - the authentication config file's data
* @param authdata.accounts - array of account objects
* @param authdata.accounts[].name - account name
* @param authdata.accounts[].email - account email
* @param authdata.accounts[].arn - IAM resource name
* @param authdata.accounts[].canonicalID - account canonical ID
* @param authdata.accounts[].shortid - short account ID
* @param authdata.accounts[].keys - array of key objects
* @param authdata.accounts[].keys[].access - access key
* @param authdata.accounts[].keys[].secret - secret key
*/
constructor(authdata) {
constructor(authdata?: Accounts) {
super('s3', new Indexer(authdata), _formatResponse);
}
refreshAuthData(authData) {
refreshAuthData(authData?: Accounts) {
this.indexer = new Indexer(authData);
}
}
module.exports = {
s3: S3AuthBackend,
};
export { S3AuthBackend as s3 }


@ -1,145 +0,0 @@
/**
* Class that provides an internal indexing over the simple data provided by
* the authentication configuration file for the memory backend. This allows
* accessing the different authentication entities through various types of
* keys.
*
* @class Indexer
*/
class Indexer {
/**
* @constructor
* @param {object} authdata - the authentication config file's data
* @param {object[]} authdata.accounts - array of account objects
* @param {string=} authdata.accounts[].name - account name
* @param {string} authdata.accounts[].email - account email
* @param {string} authdata.accounts[].arn - IAM resource name
* @param {string} authdata.accounts[].canonicalID - account canonical ID
* @param {string} authdata.accounts[].shortid - short account ID
* @param {object[]=} authdata.accounts[].keys - array of key objects
* @param {string} authdata.accounts[].keys[].access - access key
* @param {string} authdata.accounts[].keys[].secret - secret key
* @return {undefined}
*/
constructor(authdata) {
this.accountsBy = {
canId: {},
accessKey: {},
email: {},
};
/*
* This may happen if the application is configured to use an
* authentication backend other than in-memory. In that case we
* handle the missing data here rather than fail later.
*/
if (!authdata) {
return;
}
this._build(authdata);
}
_indexAccount(account) {
const accountData = {
arn: account.arn,
canonicalID: account.canonicalID,
shortid: account.shortid,
accountDisplayName: account.name,
email: account.email.toLowerCase(),
keys: [],
};
this.accountsBy.canId[accountData.canonicalID] = accountData;
this.accountsBy.email[accountData.email] = accountData;
if (account.keys !== undefined) {
account.keys.forEach(key => {
accountData.keys.push(key);
this.accountsBy.accessKey[key.access] = accountData;
});
}
}
_build(authdata) {
authdata.accounts.forEach(account => {
this._indexAccount(account);
});
}
/**
* This method returns the account associated to a canonical ID.
*
* @param {string} canId - The canonicalId of the account
* @return {Object} account - The account object
* @return {Object} account.arn - The account's ARN
* @return {Object} account.canonicalID - The account's canonical ID
* @return {Object} account.shortid - The account's internal shortid
* @return {Object} account.accountDisplayName - The account's display name
* @return {Object} account.email - The account's lowercased email
*/
getEntityByCanId(canId) {
return this.accountsBy.canId[canId];
}
/**
* This method returns the entity (either an account or a user) associated
* to a canonical ID.
*
* @param {string} key - The accessKey of the entity
* @return {Object} entity - The entity object
* @return {Object} entity.arn - The entity's ARN
* @return {Object} entity.canonicalID - The canonical ID for the entity's
* account
* @return {Object} entity.shortid - The entity's internal shortid
* @return {Object} entity.accountDisplayName - The entity's account
* display name
* @return {Object} entity.IAMDisplayName - The user's display name
* (if the entity is a user)
* @return {Object} entity.email - The entity's lowercased email
*/
getEntityByKey(key) {
return this.accountsBy.accessKey[key];
}
/**
* This method returns the entity (either an account or a user) associated
* to an email address.
*
* @param {string} email - The email address
* @return {Object} entity - The entity object
* @return {Object} entity.arn - The entity's ARN
* @return {Object} entity.canonicalID - The canonical ID for the entity's
* account
* @return {Object} entity.shortid - The entity's internal shortid
* @return {Object} entity.accountDisplayName - The entity's account
* display name
* @return {Object} entity.IAMDisplayName - The user's display name
* (if the entity is a user)
* @return {Object} entity.email - The entity's lowercased email
*/
getEntityByEmail(email) {
const lowerCasedEmail = email.toLowerCase();
return this.accountsBy.email[lowerCasedEmail];
}
/**
* This method returns the secret key associated with the entity.
* @param {Object} entity - the entity object
* @param {string} accessKey - access key
* @returns {string} secret key
*/
getSecretKey(entity, accessKey) {
return entity.keys
.filter(kv => kv.access === accessKey)[0].secret;
}
/**
* This method returns the account display name associated with the entity.
* @param {Object} entity - the entity object
* @returns {string} account display name
*/
getAcctDisplayName(entity) {
return entity.accountDisplayName;
}
}
module.exports = Indexer;


@ -0,0 +1,93 @@
import { Accounts, Account, Entity } from './types';
/**
* Class that provides an internal indexing over the simple data provided by
* the authentication configuration file for the memory backend. This allows
* accessing the different authentication entities through various types of
* keys.
*/
export default class Indexer {
accountsBy: {
canId: { [id: string]: Entity | undefined },
accessKey: { [id: string]: Entity | undefined },
email: { [id: string]: Entity | undefined },
}
constructor(authdata?: Accounts) {
this.accountsBy = {
canId: {},
accessKey: {},
email: {},
};
/*
* This may happen if the application is configured to use an
* authentication backend other than in-memory. In that case we
* handle the missing data here rather than fail later.
*/
if (!authdata) {
return;
}
this.#build(authdata);
}
#indexAccount(account: Account) {
const accountData: Entity = {
arn: account.arn,
canonicalID: account.canonicalID,
shortid: account.shortid,
accountDisplayName: account.name,
email: account.email.toLowerCase(),
keys: [],
};
this.accountsBy.canId[accountData.canonicalID] = accountData;
this.accountsBy.email[accountData.email] = accountData;
if (account.keys !== undefined) {
account.keys.forEach(key => {
accountData.keys.push(key);
this.accountsBy.accessKey[key.access] = accountData;
});
}
}
#build(authdata: Accounts) {
authdata.accounts.forEach(account => {
this.#indexAccount(account);
});
}
/** This method returns the account associated to a canonical ID. */
getEntityByCanId(canId: string): Entity | undefined {
return this.accountsBy.canId[canId];
}
/**
* This method returns the entity (either an account or a user) associated
* to a canonical ID.
* @param {string} key - The accessKey of the entity
*/
getEntityByKey(key: string): Entity | undefined {
return this.accountsBy.accessKey[key];
}
/**
* This method returns the entity (either an account or a user) associated
* to an email address.
*/
getEntityByEmail(email: string): Entity | undefined {
const lowerCasedEmail = email.toLowerCase();
return this.accountsBy.email[lowerCasedEmail];
}
/** This method returns the secret key associated with the entity. */
getSecretKey(entity: Entity, accessKey: string) {
const keys = entity.keys.filter(kv => kv.access === accessKey);
return keys[0].secret;
}
/** This method returns the account display name associated with the entity. */
getAcctDisplayName(entity: Entity) {
return entity.accountDisplayName;
}
}
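A lookup sketch against the index (the access key is a placeholder):

declare const authdata: Accounts;

const indexer = new Indexer(authdata);
const entity = indexer.getEntityByKey('AKIDPLACEHOLDER');
if (entity) {
    const secret = indexer.getSecretKey(entity, 'AKIDPLACEHOLDER');
    const displayName = indexer.getAcctDisplayName(entity);
}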


@ -0,0 +1,51 @@
import joi from 'joi';
export type Callback<Data = any> = (err?: Error | null | undefined, data?: Data) => void;
export type Credentials = { access: string; secret: string };
export type Base = {
arn: string;
canonicalID: string;
shortid: string;
email: string;
keys: Credentials[];
};
export type Account = Base & { name: string; users: any[] };
export type Accounts = { accounts: Account[] };
export type Entity = Base & { accountDisplayName: string };
const keys = ((): joi.ArraySchema => {
const str = joi.string().required();
const items = { access: str, secret: str };
return joi.array().items(items).required();
})();
const account = (() => {
return joi.object<Account>({
name: joi.string().required(),
email: joi.string().email().required(),
arn: joi.string().required(),
canonicalID: joi.string().required(),
shortid: joi
.string()
.regex(/^[0-9]{12}$/)
.required(),
keys: keys,
// backward-compat
users: joi.array(),
});
})();
const accounts = (() => {
return joi.object<Accounts>({
accounts: joi
.array()
.items(account)
.required()
.unique('arn')
.unique('email')
.unique('canonicalID'),
});
})();
export const validators = { keys, account, accounts };
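The exported validators can be exercised directly; a sketch with a deliberately malformed account:

const res = validators.accounts.validate({ accounts: [{ name: 42 }] });
// res.error describes the first failure (joi aborts early by default);
// res.value holds the parsed data when validation passes.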


@ -1,18 +0,0 @@
const AuthLoader = require('./AuthLoader');
/**
* @deprecated please use {@link AuthLoader} class instead
*
* @param {object} authdata - the authentication config file's data
* @param {werelogs.API} logApi - object providing a constructor function
* for the Logger object
* @return {boolean} true on erroneous data
* false on success
*/
function validateAuthConfig(authdata, logApi) {
const authLoader = new AuthLoader(logApi);
authLoader.addAccounts(authdata);
return !authLoader.validate();
}
module.exports = validateAuthConfig;


@ -0,0 +1,16 @@
import { Logger } from 'werelogs';
import AuthLoader from './AuthLoader';
import { Accounts } from './types';
/**
* @deprecated please use {@link AuthLoader} class instead
* @return true on erroneous data, false on success
*/
export default function validateAuthConfig(
authdata: Accounts,
logApi?: { Logger: typeof Logger }
) {
const authLoader = new AuthLoader(logApi);
authLoader.addAccounts(authdata);
return !authLoader.validate();
}


@ -1,6 +1,4 @@
'use strict'; // eslint-disable-line strict
const crypto = require('crypto');
import * as crypto from 'crypto';
/** hashSignature for v2 Auth
* @param {string} stringToSign - built string to sign per AWS rules
@ -8,11 +6,19 @@ const crypto = require('crypto');
* @param {string} algorithm - either SHA256 or SHA1
* @return {string} reconstructed signature
*/
function hashSignature(stringToSign, secretKey, algorithm) {
export function hashSignature(
stringToSign: string,
secretKey: string,
algorithm: 'SHA256' | 'SHA1'
): string {
const hmacObject = crypto.createHmac(algorithm, secretKey);
return hmacObject.update(stringToSign, 'binary').digest('base64');
}
const sha256Digest = (key: string | Buffer, data: string) => {
return crypto.createHmac('sha256', key).update(data, 'binary').digest();
};
/** calculateSigningKey for v4 Auth
* @param {string} secretKey - requester's secretKey
* @param {string} region - region included in request
@ -20,16 +26,15 @@ function hashSignature(stringToSign, secretKey, algorithm) {
* @param {string} [service] - To specify another service than s3
* @return {string} signingKey - signingKey to calculate signature
*/
function calculateSigningKey(secretKey, region, scopeDate, service) {
const dateKey = crypto.createHmac('sha256', `AWS4${secretKey}`)
.update(scopeDate, 'binary').digest();
const dateRegionKey = crypto.createHmac('sha256', dateKey)
.update(region, 'binary').digest();
const dateRegionServiceKey = crypto.createHmac('sha256', dateRegionKey)
.update(service || 's3', 'binary').digest();
const signingKey = crypto.createHmac('sha256', dateRegionServiceKey)
.update('aws4_request', 'binary').digest();
export function calculateSigningKey(
secretKey: string,
region: string,
scopeDate: string,
service?: string
): Buffer {
const dateKey = sha256Digest(`AWS4${secretKey}`, scopeDate);
const dateRegionKey = sha256Digest(dateKey, region);
const dateRegionServiceKey = sha256Digest(dateRegionKey, service || 's3');
const signingKey = sha256Digest(dateRegionServiceKey, 'aws4_request');
return signingKey;
}
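End to end, the derived key then signs the string-to-sign with one more HMAC round, as generateV4Headers does earlier in this changeset (both inputs here are placeholders):

const signingKey = calculateSigningKey('secretPlaceholder', 'us-east-1', '20240804');
const signature = crypto.createHmac('sha256', signingKey)
    .update('example-string-to-sign', 'binary').digest('hex');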
module.exports = { hashSignature, calculateSigningKey };


@ -1,7 +1,5 @@
'use strict'; // eslint-disable-line strict
function algoCheck(signatureLength) {
let algo;
export default function algoCheck(signatureLength: number) {
let algo: 'sha256' | 'sha1';
// If the signature sent is 44 characters,
// this means that sha256 was used:
// 44 characters in base64
@ -13,7 +11,6 @@ function algoCheck(signatureLength) {
if (signatureLength === SHA1LEN) {
algo = 'sha1';
}
// @ts-ignore
return algo;
}
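The two length constants follow from base64 expansion, which emits 4 output characters per 3-byte input group, padded:

// A 32-byte SHA-256 digest and a 20-byte SHA-1 digest, base64-encoded:
Buffer.alloc(32).toString('base64').length; // 44
Buffer.alloc(20).toString('base64').length; // 28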
module.exports = algoCheck;


@ -1,11 +0,0 @@
'use strict'; // eslint-disable-line strict
const headerAuthCheck = require('./headerAuthCheck');
const queryAuthCheck = require('./queryAuthCheck');
const authV2 = {
header: headerAuthCheck,
query: queryAuthCheck,
};
module.exports = authV2;

lib/auth/v2/authV2.ts

@ -0,0 +1,2 @@
export * as header from './headerAuthCheck';
export * as query from './queryAuthCheck';


@ -1,9 +1,9 @@
'use strict'; // eslint-disable-line strict
const errors = require('../../errors');
import { Logger } from 'werelogs';
import errors from '../../errors';
const epochTime = new Date('1970-01-01').getTime();
function checkRequestExpiry(timestamp, log) {
export default function checkRequestExpiry(timestamp: number, log: Logger) {
// If timestamp is before epochTime, the request is invalid and return
// errors.AccessDenied
if (timestamp < epochTime) {
@ -32,5 +32,3 @@ function checkRequestExpiry(timestamp, log) {
return undefined;
}
module.exports = checkRequestExpiry;

View File

@ -1,11 +1,14 @@
'use strict'; // eslint-disable-line strict
import { Logger } from 'werelogs';
import utf8 from 'utf8';
import getCanonicalizedAmzHeaders from './getCanonicalizedAmzHeaders';
import getCanonicalizedResource from './getCanonicalizedResource';
const utf8 = require('utf8');
const getCanonicalizedAmzHeaders = require('./getCanonicalizedAmzHeaders');
const getCanonicalizedResource = require('./getCanonicalizedResource');
function constructStringToSign(request, data, log, clientType) {
export default function constructStringToSign(
request: any,
data: { [key: string]: string },
log: Logger,
clientType?: any
) {
/*
Build signature per AWS requirements:
StringToSign = HTTP-Verb + '\n' +
@ -42,5 +45,3 @@ function constructStringToSign(request, data, log, clientType) {
+ getCanonicalizedResource(request, clientType);
return utf8.encode(stringToSign);
}
module.exports = constructStringToSign;

View File

@ -1,14 +1,12 @@
'use strict'; // eslint-disable-line strict
function getCanonicalizedAmzHeaders(headers, clientType) {
export default function getCanonicalizedAmzHeaders(headers: Headers, clientType: string) {
/*
Iterate through headers and pull any headers that are x-amz headers.
Need to include 'x-amz-date' here even though the AWS docs
are ambiguous on this.
*/
const filterFn = clientType === 'GCP' ?
val => val.substr(0, 7) === 'x-goog-' :
val => val.substr(0, 6) === 'x-amz-';
(val: string) => val.substr(0, 7) === 'x-goog-' :
(val: string) => val.substr(0, 6) === 'x-amz-';
const amzHeaders = Object.keys(headers)
.filter(filterFn)
.map(val => [val.trim(), headers[val].trim()]);
@ -43,5 +41,3 @@ function getCanonicalizedAmzHeaders(headers, clientType) {
`${headerStr}${current[0]}:${current[1]}\n`,
'');
}
module.exports = getCanonicalizedAmzHeaders;

View File

@ -1,6 +1,4 @@
'use strict'; // eslint-disable-line strict
const url = require('url');
import * as url from 'url';
const gcpSubresources = [
'acl',
@ -41,7 +39,7 @@ const awsSubresources = [
'website',
];
function getCanonicalizedResource(request, clientType) {
export default function getCanonicalizedResource(request: any, clientType: string) {
/*
This variable is used to determine whether to insert
a '?' or '&'. Once a query parameter is added to the resourceString,
@ -117,5 +115,3 @@ function getCanonicalizedResource(request, clientType) {
}
return resourceString;
}
module.exports = getCanonicalizedResource;

View File

@ -1,12 +1,11 @@
'use strict'; // eslint-disable-line strict
import { Logger } from 'werelogs';
import errors from '../../errors';
import * as constants from '../../constants';
import constructStringToSign from './constructStringToSign';
import checkRequestExpiry from './checkRequestExpiry';
import algoCheck from './algoCheck';
const errors = require('../../errors');
const constants = require('../../constants');
const constructStringToSign = require('./constructStringToSign');
const checkRequestExpiry = require('./checkRequestExpiry');
const algoCheck = require('./algoCheck');
function check(request, log, data) {
export function check(request: any, log: Logger, data: { [key: string]: string }) {
log.trace('running header auth check');
const headers = request.headers;
@ -52,6 +51,7 @@ function check(request, log, data) {
log.trace('invalid authorization header', { authInfo });
return { err: errors.MissingSecurityHeader };
}
// @ts-ignore
log.addDefaultFields({ accessKey });
const signatureFromRequest = authInfo.substring(semicolonIndex + 1).trim();
@ -80,5 +80,3 @@ function check(request, log, data) {
},
};
}
module.exports = { check };

View File

@ -1,11 +1,10 @@
'use strict'; // eslint-disable-line strict
import { Logger } from 'werelogs';
import errors from '../../errors';
import * as constants from '../../constants';
import algoCheck from './algoCheck';
import constructStringToSign from './constructStringToSign';
const errors = require('../../errors');
const constants = require('../../constants');
const algoCheck = require('./algoCheck');
const constructStringToSign = require('./constructStringToSign');
function check(request, log, data) {
export function check(request: any, log: Logger, data: { [key: string]: string }) {
log.trace('running query auth check');
if (request.method === 'POST') {
log.debug('query string auth not supported for post requests');
@ -51,6 +50,7 @@ function check(request, log, data) {
return { err: errors.RequestTimeTooSkewed };
}
const accessKey = data.AWSAccessKeyId;
// @ts-ignore
log.addDefaultFields({ accessKey });
const signatureFromRequest = decodeURIComponent(data.Signature);
@ -82,5 +82,3 @@ function check(request, log, data) {
},
};
}
module.exports = { check };

View File

@ -1,11 +0,0 @@
'use strict'; // eslint-disable-line strict
const headerAuthCheck = require('./headerAuthCheck');
const queryAuthCheck = require('./queryAuthCheck');
const authV4 = {
header: headerAuthCheck,
query: queryAuthCheck,
};
module.exports = authV4;

lib/auth/v4/authV4.ts Normal file
View File

@ -0,0 +1,2 @@
export * as header from './headerAuthCheck';
export * as query from './queryAuthCheck';

View File

@ -1,5 +1,3 @@
'use strict'; // eslint-disable-line strict
/*
AWS's URI encoding rules:
URI encode every byte. Uri-Encode() must enforce the following rules:
@ -19,7 +17,7 @@ See http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-header-based-auth.html
*/
// converts utf8 character to hex and pads "%" before every two hex digits
function _toHexUTF8(char) {
function _toHexUTF8(char: string) {
const hexRep = Buffer.from(char, 'utf8').toString('hex').toUpperCase();
let res = '';
hexRep.split('').forEach((v, n) => {
@ -32,16 +30,21 @@ function _toHexUTF8(char) {
return res;
}
function awsURIencode(input, encodeSlash, noEncodeStar) {
const encSlash = encodeSlash === undefined ? true : encodeSlash;
let encoded = '';
export default function awsURIencode(
input: string,
encodeSlash?: boolean,
noEncodeStar?: boolean
) {
/**
 * Duplicate query params are not supported by AWS S3 APIs. These params
 * are parsed as Arrays by Node.js' HTTP parser, which breaks this method
*/
if (typeof input !== 'string') {
return encoded;
*/
if (typeof input !== 'string') {
return '';
}
let encoded = "";
const slash = encodeSlash === undefined || encodeSlash ? '%2F' : '/';
const star = noEncodeStar !== undefined && noEncodeStar ? '*' : '%2A';
for (let i = 0; i < input.length; i++) {
let ch = input.charAt(i);
if ((ch >= 'A' && ch <= 'Z') ||
@ -53,9 +56,9 @@ function awsURIencode(input, encodeSlash, noEncodeStar) {
} else if (ch === ' ') {
encoded = encoded.concat('%20');
} else if (ch === '/') {
encoded = encoded.concat(encSlash ? '%2F' : ch);
encoded = encoded.concat(slash);
} else if (ch === '*') {
encoded = encoded.concat(noEncodeStar ? '*' : '%2A');
encoded = encoded.concat(star);
} else {
if (ch >= '\uD800' && ch <= '\uDBFF') {
// If this character is a high surrogate peek the next character
@ -76,5 +79,3 @@ function awsURIencode(input, encodeSlash, noEncodeStar) {
}
return encoded;
}
module.exports = awsURIencode;
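A quick sanity sketch of these rules (assuming awsURIencode is imported from this module; expected outputs follow the AWS rules above):

awsURIencode('photos/2024 01.jpg');        // 'photos%2F2024%2001.jpg' (slash encoded by default)
awsURIencode('photos/2024 01.jpg', false); // 'photos/2024%2001.jpg' (slash kept)
awsURIencode('a*b');                       // 'a%2Ab'
awsURIencode('a*b', true, true);           // 'a*b' (star kept when noEncodeStar is set)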

View File

@ -1,17 +1,33 @@
'use strict'; // eslint-disable-line strict
const crypto = require('crypto');
const createCanonicalRequest = require('./createCanonicalRequest');
import * as crypto from 'crypto';
import { Logger } from 'werelogs';
import createCanonicalRequest from './createCanonicalRequest';
/**
* constructStringToSign - creates V4 stringToSign
* @param {object} params - params object
* @returns {string} - stringToSign
*/
function constructStringToSign(params) {
const { request, signedHeaders, payloadChecksum, credentialScope, timestamp,
query, log, proxyPath } = params;
export default function constructStringToSign(params: {
request: any;
signedHeaders: any;
payloadChecksum: any;
credentialScope: string;
timestamp: string;
query: { [key: string]: string };
log?: Logger;
proxyPath?: string;
awsService: string;
}): string | Error {
const {
request,
signedHeaders,
payloadChecksum,
credentialScope,
timestamp,
query,
log,
proxyPath,
} = params;
const path = proxyPath || request.path;
const canonicalReqResult = createCanonicalRequest({
@ -24,6 +40,8 @@ function constructStringToSign(params) {
service: params.awsService,
});
// TODO Why that line?
// @ts-ignore
if (canonicalReqResult instanceof Error) {
if (log) {
log.error('error creating canonicalRequest');
@ -40,5 +58,3 @@ function constructStringToSign(params) {
`${credentialScope}\n${canonicalHex}`;
return stringToSign;
}
module.exports = constructStringToSign;
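For reference, the returned stringToSign has the documented V4 shape; a sketch with placeholder values:

// canonicalHex stands in for the hex sha256 of the canonical request
const canonicalHex = '<hex sha256 of the canonical request>';
const stringToSign =
    'AWS4-HMAC-SHA256\n' +
    '20240101T000000Z\n' +                   // timestamp
    '20240101/us-east-1/s3/aws4_request\n' + // credentialScope
    canonicalHex;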

View File

@ -1,27 +1,33 @@
'use strict'; // eslint-disable-line strict
const awsURIencode = require('./awsURIencode');
const crypto = require('crypto');
const queryString = require('querystring');
import * as crypto from 'crypto';
import * as queryString from 'querystring';
import awsURIencode from './awsURIencode';
/**
* createCanonicalRequest - creates V4 canonical request
* @param {object} params - contains pHttpVerb (request type),
* @param params - contains pHttpVerb (request type),
* pResource (parsed from URL), pQuery (request query),
* pHeaders (request headers), pSignedHeaders (signed headers from request),
* payloadChecksum (from request)
* @returns {string} - canonicalRequest
* @returns - canonicalRequest
*/
function createCanonicalRequest(params) {
export default function createCanonicalRequest(
params: {
pHttpVerb: string;
pResource: string;
pQuery: { [key: string]: string };
pHeaders: any;
pSignedHeaders: any;
service: string;
payloadChecksum: string;
}
) {
const pHttpVerb = params.pHttpVerb;
const pResource = params.pResource;
const pQuery = params.pQuery;
const pHeaders = params.pHeaders;
const pSignedHeaders = params.pSignedHeaders;
const service = params.service;
let payloadChecksum = params.payloadChecksum;
if (!payloadChecksum) {
if (pHttpVerb === 'GET') {
payloadChecksum = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b' +
@ -34,7 +40,7 @@ function createCanonicalRequest(params) {
if (/aws-sdk-java\/[0-9.]+/.test(pHeaders['user-agent'])) {
notEncodeStar = true;
}
let payload = queryString.stringify(pQuery, null, null, {
let payload = queryString.stringify(pQuery, undefined, undefined, {
encodeURIComponent: input => awsURIencode(input, true,
notEncodeStar),
});
@ -61,11 +67,11 @@ function createCanonicalRequest(params) {
// signed headers
const signedHeadersList = pSignedHeaders.split(';');
signedHeadersList.sort((a, b) => a.localeCompare(b));
signedHeadersList.sort((a: any, b: any) => a.localeCompare(b));
const signedHeaders = signedHeadersList.join(';');
// canonical headers
const canonicalHeadersList = signedHeadersList.map(signedHeader => {
const canonicalHeadersList = signedHeadersList.map((signedHeader: any) => {
if (pHeaders[signedHeader] !== undefined) {
const trimmedHeader = pHeaders[signedHeader]
.trim().replace(/\s+/g, ' ');
@ -87,5 +93,3 @@ function createCanonicalRequest(params) {
`${signedHeaders}\n${payloadChecksum}`;
return canonicalRequest;
}
module.exports = createCanonicalRequest;
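The canonical request built here is six newline-joined pieces; a sketch with made-up values (the payload hash shown is the sha256 of an empty body):

const canonicalRequest = [
    'GET',                                    // pHttpVerb
    '/examplebucket/photo.jpg',               // URI-encoded pResource
    'prefix=photos',                          // sorted, URI-encoded query string
    'host:s3.amazonaws.com\nx-amz-date:20240101T000000Z\n', // canonical headers, each ending in '\n'
    'host;x-amz-date',                        // sorted signed headers joined with ';'
    'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
].join('\n');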

View File

@ -1,27 +1,32 @@
'use strict'; // eslint-disable-line strict
const errors = require('../../../lib/errors');
const constants = require('../../constants');
const constructStringToSign = require('./constructStringToSign');
const checkTimeSkew = require('./timeUtils').checkTimeSkew;
const convertUTCtoISO8601 = require('./timeUtils').convertUTCtoISO8601;
const convertAmzTimeToMs = require('./timeUtils').convertAmzTimeToMs;
const extractAuthItems = require('./validateInputs').extractAuthItems;
const validateCredentials = require('./validateInputs').validateCredentials;
const areSignedHeadersComplete =
require('./validateInputs').areSignedHeadersComplete;
import { Logger } from 'werelogs';
import errors from '../../../lib/errors';
import * as constants from '../../constants';
import constructStringToSign from './constructStringToSign';
import {
checkTimeSkew,
convertUTCtoISO8601,
convertAmzTimeToMs,
} from './timeUtils';
import {
extractAuthItems,
validateCredentials,
areSignedHeadersComplete,
} from './validateInputs';
/**
* V4 header auth check
* @param {object} request - HTTP request object
* @param {object} log - logging object
* @param {object} data - Parameters from queryString parsing or body of
* @param request - HTTP request object
* @param log - logging object
* @param data - Parameters from queryString parsing or body of
* POST request
 * @param {string} awsService - AWS service ('iam' or 's3')
 * @return {callback} calls callback
 * @param awsService - AWS service ('iam' or 's3')
*/
function check(request, log, data, awsService) {
export function check(
request: any,
log: Logger,
data: { [key: string]: string },
awsService: string
) {
log.trace('running header auth check');
const token = request.headers['x-amz-security-token'];
@ -62,16 +67,16 @@ function check(request, log, data, awsService) {
log.trace('authorization header from request', { authHeader });
const signatureFromRequest = authHeaderItems.signatureFromRequest;
const credentialsArr = authHeaderItems.credentialsArr;
const signedHeaders = authHeaderItems.signedHeaders;
const signatureFromRequest = authHeaderItems.signatureFromRequest!;
const credentialsArr = authHeaderItems.credentialsArr!;
const signedHeaders = authHeaderItems.signedHeaders!;
if (!areSignedHeadersComplete(signedHeaders, request.headers)) {
log.debug('signedHeaders are incomplete', { signedHeaders });
return { err: errors.AccessDenied };
}
let timestamp;
let timestamp: string | undefined;
// check request timestamp
const xAmzDate = request.headers['x-amz-date'];
if (xAmzDate) {
@ -127,7 +132,7 @@ function check(request, log, data, awsService) {
return { err: errors.RequestTimeTooSkewed };
}
let proxyPath = null;
let proxyPath: string | undefined;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
@ -178,5 +183,3 @@ function check(request, log, data, awsService) {
},
};
}
module.exports = { check };

View File

@ -1,24 +1,18 @@
'use strict'; // eslint-disable-line strict
const constants = require('../../constants');
const errors = require('../../errors');
const constructStringToSign = require('./constructStringToSign');
const checkTimeSkew = require('./timeUtils').checkTimeSkew;
const convertAmzTimeToMs = require('./timeUtils').convertAmzTimeToMs;
const validateCredentials = require('./validateInputs').validateCredentials;
const extractQueryParams = require('./validateInputs').extractQueryParams;
const areSignedHeadersComplete =
require('./validateInputs').areSignedHeadersComplete;
import { Logger } from 'werelogs';
import * as constants from '../../constants';
import errors from '../../errors';
import constructStringToSign from './constructStringToSign';
import { checkTimeSkew, convertAmzTimeToMs } from './timeUtils';
import { validateCredentials, extractQueryParams } from './validateInputs';
import { areSignedHeadersComplete } from './validateInputs';
/**
* V4 query auth check
* @param {object} request - HTTP request object
* @param {object} log - logging object
 * @param {object} data - Contains authentication params (GET or POST data)
* @return {callback} calls callback
* @param request - HTTP request object
* @param log - logging object
 * @param data - Contains authentication params (GET or POST data)
*/
function check(request, log, data) {
export function check(request: any, log: Logger, data: { [key: string]: string }) {
const authParams = extractQueryParams(data, log);
if (Object.keys(authParams).length !== 5) {
@ -33,11 +27,11 @@ function check(request, log, data) {
return { err: errors.InvalidToken };
}
const signedHeaders = authParams.signedHeaders;
const signatureFromRequest = authParams.signatureFromRequest;
const timestamp = authParams.timestamp;
const expiry = authParams.expiry;
const credential = authParams.credential;
const signedHeaders = authParams.signedHeaders!;
const signatureFromRequest = authParams.signatureFromRequest!;
const timestamp = authParams.timestamp!;
const expiry = authParams.expiry!;
const credential = authParams.credential!;
if (!areSignedHeadersComplete(signedHeaders, request.headers)) {
log.debug('signedHeaders are incomplete', { signedHeaders });
@ -62,7 +56,7 @@ function check(request, log, data) {
return { err: errors.RequestTimeTooSkewed };
}
let proxyPath = null;
let proxyPath: string | undefined;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
@ -122,5 +116,3 @@ function check(request, log, data) {
},
};
}
module.exports = { check };

View File

@ -1,33 +1,67 @@
const { Transform } = require('stream');
import { Transform } from 'stream';
import async from 'async';
import errors from '../../../errors';
import { Logger } from 'werelogs';
import Vault, { AuthV4RequestParams } from '../../Vault';
import { Callback } from '../../backends/in_memory/types';
const async = require('async');
const errors = require('../../../errors');
import constructChunkStringToSign from './constructChunkStringToSign';
const constructChunkStringToSign = require('./constructChunkStringToSign');
export type TransformParams = {
accessKey: string;
signatureFromRequest: string;
region: string;
scopeDate: string;
timestamp: string;
credentialScope: string;
};
/**
* This class is designed to handle the chunks sent in a streaming
* v4 Auth request
*/
class V4Transform extends Transform {
export default class V4Transform extends Transform {
log: Logger;
cb: Callback;
accessKey: string;
region: string;
scopeDate: string;
timestamp: string;
credentialScope: string;
lastSignature: string;
currentSignature?: string;
haveMetadata: boolean;
seekingDataSize: number;
currentData?: any;
dataCursor: number;
currentMetadata: any[];
lastPieceDone: boolean;
lastChunk: boolean;
vault: Vault;
/**
* @constructor
* @param {object} streamingV4Params - info for chunk authentication
* @param {string} streamingV4Params.accessKey - requester's accessKey
* @param {string} streamingV4Params.signatureFromRequest - signature
* @param streamingV4Params - info for chunk authentication
* @param streamingV4Params.accessKey - requester's accessKey
* @param streamingV4Params.signatureFromRequest - signature
* sent with headers
* @param {string} streamingV4Params.region - region sent with auth header
* @param {string} streamingV4Params.scopeDate - date sent with auth header
* @param {string} streamingV4Params.timestamp - date parsed from headers
* @param streamingV4Params.region - region sent with auth header
* @param streamingV4Params.scopeDate - date sent with auth header
* @param streamingV4Params.timestamp - date parsed from headers
* in ISO 8601 format: YYYYMMDDTHHMMSSZ
* @param {string} streamingV4Params.credentialScope - items from auth
* @param streamingV4Params.credentialScope - items from auth
* header plus the string 'aws4_request' joined with '/':
* timestamp/region/aws-service/aws4_request
* @param {object} vault - Vault instance passed from CloudServer
* @param {object} log - logger object
* @param {function} cb - callback to api
* @param vault - Vault instance passed from CloudServer
* @param log - logger object
* @param cb - callback to api
*/
constructor(streamingV4Params, vault, log, cb) {
constructor(
streamingV4Params: TransformParams,
vault: Vault,
log: Logger,
cb: Callback,
) {
const { accessKey, signatureFromRequest, region, scopeDate, timestamp,
credentialScope } = streamingV4Params;
super({});
@ -55,8 +89,8 @@ class V4Transform extends Transform {
/**
* This function will parse the metadata portion of the chunk
* @param {Buffer} remainingChunk - chunk sent from _transform
* @return {object} response - if error, will return 'err' key with
* @param remainingChunk - chunk sent from _transform
* @return response - if error, will return 'err' key with
* arsenal error value.
* if incomplete metadata, will return 'completeMetadata' key with
* value false
@ -64,7 +98,7 @@ class V4Transform extends Transform {
* value true and the key 'unparsedChunk' with the remaining chunk without
* the parsed metadata piece
*/
_parseMetadata(remainingChunk) {
_parseMetadata(remainingChunk: Buffer) {
let remainingPlusStoredMetadata = remainingChunk;
// have metadata pieces so need to add to the front of
// remainingChunk
@ -103,9 +137,8 @@ class V4Transform extends Transform {
'metadata format');
return { err: errors.InvalidArgument };
}
let dataSize = splitMeta[0];
// chunk-size is sent in hex
dataSize = Number.parseInt(dataSize, 16);
const dataSize = Number.parseInt(splitMeta[0], 16);
if (Number.isNaN(dataSize)) {
this.log.trace('chunk body did not contain valid size');
return { err: errors.InvalidArgument };
@ -139,28 +172,30 @@ class V4Transform extends Transform {
/**
* Build the stringToSign and authenticate the chunk
* @param {Buffer} dataToSend - chunk sent from _transform or null
* @param dataToSend - chunk sent from _transform or null
* if last chunk without data
* @param {function} done - callback to _transform
* @return {function} executes callback with err if applicable
* @param done - callback to _transform
* @return executes callback with err if applicable
*/
_authenticate(dataToSend, done) {
_authenticate(dataToSend: Buffer | null, done: Callback) {
// use prior sig to construct new string to sign
const stringToSign = constructChunkStringToSign(this.timestamp,
this.credentialScope, this.lastSignature, dataToSend);
this.credentialScope, this.lastSignature, dataToSend ?? undefined);
this.log.trace('constructed chunk string to sign',
{ stringToSign });
// once used prior sig to construct string to sign, reassign
// lastSignature to current signature
this.lastSignature = this.currentSignature;
const vaultParams = {
this.lastSignature = this.currentSignature!;
const vaultParams: AuthV4RequestParams = {
log: this.log,
data: {
accessKey: this.accessKey,
signatureFromRequest: this.currentSignature,
signatureFromRequest: this.currentSignature!,
region: this.region,
scopeDate: this.scopeDate,
stringToSign,
// TODO FIXME This can not work
// @ts-expect-errors
timestamp: this.timestamp,
credentialScope: this.credentialScope,
},
@ -181,12 +216,12 @@ class V4Transform extends Transform {
* use the metadata to authenticate with vault and send the
* data on to be stored if authentication passes
*
* @param {Buffer} chunk - chunk from request body
* @param {string} encoding - Data encoding
* @param {function} callback - Callback(err, justDataChunk, encoding)
* @return {function }executes callback with err if applicable
* @param chunk - chunk from request body
 * @param _encoding - Data encoding (unused)
* @param callback - Callback(err, justDataChunk, encoding)
* @return executes callback with err if applicable
*/
_transform(chunk, encoding, callback) {
_transform(chunk: Buffer, _encoding: string, callback: Callback) {
// 'chunk' here is the node streaming chunk
// transfer-encoding chunks should be of the format:
// string(IntHexBase(chunk-size)) + ";chunk-signature=" +
@ -223,6 +258,8 @@ class V4Transform extends Transform {
}
// have metadata so reset unparsedChunk to remaining
// without metadata piece
// TODO Is that okay?
// @ts-expect-errors
unparsedChunk = parsedMetadataResults.unparsedChunk;
}
if (this.lastChunk) {
@ -269,7 +306,7 @@ class V4Transform extends Transform {
// final callback
err => {
if (err) {
return this.cb(err);
return this.cb(err as any);
}
// get next chunk
return callback();
@ -277,5 +314,3 @@ class V4Transform extends Transform {
);
}
}
module.exports = V4Transform;
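For reference, each wire chunk consumed by _transform follows the layout described in the comment above; a sketch with placeholder values:

// '400' is the data size in hex (0x400 = 1024 bytes of payload)
const exampleChunk =
    '400;chunk-signature=<64-hex-signature>\r\n' +
    '<1024 bytes of chunk data>\r\n';
// the stream ends with a zero-size chunk that carries no data
const finalChunk = '0;chunk-signature=<64-hex-signature>\r\n\r\n';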

View File

@ -1,32 +0,0 @@
const crypto = require('crypto');
const constants = require('../../../constants');
/**
* Constructs stringToSign for chunk
* @param {string} timestamp - date parsed from headers
* in ISO 8601 format: YYYYMMDDTHHMMSSZ
* @param {string} credentialScope - items from auth
* header plus the string 'aws4_request' joined with '/':
* timestamp/region/aws-service/aws4_request
* @param {string} lastSignature - signature from headers or prior chunk
* @param {string} justDataChunk - data portion of chunk
* @returns {string} stringToSign
*/
function constructChunkStringToSign(timestamp,
credentialScope, lastSignature, justDataChunk) {
let currentChunkHash;
// for last chunk, there will be no data, so use emptyStringHash
if (!justDataChunk) {
currentChunkHash = constants.emptyStringHash;
} else {
currentChunkHash = crypto.createHash('sha256');
currentChunkHash = currentChunkHash
.update(justDataChunk, 'binary').digest('hex');
}
return `AWS4-HMAC-SHA256-PAYLOAD\n${timestamp}\n` +
`${credentialScope}\n${lastSignature}\n` +
`${constants.emptyStringHash}\n${currentChunkHash}`;
}
module.exports = constructChunkStringToSign;

View File

@ -0,0 +1,35 @@
import * as crypto from 'crypto';
import * as constants from '../../../constants';
/**
* Constructs stringToSign for chunk
* @param timestamp - date parsed from headers
* in ISO 8601 format: YYYYMMDDTHHMMSSZ
* @param credentialScope - items from auth
* header plus the string 'aws4_request' joined with '/':
* timestamp/region/aws-service/aws4_request
* @param lastSignature - signature from headers or prior chunk
* @param justDataChunk - data portion of chunk
* @returns stringToSign
*/
export default function constructChunkStringToSign(
timestamp: string,
credentialScope: string,
lastSignature: string,
justDataChunk?: Buffer | string,
) {
let currentChunkHash: string;
// for last chunk, there will be no data, so use emptyStringHash
if (!justDataChunk) {
currentChunkHash = constants.emptyStringHash;
} else {
const hash = crypto.createHash('sha256');
const temp = justDataChunk instanceof Buffer
? hash.update(justDataChunk)
: hash.update(justDataChunk, 'binary');
currentChunkHash = temp.digest('hex');
}
return `AWS4-HMAC-SHA256-PAYLOAD\n${timestamp}\n` +
`${credentialScope}\n${lastSignature}\n` +
`${constants.emptyStringHash}\n${currentChunkHash}`;
}
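Each chunk's stringToSign embeds the previous chunk's signature, so signatures chain across the whole stream; a sketch with placeholder values (emptyStringHash comes from the constants imported above):

const lastSignature = '<signature from the headers, or from the prior chunk>';
const currentChunkHash = "<hex sha256 of this chunk's data>";
const chunkStringToSign =
    'AWS4-HMAC-SHA256-PAYLOAD\n' +
    '20240101T000000Z\n' +                   // timestamp
    '20240101/us-east-1/s3/aws4_request\n' + // credentialScope
    lastSignature + '\n' +
    constants.emptyStringHash + '\n' +
    currentChunkHash;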

View File

@ -1,12 +1,11 @@
'use strict'; // eslint-disable-line strict
import { Logger } from 'werelogs';
/**
* Convert timestamp to milliseconds since Unix Epoch
* @param {string} timestamp of ISO8601Timestamp format without
* @param timestamp of ISO8601Timestamp format without
* dashes or colons, e.g. 20160202T220410Z
* @return {number} number of milliseconds since Unix Epoch
*/
function convertAmzTimeToMs(timestamp) {
export function convertAmzTimeToMs(timestamp: string) {
const arr = timestamp.split('');
// Convert to YYYY-MM-DDTHH:mm:ss.sssZ
const ISO8601time = `${arr.slice(0, 4).join('')}-${arr[4]}${arr[5]}` +
@ -15,13 +14,12 @@ function convertAmzTimeToMs(timestamp) {
return Date.parse(ISO8601time);
}
/**
* Convert UTC timestamp to ISO 8601 timestamp
* @param {string} timestamp of UTC form: Fri, 10 Feb 2012 21:34:55 GMT
* @return {string} ISO8601 timestamp of form: YYYYMMDDTHHMMSSZ
* @param timestamp of UTC form: Fri, 10 Feb 2012 21:34:55 GMT
* @return ISO8601 timestamp of form: YYYYMMDDTHHMMSSZ
*/
function convertUTCtoISO8601(timestamp) {
export function convertUTCtoISO8601(timestamp: string | number) {
// convert to ISO string: YYYY-MM-DDTHH:mm:ss.sssZ.
const converted = new Date(timestamp).toISOString();
// Remove "-"s and "."s and milliseconds
@ -30,13 +28,13 @@ function convertUTCtoISO8601(timestamp) {
/**
* Check whether timestamp predates request or is too old
* @param {string} timestamp of ISO8601Timestamp format without
* @param timestamp of ISO8601Timestamp format without
* dashes or colons, e.g. 20160202T220410Z
* @param {number} expiry - number of seconds signature should be valid
* @param {object} log - log for request
* @return {boolean} true if there is a time problem
* @param expiry - number of seconds signature should be valid
* @param log - log for request
* @return true if there is a time problem
*/
function checkTimeSkew(timestamp, expiry, log) {
export function checkTimeSkew(timestamp: string, expiry: number, log: Logger) {
const currentTime = Date.now();
const fifteenMinutes = (15 * 60 * 1000);
const parsedTimestamp = convertAmzTimeToMs(timestamp);
@ -56,5 +54,3 @@ function checkTimeSkew(timestamp, expiry, log) {
}
return false;
}
module.exports = { convertAmzTimeToMs, convertUTCtoISO8601, checkTimeSkew };
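A couple of concrete conversions, assuming the helpers are imported from this module:

convertAmzTimeToMs('20160202T220410Z');               // 1454450650000
convertUTCtoISO8601('Fri, 10 Feb 2012 21:34:55 GMT'); // '20120210T213455Z'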

View File

@ -1,17 +1,19 @@
'use strict'; // eslint-disable-line strict
const errors = require('../../../lib/errors');
import { Logger } from 'werelogs';
import errors from '../../../lib/errors';
/**
* Validate Credentials
* @param {array} credentials - contains accessKey, scopeDate,
* @param credentials - contains accessKey, scopeDate,
* region, service, requestType
* @param {string} timestamp - timestamp from request in
* @param timestamp - timestamp from request in
* the format of ISO 8601: YYYYMMDDTHHMMSSZ
* @param {object} log - logging object
* @return {boolean} true if credentials are correct format, false if not
* @param log - logging object
*/
function validateCredentials(credentials, timestamp, log) {
export function validateCredentials(
credentials: [string, string, string, string, string],
timestamp: string,
log: Logger
): Error | {} {
if (!Array.isArray(credentials) || credentials.length !== 5) {
log.warn('credentials in improper format', { credentials });
return errors.InvalidArgument;
@ -58,12 +60,21 @@ function validateCredentials(credentials, timestamp, log) {
/**
* Extract and validate components from query object
* @param {object} queryObj - query object from request
* @param {object} log - logging object
* @return {object} object containing extracted query params for authV4
* @param queryObj - query object from request
* @param log - logging object
* @return object containing extracted query params for authV4
*/
function extractQueryParams(queryObj, log) {
const authParams = {};
export function extractQueryParams(
queryObj: { [key: string]: string | undefined },
log: Logger
) {
const authParams: {
signedHeaders?: string;
signatureFromRequest?: string;
timestamp?: string;
expiry?: number;
credential?: [string, string, string, string, string];
} = {};
// Do not need the algorithm sent back
if (queryObj['X-Amz-Algorithm'] !== 'AWS4-HMAC-SHA256') {
@ -99,7 +110,7 @@ function extractQueryParams(queryObj, log) {
return authParams;
}
const expiry = Number.parseInt(queryObj['X-Amz-Expires'], 10);
const expiry = Number.parseInt(queryObj['X-Amz-Expires'] ?? 'nope', 10);
const sevenDays = 604800;
if (expiry && (expiry > 0 && expiry <= sevenDays)) {
authParams.expiry = expiry;
@ -110,6 +121,7 @@ function extractQueryParams(queryObj, log) {
const credential = queryObj['X-Amz-Credential'];
if (credential && credential.length > 28 && credential.indexOf('/') > -1) {
// @ts-ignore
authParams.credential = credential.split('/');
} else {
log.warn('invalid credential param', { credential });
@ -121,14 +133,17 @@ function extractQueryParams(queryObj, log) {
/**
* Extract and validate components from auth header
* @param {string} authHeader - authorization header from request
* @param {object} log - logging object
* @return {object} object containing extracted auth header items for authV4
* @param authHeader - authorization header from request
* @param log - logging object
* @return object containing extracted auth header items for authV4
*/
function extractAuthItems(authHeader, log) {
const authItems = {};
const authArray = authHeader
.replace('AWS4-HMAC-SHA256 ', '').split(',');
export function extractAuthItems(authHeader: string, log: Logger) {
const authItems: {
credentialsArr?: [string, string, string, string, string];
signedHeaders?: string;
signatureFromRequest?: string;
} = {};
const authArray = authHeader.replace('AWS4-HMAC-SHA256 ', '').split(',');
if (authArray.length < 3) {
return authItems;
@ -138,8 +153,12 @@ function extractAuthItems(authHeader, log) {
const signedHeadersStr = authArray[1];
const signatureStr = authArray[2];
log.trace('credentials from request', { credentialStr });
if (credentialStr && credentialStr.trim().startsWith('Credential=')
&& credentialStr.indexOf('/') > -1) {
if (
credentialStr &&
credentialStr.trim().startsWith('Credential=') &&
credentialStr.indexOf('/') > -1
) {
// @ts-ignore
authItems.credentialsArr = credentialStr
.trim().replace('Credential=', '').split('/');
} else {
@ -166,11 +185,11 @@ function extractAuthItems(authHeader, log) {
/**
* Checks whether the signed headers include the host header
* and all x-amz- and x-scal- headers in request
* @param {string} signedHeaders - signed headers sent with request
* @param {object} allHeaders - request.headers
* @return {boolean} true if all x-amz-headers included and false if not
* @param signedHeaders - signed headers sent with request
* @param allHeaders - request.headers
* @return true if all x-amz-headers included and false if not
*/
function areSignedHeadersComplete(signedHeaders, allHeaders) {
export function areSignedHeadersComplete(signedHeaders: string, allHeaders: Headers) {
const signedHeadersList = signedHeaders.split(';');
if (signedHeadersList.indexOf('host') === -1) {
return false;
@ -185,6 +204,3 @@ function areSignedHeadersComplete(signedHeaders, allHeaders) {
}
return true;
}
module.exports = { validateCredentials, extractQueryParams,
areSignedHeadersComplete, extractAuthItems };
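For reference, the Authorization header this parser expects looks like the following (made-up credential, placeholder signature):

const authHeader = 'AWS4-HMAC-SHA256 ' +
    'Credential=AKIAIOSFODNN7EXAMPLE/20240101/us-east-1/s3/aws4_request, ' +
    'SignedHeaders=host;x-amz-content-sha256;x-amz-date, ' +
    'Signature=<64-hex-signature>';
// extractAuthItems(authHeader, log) then yields:
//   credentialsArr       -> ['AKIAIOSFODNN7EXAMPLE', '20240101', 'us-east-1', 's3', 'aws4_request']
//   signedHeaders        -> 'host;x-amz-content-sha256;x-amz-date'
//   signatureFromRequest -> '<64-hex-signature>'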

View File

@ -0,0 +1,569 @@
import cluster, { Worker } from 'cluster';
import * as werelogs from 'werelogs';
import { default as errors } from '../../lib/errors';
const rpcLogger = new werelogs.Logger('ClusterRPC');
/**
* Remote procedure calls support between cluster workers.
*
* When using the cluster module, new processes are forked and are
* dispatched workloads, usually HTTP requests. The ClusterRPC module
* implements a RPC system to send commands to all cluster worker
* processes at once from any particular worker, and retrieve their
* individual command results, like a distributed map operation.
*
* The existing nodejs cluster IPC channel is setup from the primary
* to each worker, but not between workers, so there has to be a hop
* by the primary.
*
* How a command is treated:
*
* - a worker sends a command message to the primary
*
* - the primary then forwards that command to each existing worker
* (including the requestor)
*
* - each worker then executes the command and returns a result or an
* error
*
* - the primary gathers all workers results into an array
*
* - finally, the primary dispatches the results array to the original
* requesting worker
*
*
* Limitations:
*
* - The command payload must be serializable, which means that:
* - it should not contain circular references
* - it should be of a reasonable size to be sent in a single RPC message
*
* - The "toWorkers" parameter of value "*" targets the set of workers
* that are available at the time the command is dispatched. Any new
* worker spawned after the command has been dispatched for
 * processing, but before the command completes, doesn't execute
 * the command and hence is not part of the results array.
*
*
* To set it up:
*
* - On the primary:
* if (cluster.isPrimary) {
* setupRPCPrimary();
* }
*
* - On the workers:
* if (!cluster.isPrimary) {
* setupRPCWorker({
* handler1: (payload: object, uids: string, callback: HandlerCallback) => void,
* handler2: ...
* });
* }
* Handler functions will be passed the command payload, request
* serialized uids, and must call the callback when the worker is done
* processing the command:
* callback(error: Error | null | undefined, result?: any)
*
* When this setup is done, any worker can start sending commands by calling
* the async function sendWorkerCommand().
*/
// exported types
export type ResultObject = {
error: Error | null;
result: any;
};
/**
* saved Promise for sendWorkerCommand
*/
export type CommandPromise = {
resolve: (results?: ResultObject[]) => void;
reject: (error: Error) => void;
timeout: NodeJS.Timeout | null;
};
export type HandlerCallback = (error: (Error & { code?: number }) | null | undefined, result?: any) => void;
export type HandlerFunction = (payload: object, uids: string, callback: HandlerCallback) => void;
export type HandlersMap = {
[index: string]: HandlerFunction;
};
export type PrimaryHandlerFunction = (worker: Worker, payload: object, uids: string, callback: HandlerCallback) => void;
export type PrimaryHandlersMap = Record<string, PrimaryHandlerFunction>;
// private types
type RPCMessage<T extends string, P> = {
type: T;
uids: string;
payload: P;
};
type RPCCommandMessage = RPCMessage<'cluster-rpc:command', any> & {
toWorkers: string;
toHandler: string;
};
type MarshalledResultObject = {
error: string | null;
errorCode?: number;
result: any;
};
type RPCCommandResultMessage = RPCMessage<'cluster-rpc:commandResult', MarshalledResultObject>;
type RPCCommandResultsMessage = RPCMessage<'cluster-rpc:commandResults', {
results: MarshalledResultObject[];
}>;
type RPCCommandErrorMessage = RPCMessage<'cluster-rpc:commandError', {
error: string;
}>;
interface RPCSetupOptions {
/**
* As werelogs is not a peerDependency, arsenal and a parent project
* might have their own separate versions duplicated in dependencies.
 * The config is therefore not shared.
* Use this to propagate werelogs config to arsenal's ClusterRPC.
*/
werelogsConfig?: Parameters<typeof werelogs.configure>[0];
};
/**
* In primary: store worker IDs that are waiting to be dispatched
* their command's results, as a mapping.
*/
const uidsToWorkerId: {
[index: string]: number;
} = {};
/**
* In primary: store worker responses for commands in progress as a
* mapping.
*
* Result objects are 'null' while the worker is still processing the
* command. When a worker finishes processing it stores the result as:
* {
* error: string | null,
* result: any
* }
*/
const uidsToCommandResults: {
[index: string]: {
[index: number]: MarshalledResultObject | null;
};
} = {};
/**
* In workers: store promise callbacks for commands waiting to be
* dispatched, as a mapping.
*/
const uidsToCommandPromise: {
[index: string]: CommandPromise;
} = {};
function _isRpcMessage(message) {
return (message !== null &&
typeof message === 'object' &&
typeof message.type === 'string' &&
message.type.startsWith('cluster-rpc:'));
}
/**
* Setup cluster RPC system on the primary
*
* @param {object} [handlers] - mapping of handler names to handler functions
* handler function:
* `handler({Worker} worker, {object} payload, {string} uids, {function} callback)`
* handler callback must be called when worker is done with the command:
* `callback({Error|null} error, {any} [result])`
* @return {undefined}
*/
export function setupRPCPrimary(handlers?: PrimaryHandlersMap, options?: RPCSetupOptions) {
if (options?.werelogsConfig) {
werelogs.configure(options.werelogsConfig);
}
cluster.on('message', (worker, message) => {
if (_isRpcMessage(message)) {
_handlePrimaryMessage(worker, message, handlers);
}
});
}
/**
* Setup RPCs on a cluster worker process
*
* @param {object} handlers - mapping of handler names to handler functions
* handler function:
* handler({object} payload, {string} uids, {function} callback)
* handler callback must be called when worker is done with the command:
* callback({Error|null} error, {any} [result])
 * @return {undefined}
*/
export function setupRPCWorker(handlers: HandlersMap, options?: RPCSetupOptions) {
if (!process.send) {
throw new Error('fatal: cannot setup cluster RPC: "process.send" is not available');
}
if (options?.werelogsConfig) {
werelogs.configure(options.werelogsConfig);
}
process.on('message', (message: RPCCommandMessage | RPCCommandResultsMessage) => {
if (_isRpcMessage(message)) {
_handleWorkerMessage(message, handlers);
}
});
}
/**
* Send a command for workers to execute in parallel, and wait for results
*
* @param {string} toWorkers - which workers should execute the command
* Currently the supported values are:
* - "*", meaning all workers will execute the command
* - "PRIMARY", meaning primary process will execute the command
* @param {string} toHandler - name of handler that will execute the
* command in workers, as declared in setupRPCWorker() parameter object
* @param {string} uids - unique identifier of the command, must be
* unique across all commands in progress
* @param {object} payload - message payload, sent as-is to the handler
* @param {number} [timeoutMs=60000] - timeout the command with a
* "RequestTimeout" error after this number of milliseconds - set to 0
* to disable timeouts (the command may then hang forever)
* @returns {Promise}
*/
export async function sendWorkerCommand(
toWorkers: string,
toHandler: string,
uids: string,
payload: object,
timeoutMs: number = 60000
) {
if (typeof uids !== 'string') {
rpcLogger.error('missing or invalid "uids" field', { uids });
throw errors.MissingParameter;
}
if (uidsToCommandPromise[uids] !== undefined) {
rpcLogger.error('a command is already in progress with same uids', { uids });
throw errors.OperationAborted;
}
rpcLogger.info('sending command', { toWorkers, toHandler, uids, payload });
return new Promise((resolve, reject) => {
let timeout: NodeJS.Timeout | null = null;
if (timeoutMs) {
timeout = setTimeout(() => {
delete uidsToCommandPromise[uids];
reject(errors.RequestTimeout);
}, timeoutMs);
}
uidsToCommandPromise[uids] = { resolve, reject, timeout };
const message: RPCCommandMessage = {
type: 'cluster-rpc:command',
toWorkers,
toHandler,
uids,
payload,
};
return process.send?.(message);
});
}
/**
* Get the number of commands in flight
* @returns {number}
*/
export function getPendingCommandsCount() {
return Object.keys(uidsToCommandPromise).length;
}
function _dispatchCommandResultsToWorker(
worker: Worker,
uids: string,
resultsArray: MarshalledResultObject[]
): void {
const message: RPCCommandResultsMessage = {
type: 'cluster-rpc:commandResults',
uids,
payload: {
results: resultsArray,
},
};
worker.send(message);
}
function _dispatchCommandErrorToWorker(
worker: Worker,
uids: string,
error: Error,
): void {
const message: RPCCommandErrorMessage = {
type: 'cluster-rpc:commandError',
uids,
payload: {
error: error.message,
},
};
worker.send(message);
}
function _sendPrimaryCommandResult(
worker: Worker,
uids: string,
error: (Error & { code?: number }) | null | undefined,
result?: any
): void {
const message: RPCCommandResultsMessage = {
type: 'cluster-rpc:commandResults',
uids,
payload: {
results: [{ error: error?.message || null, errorCode: error?.code, result }],
},
};
worker.send?.(message);
}
function _handlePrimaryCommandMessage(
fromWorker: Worker,
logger: any,
message: RPCCommandMessage,
handlers?: PrimaryHandlersMap
): void {
const { toWorkers, toHandler, uids, payload } = message;
if (toWorkers === '*') {
if (uidsToWorkerId[uids] !== undefined) {
logger.warn('new command already has a waiting worker with same uids', {
uids, workerId: uidsToWorkerId[uids],
});
return undefined;
}
const commandResults = {};
for (const workerId of Object.keys(cluster.workers || {})) {
commandResults[workerId] = null;
}
uidsToWorkerId[uids] = fromWorker?.id;
uidsToCommandResults[uids] = commandResults;
for (const [workerId, worker] of Object.entries(cluster.workers || {})) {
logger.debug('sending command message to worker', {
workerId, toHandler, payload,
});
if (worker) {
worker.send(message);
}
}
} else if (toWorkers === 'PRIMARY') {
const { toHandler, uids, payload } = message;
const cb: HandlerCallback = (err, result) => _sendPrimaryCommandResult(fromWorker, uids, err, result);
if (toHandler in (handlers || {})) {
return handlers![toHandler](fromWorker, payload, uids, cb);
}
logger.error('no such handler in "toHandler" field from worker command message', {
toHandler,
});
return cb(errors.NotImplemented);
} else {
logger.error('unsupported "toWorkers" field from worker command message', {
toWorkers,
});
if (fromWorker) {
_dispatchCommandErrorToWorker(fromWorker, uids, errors.NotImplemented);
}
}
}
function _handlePrimaryCommandResultMessage(
fromWorkerId: number,
logger: any,
message: RPCCommandResultMessage
): void {
const { uids, payload } = message;
const commandResults = uidsToCommandResults[uids];
if (!commandResults) {
logger.warn('received command response message from worker for command not in flight', {
workerId: fromWorkerId,
uids,
});
return undefined;
}
if (commandResults[fromWorkerId] === undefined) {
logger.warn('received command response message with unexpected worker ID', {
workerId: fromWorkerId,
uids,
});
return undefined;
}
if (commandResults[fromWorkerId] !== null) {
logger.warn('ignoring duplicate command response from worker', {
workerId: fromWorkerId,
uids,
});
return undefined;
}
commandResults[fromWorkerId] = payload;
const commandResultsArray = Object.values(commandResults);
if (commandResultsArray.every(response => response !== null)) {
logger.debug('all workers responded to command', { uids });
const completeCommandResultsArray = <MarshalledResultObject[]> commandResultsArray;
const toWorkerId = uidsToWorkerId[uids];
const toWorker = cluster.workers?.[toWorkerId];
delete uidsToCommandResults[uids];
delete uidsToWorkerId[uids];
if (!toWorker) {
logger.warn('worker shut down while its command was executing', {
workerId: toWorkerId, uids,
});
return undefined;
}
// send back response to original worker
_dispatchCommandResultsToWorker(toWorker, uids, completeCommandResultsArray);
}
}
function _handlePrimaryMessage(
fromWorker: Worker,
message: RPCCommandMessage | RPCCommandResultMessage,
handlers?: PrimaryHandlersMap
): void {
const { type: messageType, uids } = message;
const logger = rpcLogger.newRequestLoggerFromSerializedUids(uids);
logger.debug('primary received message from worker', {
workerId: fromWorker?.id, rpcMessage: message,
});
if (messageType === 'cluster-rpc:command') {
return _handlePrimaryCommandMessage(fromWorker, logger, message, handlers);
}
if (messageType === 'cluster-rpc:commandResult') {
return _handlePrimaryCommandResultMessage(fromWorker?.id, logger, message);
}
logger.error('unsupported message type', {
workerId: fromWorker?.id, messageType, uids,
});
return undefined;
}
function _sendWorkerCommandResult(
uids: string,
error: Error | null | undefined,
result?: any
): void {
const message: RPCCommandResultMessage = {
type: 'cluster-rpc:commandResult',
uids,
payload: {
error: error ? error.message : null,
result,
},
};
process.send?.(message);
}
function _handleWorkerCommandMessage(
logger: any,
message: RPCCommandMessage,
handlers: HandlersMap
): void {
const { toHandler, uids, payload } = message;
const cb: HandlerCallback = (err, result) => _sendWorkerCommandResult(uids, err, result);
if (toHandler in handlers) {
return handlers[toHandler](payload, uids, cb);
}
logger.error('no such handler in "toHandler" field from worker command message', {
toHandler,
});
return cb(errors.NotImplemented);
}
function _handleWorkerCommandResultsMessage(
logger: any,
message: RPCCommandResultsMessage,
): void {
const { uids, payload } = message;
const { results } = payload;
const commandPromise: CommandPromise = uidsToCommandPromise[uids];
if (commandPromise === undefined) {
logger.error('missing promise for command results', { uids, payload });
return undefined;
}
if (commandPromise.timeout) {
clearTimeout(commandPromise.timeout);
}
delete uidsToCommandPromise[uids];
const unmarshalledResults = results.map(workerResult => {
let workerError: Error | null = null;
if (workerResult.error) {
if (workerResult.error in errors) {
workerError = errors[workerResult.error];
} else {
workerError = new Error(workerResult.error);
}
}
if (workerError && workerResult.errorCode) {
(workerError as Error & { code: number }).code = workerResult.errorCode;
}
const unmarshalledResult: ResultObject = {
error: workerError,
result: workerResult.result,
};
return unmarshalledResult;
});
return commandPromise.resolve(unmarshalledResults);
}
function _handleWorkerCommandErrorMessage(
logger: any,
message: RPCCommandErrorMessage,
): void {
const { uids, payload } = message;
const { error } = payload;
const commandPromise: CommandPromise = uidsToCommandPromise[uids];
if (commandPromise === undefined) {
logger.error('missing promise for command results', { uids, payload });
return undefined;
}
if (commandPromise.timeout) {
clearTimeout(commandPromise.timeout);
}
delete uidsToCommandPromise[uids];
let commandError: Error | null = null;
if (error in errors) {
commandError = errors[error];
} else {
commandError = new Error(error);
}
return commandPromise.reject(<Error> commandError);
}
function _handleWorkerMessage(
message: RPCCommandMessage | RPCCommandResultsMessage | RPCCommandErrorMessage,
handlers: HandlersMap
): void {
const { type: messageType, uids } = message;
const workerId = cluster.worker?.id;
const logger = rpcLogger.newRequestLoggerFromSerializedUids(uids);
logger.debug('worker received message from primary', {
workerId, rpcMessage: message,
});
if (messageType === 'cluster-rpc:command') {
return _handleWorkerCommandMessage(logger, message, handlers);
}
if (messageType === 'cluster-rpc:commandResults') {
return _handleWorkerCommandResultsMessage(logger, message);
}
if (messageType === 'cluster-rpc:commandError') {
return _handleWorkerCommandErrorMessage(logger, message);
}
logger.error('unsupported message type', {
workerId, messageType,
});
return undefined;
}
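Putting the pieces together, a minimal end-to-end sketch (the import path, handler name and payload are made up; uids just has to be unique among in-flight commands):

import cluster from 'cluster';
// hypothetical path to this module
import { setupRPCPrimary, setupRPCWorker, sendWorkerCommand } from './ClusterRPC';

if (cluster.isPrimary) {
    setupRPCPrimary();
    cluster.fork();
    cluster.fork();
} else {
    setupRPCWorker({
        // report this worker's pid back to the requesting worker
        getPid: (payload, uids, callback) => callback(null, { pid: process.pid }),
    });
    // fan the command out to every worker (including this one)
    sendWorkerCommand('*', 'getPid', `getPid-${process.pid}`, {})
        .then((results: any) => results.forEach((r: any) => console.log(r.error ?? r.result)))
        .catch(err => console.error('command failed', err));
}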

View File

@ -1,151 +0,0 @@
'use strict'; // eslint-disable-line strict
const crypto = require('crypto');
// The min value here is to manage further backward compat if we
// need it
// Default value
const vaultGeneratedIamSecurityTokenSizeMin = 128;
// Safe to assume that a typical token size is less than 8192 bytes
const vaultGeneratedIamSecurityTokenSizeMax = 8192;
// Base-64
const vaultGeneratedIamSecurityTokenPattern = /^[A-Za-z0-9/+=]*$/;
module.exports = {
// info about the iam security token
iamSecurityToken: {
min: vaultGeneratedIamSecurityTokenSizeMin,
max: vaultGeneratedIamSecurityTokenSizeMax,
pattern: vaultGeneratedIamSecurityTokenPattern,
},
// PublicId is used as the canonicalID for a request that contains
// no authentication information. Requestor can access
// only public resources
publicId: 'http://acs.amazonaws.com/groups/global/AllUsers',
zenkoServiceAccount: 'http://acs.zenko.io/accounts/service',
metadataFileNamespace: '/MDFile',
dataFileURL: '/DataFile',
passthroughFileURL: '/PassthroughFile',
// AWS states max size for user-defined metadata
// (x-amz-meta- headers) is 2 KB:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
// In testing, AWS seems to allow up to 88 more bytes,
// so we do the same.
maximumMetaHeadersSize: 2136,
emptyFileMd5: 'd41d8cd98f00b204e9800998ecf8427e',
// Version 2 changes the format of the data location property
// Version 3 adds the dataStoreName attribute
// Version 4 adds the Creation-Time and Content-Language attributes,
// and adds support for x-ms-meta-* headers in UserMetadata
// Version 5 adds the azureInfo structure
mdModelVersion: 5,
/*
* Splitter is used to build the object name for the overview of a
* multipart upload and to build the object names for each part of a
* multipart upload. These objects with large names are then stored in
* metadata in a "shadow bucket" to a real bucket. The shadow bucket
* contains all ongoing multipart uploads. We include in the object
* name some of the info we might need to pull about an open multipart
* upload or about an individual part with each piece of info separated
* by the splitter. We can then extract each piece of info by splitting
* the object name string with this splitter.
* For instance, assuming a splitter of '...!*!',
* the name of the upload overview would be:
* overview...!*!objectKey...!*!uploadId
* For instance, the name of a part would be:
* uploadId...!*!partNumber
*
* The sequence of characters used in the splitter should not occur
* elsewhere in the pieces of info to avoid splitting where not
* intended.
*
 * Splitter is also used in adding bucket names to the
 * namespaceusersbucket. The object names added to the
* namespaceusersbucket are of the form:
* canonicalID...!*!bucketname
*/
splitter: '..|..',
usersBucket: 'users..bucket',
// MPU Bucket Prefix is used to create the name of the shadow
// bucket used for multipart uploads. There is one shadow mpu
// bucket per bucket and its name is the mpuBucketPrefix followed
// by the name of the final destination bucket for the object
// once the multipart upload is complete.
mpuBucketPrefix: 'mpuShadowBucket',
// since aws s3 does not allow capitalized buckets, these may be
// used for special internal purposes
permittedCapitalizedBuckets: {
METADATA: true,
},
// Setting a lower object key limit to account for:
// - Mongo key limit of 1012 bytes
// - Version ID of 33 bytes in the Mongo key if versioned
// - Max bucket name length of 63 if bucket match is false
// - Extra prefix slash of 1 for bucket prefix if bucket match is true
objectKeyByteLimit: 915,
/* delimiter for location-constraint. The location constraint will be able
* to include the ingestion flag
*/
zenkoSeparator: ':',
/* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true },
replicationBackends: { aws_s3: true, azure: true, gcp: true },
// hex digest of sha256 hash of empty string:
emptyStringHash: crypto.createHash('sha256')
.update('', 'binary').digest('hex'),
mpuMDStoredExternallyBackend: { aws_s3: true, gcp: true },
// AWS sets a minimum size limit for parts except for the last part.
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
minimumAllowedPartSize: 5242880,
gcpMaximumAllowedPartCount: 1024,
// GCP Object Tagging Prefix
gcpTaggingPrefix: 'aws-tag-',
productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
legacyLocations: ['sproxyd', 'legacy'],
// healthcheck default call from nginx is every 2 seconds
// for external backends, don't call unless at least 1 minute
// (60,000 milliseconds) has passed since the last call
externalBackendHealthCheckInterval: 60000,
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
clientsRequireStringKey: { sproxyd: true, cdmi: true },
hasCopyPartBackends: { aws_s3: true, gcp: true },
versioningNotImplBackends: { azure: true, gcp: true },
// user metadata applied on zenko-created objects
zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
// Default expiration value of the S3 pre-signed URL duration
// 604800 seconds (seven days).
defaultPreSignedURLExpiry: 7 * 24 * 60 * 60,
// Regex for ISO-8601 formatted date
shortIso8601Regex: /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/,
longIso8601Regex: /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z/,
supportedNotificationEvents: new Set([
's3:ObjectCreated:*',
's3:ObjectCreated:Put',
's3:ObjectCreated:Copy',
's3:ObjectCreated:CompleteMultipartUpload',
's3:ObjectRemoved:*',
's3:ObjectRemoved:Delete',
's3:ObjectRemoved:DeleteMarkerCreated',
]),
notificationArnPrefix: 'arn:scality:bucketnotif',
// HTTP server keep-alive timeout is set to a higher value than
// client's free sockets timeout to avoid the risk of triggering
// ECONNRESET errors if the server closes the connection at the
// exact moment clients attempt to reuse an established connection
// for a new request.
//
// Note: the ability to close inactive connections on the client
// after httpClientFreeSocketsTimeout milliseconds requires the
// use of "agentkeepalive" module instead of the regular node.js
// http.Agent.
httpServerKeepAliveTimeout: 60000,
httpClientFreeSocketTimeout: 55000,
supportedLifecycleRules: [
'expiration',
'noncurrentVersionExpiration',
'abortIncompleteMultipartUpload',
],
};

lib/constants.ts Normal file
View File

@ -0,0 +1,177 @@
import * as crypto from 'crypto';
// The min value here is to manage further backward compat if we
// need it
// Default value
export const vaultGeneratedIamSecurityTokenSizeMin = 128;
// Safe to assume that a typical token size is less than 8192 bytes
export const vaultGeneratedIamSecurityTokenSizeMax = 8192;
// Base-64
export const vaultGeneratedIamSecurityTokenPattern = /^[A-Za-z0-9/+=]*$/;
// info about the iam security token
export const iamSecurityToken = {
min: vaultGeneratedIamSecurityTokenSizeMin,
max: vaultGeneratedIamSecurityTokenSizeMax,
pattern: vaultGeneratedIamSecurityTokenPattern,
};
// PublicId is used as the canonicalID for a request that contains
// no authentication information. Requestor can access
// only public resources
export const publicId = 'http://acs.amazonaws.com/groups/global/AllUsers';
export const zenkoServiceAccount = 'http://acs.zenko.io/accounts/service';
export const metadataFileNamespace = '/MDFile';
export const dataFileURL = '/DataFile';
export const passthroughFileURL = '/PassthroughFile';
// AWS states max size for user-defined metadata
// (x-amz-meta- headers) is 2 KB:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
// In testing, AWS seems to allow up to 88 more bytes,
// so we do the same.
export const maximumMetaHeadersSize = 2136;
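// (2 KB = 2048 bytes, plus the 88 extra bytes observed in testing: 2048 + 88 = 2136)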
export const emptyFileMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
// Version 2 changes the format of the data location property
// Version 3 adds the dataStoreName attribute
// Version 4 adds the Creation-Time and Content-Language attributes,
// and adds support for x-ms-meta-* headers in UserMetadata
// Version 5 adds the azureInfo structure
// Version 6 adds a "deleted" flag that is updated to true before
// the object gets deleted. This is done to keep object metadata in the
// oplog when deleting the object, as oplog deletion events don't contain
// any metadata of the object.
// Version 6 also adds the "isPHD" flag that is used to indicate that the master
// object is a placeholder and is not up to date.
export const mdModelVersion = 6;
/*
* Splitter is used to build the object name for the overview of a
* multipart upload and to build the object names for each part of a
* multipart upload. These objects with large names are then stored in
* metadata in a "shadow bucket" corresponding to the real bucket. The shadow bucket
* contains all ongoing multipart uploads. We include in the object
* name some of the info we might need to pull about an open multipart
* upload or about an individual part with each piece of info separated
* by the splitter. We can then extract each piece of info by splitting
* the object name string with this splitter.
* For instance, assuming a splitter of '...!*!',
* the name of the upload overview would be:
* overview...!*!objectKey...!*!uploadId
* and the name of a part would be:
* uploadId...!*!partNumber
*
* The sequence of characters used in the splitter should not occur
* elsewhere in the pieces of info to avoid splitting where not
* intended.
*
* Splitter is also used in adding bucketnames to the
* namespaceusersbucket. The object names added to the
* namespaceusersbucket are of the form:
* canonicalID...!*!bucketname
*/
export const splitter = '..|..';
export const usersBucket = 'users..bucket';
// MPU Bucket Prefix is used to create the name of the shadow
// bucket used for multipart uploads. There is one shadow mpu
// bucket per bucket and its name is the mpuBucketPrefix followed
// by the name of the final destination bucket for the object
// once the multipart upload is complete.
export const mpuBucketPrefix = 'mpuShadowBucket';
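// Illustrative only, not part of the original file: with the splitter
// above, the MPU keys described in the comment block would be built and
// parsed roughly as:
//   const overviewKey = ['overview', objectKey, uploadId].join(splitter);
//   const partKey = [uploadId, partNumber].join(splitter);
//   const [, parsedObjectKey, parsedUploadId] = overviewKey.split(splitter);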
// since aws s3 does not allow capitalized buckets, these may be
// used for special internal purposes
export const permittedCapitalizedBuckets = {
METADATA: true,
};
// Setting a lower object key limit to account for:
// - Mongo key limit of 1012 bytes
// - Version ID in Mongo Key if versioned of 33
// - Max bucket name length if bucket match false of 63
// - Extra prefix slash for bucket prefix if bucket match of 1
export const objectKeyByteLimit = 915;
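// (1012 - 33 - 63 - 1 = 915)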
/* delimiter for location-constraint. The location constraint will be able
* to include the ingestion flag
*/
export const zenkoSeparator = ':';
/* eslint-disable camelcase */
export const externalBackends = { aws_s3: true, azure: true, gcp: true, pfs: true };
export const replicationBackends = { aws_s3: true, azure: true, gcp: true };
// hex digest of sha256 hash of empty string:
export const emptyStringHash = crypto.createHash('sha256')
.update('', 'binary').digest('hex');
export const mpuMDStoredExternallyBackend = { aws_s3: true, gcp: true };
// AWS sets a minimum size limit for parts except for the last part.
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
export const minimumAllowedPartSize = 5242880;
export const gcpMaximumAllowedPartCount = 1024;
// GCP Object Tagging Prefix
export const gcpTaggingPrefix = 'aws-tag-';
export const productName = 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko';
export const legacyLocations = ['sproxyd', 'legacy'];
// healthcheck default call from nginx is every 2 seconds
// for external backends, don't call unless at least 1 minute
// (60,000 milliseconds) has passed since the last call
export const externalBackendHealthCheckInterval = 60000;
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
export const clientsRequireStringKey = { sproxyd: true, cdmi: true };
export const hasCopyPartBackends = { aws_s3: true, gcp: true };
export const versioningNotImplBackends = { azure: true, gcp: true };
// user metadata applied on zenko-created objects
export const zenkoIDHeader = 'x-amz-meta-zenko-instance-id';
// Default expiration value of the S3 pre-signed URL duration
// 604800 seconds (seven days).
export const defaultPreSignedURLExpiry = 7 * 24 * 60 * 60;
// Regex for ISO-8601 formatted date
export const shortIso8601Regex = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z/;
export const longIso8601Regex = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z/;
export const supportedNotificationEvents = new Set([
's3:ObjectCreated:*',
's3:ObjectCreated:Put',
's3:ObjectCreated:Copy',
's3:ObjectCreated:CompleteMultipartUpload',
's3:ObjectRemoved:*',
's3:ObjectRemoved:Delete',
's3:ObjectRemoved:DeleteMarkerCreated',
's3:Replication:OperationFailedReplication',
's3:ObjectTagging:*',
's3:ObjectTagging:Put',
's3:ObjectTagging:Delete',
's3:ObjectAcl:Put',
's3:ObjectRestore:*',
's3:ObjectRestore:Post',
's3:ObjectRestore:Completed',
's3:ObjectRestore:Delete',
's3:LifecycleTransition',
's3:LifecycleExpiration:*',
's3:LifecycleExpiration:DeleteMarkerCreated',
's3:LifecycleExpiration:Delete',
]);
export const notificationArnPrefix = 'arn:scality:bucketnotif';
// HTTP server keep-alive timeout is set to a higher value than
// client's free sockets timeout to avoid the risk of triggering
// ECONNRESET errors if the server closes the connection at the
// exact moment clients attempt to reuse an established connection
// for a new request.
//
// Note: the ability to close inactive connections on the client
// after httpClientFreeSocketTimeout milliseconds requires the
// use of "agentkeepalive" module instead of the regular node.js
// http.Agent.
export const httpServerKeepAliveTimeout = 60000;
export const httpClientFreeSocketTimeout = 55000;
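// Illustrative only, not part of the original file: on the client side,
// the "agentkeepalive" module mentioned above would be wired up roughly
// as follows (the option name is an assumption, check the module docs):
//   import Agent from 'agentkeepalive';
//   const agent = new Agent({ freeSocketTimeout: httpClientFreeSocketTimeout });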
export const supportedLifecycleRules = [
'expiration',
'noncurrentVersionExpiration',
'abortIncompleteMultipartUpload',
'transitions',
'noncurrentVersionTransition',
];
// Maximum number of buckets to cache (bucket metadata)
export const maxCachedBuckets = process.env.METADATA_MAX_CACHED_BUCKETS ?
Number(process.env.METADATA_MAX_CACHED_BUCKETS) : 1000;
export const validRestoreObjectTiers = new Set(['Expedited', 'Standard', 'Bulk']);
export const maxBatchingConcurrentOperations = 5;
/** For the policy resource ARN check, we allow an empty account ID so as not to break compatibility */
export const policyArnAllowedEmptyAccountId = ['utapi', 'scuba'];


@ -1,7 +1,3 @@
'use strict'; // eslint-disable-line strict
const writeOptions = { sync: true };
/**
* Like Error, but with a property set to true.
* TODO: this is copied from kineticlib, should consolidate with the
@ -14,29 +10,36 @@ const writeOptions = { sync: true };
* use:
* throw propError("badTypeInput", "input is not a buffer");
*
* @param {String} propName - the property name.
* @param {String} message - the Error message.
* @returns {Error} the Error object.
* @param propName - the property name.
* @param message - the Error message.
* @returns the Error object.
*/
function propError(propName, message) {
function propError(propName: string, message: string): Error {
const err = new Error(message);
err[propName] = true;
// @ts-ignore
err.is = { [propName]: true };
return err;
}
/**
* Running transaction with multiple updates to be committed atomically
*/
class IndexTransaction {
export class IndexTransaction {
operations: { type: 'put' | 'del'; key: string; value?: any }[];
db: any;
closed: boolean;
conditions: { [key: string]: string }[];
/**
* Builds a new transaction
*
* @argument {Leveldb} db an open database to which the updates
* will be applied
*
* @returns {IndexTransaction} a new empty transaction
* @returns a new empty transaction
*/
constructor(db) {
constructor(db: any) {
this.operations = [];
this.db = db;
this.closed = false;
@ -46,30 +49,34 @@ class IndexTransaction {
/**
* Adds a new operation to participate in this running transaction
*
* @argument {object} op an object with the following attributes:
* @argument op an object with the following attributes:
* {
* type: 'put' or 'del',
* key: the object key,
* value: (optional for del) the value to store,
* }
*
* @throws {Error} an error described by the following properties
* @throws an error described by the following properties
* - invalidTransactionVerb if op is not put or del
* - pushOnCommittedTransaction if already committed
* - missingKey if the key is missing from the op
* - missingValue if putting without a value
*
* @returns {undefined}
*/
push(op) {
push(op: { type: 'put'; key: string; value: any }): void;
push(op: { type: 'del'; key: string }): void;
push(op: { type: 'put' | 'del'; key: string; value?: any }): void {
if (this.closed) {
throw propError('pushOnCommittedTransaction',
'can not add ops to already committed transaction');
throw propError(
'pushOnCommittedTransaction',
'can not add ops to already committed transaction'
);
}
if (op.type !== 'put' && op.type !== 'del') {
throw propError('invalidTransactionVerb',
`unknown action type: ${op.type}`);
throw propError(
'invalidTransactionVerb',
`unknown action type: ${op.type}`
);
}
if (op.key === undefined) {
@ -93,57 +100,59 @@ class IndexTransaction {
* - pushOnCommittedTransaction if already committed
* - missingKey if the key is missing from the op
* - missingValue if putting without a value
*
* @returns {undefined}
*
* @see push
*/
put(key, value) {
put(key: string, value: any) {
this.push({ type: 'put', key, value });
}
/**
* Adds a new del operation to this running transaction
*
* @argument {string} key - the key of the object to delete
* @argument key - the key of the object to delete
*
* @throws {Error} an error described by the following properties
* @throws an error described by the following properties
* - pushOnCommittedTransaction if already committed
* - missingKey if the key is missing from the op
*
* @returns {undefined}
*
* @see push
*/
del(key) {
del(key: string) {
this.push({ type: 'del', key });
}
/**
* Adds a condition for the transaction
*
* @argument {object} condition an object with the following attributes:
* @argument condition an object with the following attributes:
* {
* <condition>: the object key
* }
* example: { notExists: 'key1' }
*
* @throws {Error} an error described by the following properties
* @throws an error described by the following properties
* - pushOnCommittedTransaction if already committed
* - missingCondition if the condition is empty
*
* @returns {undefined}
*/
addCondition(condition) {
addCondition(condition: { [key: string]: string }) {
if (this.closed) {
throw propError('pushOnCommittedTransaction',
'can not add conditions to already committed transaction');
throw propError(
'pushOnCommittedTransaction',
'can not add conditions to already committed transaction'
);
}
if (condition === undefined || Object.keys(condition).length === 0) {
throw propError('missingCondition', 'missing condition for conditional put');
throw propError(
'missingCondition',
'missing condition for conditional put'
);
}
if (typeof (condition.notExists) !== 'string') {
throw propError('unsupportedConditionalOperation', 'missing key or supported condition');
if (typeof condition.notExists !== 'string' && typeof condition.exists !== 'string') {
throw propError(
'unsupportedConditionalOperation',
'missing key or supported condition'
);
}
this.conditions.push(condition);
}
@ -151,32 +160,35 @@ class IndexTransaction {
/**
* Applies the queued updates in this transaction atomically.
*
* @argument {function} cb function to be called when the commit
* @argument cb function to be called when the commit
* finishes, taking an optional error argument
*
* @returns {undefined}
*/
commit(cb) {
commit(cb: (error: Error | null, data?: any) => void) {
if (this.closed) {
return cb(propError('alreadyCommitted',
'transaction was already committed'));
return cb(
propError(
'alreadyCommitted',
'transaction was already committed'
)
);
}
if (this.operations.length === 0) {
return cb(propError('emptyTransaction',
'tried to commit an empty transaction'));
return cb(
propError(
'emptyTransaction',
'tried to commit an empty transaction'
)
);
}
this.closed = true;
writeOptions.conditions = this.conditions;
const options = { sync: true, conditions: this.conditions };
// The array-of-operations variant of the `batch` method
// allows passing options such as `sync: true` whereas the
// chained form does not.
return this.db.batch(this.operations, writeOptions, cb);
return this.db.batch(this.operations, options, cb);
}
}
module.exports = {
IndexTransaction,
};
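A minimal usage sketch of the transaction API above, assuming `db` is an open leveldb handle supporting the array form of `batch`:

const txn = new IndexTransaction(db);
txn.put('key1', JSON.stringify({ some: 'metadata' }));
txn.del('key2');
txn.addCondition({ notExists: 'key1' });
txn.commit(err => {
    // on misuse, err.is.alreadyCommitted or err.is.emptyTransaction is set
});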


@ -1,13 +0,0 @@
function reshapeExceptionError(error) {
const { message, code, stack, name } = error;
return {
message,
code,
stack,
name,
};
}
module.exports = {
reshapeExceptionError,
};

lib/errorUtils.ts (Normal file, 11 lines)

@ -0,0 +1,11 @@
export interface ErrorLike {
message: any;
code: any;
stack: any;
name: any;
}
export function reshapeExceptionError(error: ErrorLike) {
const { message, code, stack, name } = error;
return { message, code, stack, name };
}


@ -1,87 +0,0 @@
'use strict'; // eslint-disable-line strict
/**
* ArsenalError
*
* @extends {Error}
*/
class ArsenalError extends Error {
/**
* constructor.
*
* @param {string} type - Type of error or message
* @param {number} code - HTTP status code
* @param {string} desc - Verbose description of error
*/
constructor(type, code, desc) {
super(type);
/**
* HTTP status code of error
* @type {number}
*/
this.code = code;
/**
* Description of error
* @type {string}
*/
this.description = desc;
this[type] = true;
}
/**
* Output the error as a JSON string
* @returns {string} Error as JSON string
*/
toString() {
return JSON.stringify({
errorType: this.message,
errorMessage: this.description,
});
}
/**
* Write the error in an HTTP response
*
* @param { http.ServerResponse } res - Response we are responding to
* @returns {undefined}
*/
writeResponse(res) {
res.writeHead(this.code);
res.end(this.toString());
}
/**
* customizeDescription returns a new ArsenalError with a new description
* with the same HTTP code and message.
*
* @param {string} description - New error description
* @returns {ArsenalError} New error
*/
customizeDescription(description) {
return new ArsenalError(this.message, this.code, description);
}
}
/**
* Generate an Errors instances object.
*
* @returns {Object.<string, ArsenalError>} - object field by arsenalError
* instances
*/
function errorsGen() {
const errors = {};
const errorsObj = require('../errors/arsenalErrors.json');
Object.keys(errorsObj)
.filter(index => index !== '_comment')
.forEach(index => {
errors[index] = new ArsenalError(index, errorsObj[index].code,
errorsObj[index].description);
});
return errors;
}
module.exports = errorsGen();

lib/errors/arsenalErrors.ts (Normal file, 1056 lines)

File diff suppressed because it is too large

lib/errors/index.ts (Normal file, 175 lines)

@ -0,0 +1,175 @@
import type { ServerResponse } from 'http';
import * as rawErrors from './arsenalErrors';
/** All possible errors names. */
export type Name = keyof typeof rawErrors;
/** Object containing all errors names. It has the format { [Name]: "Name" } */
export type Names = { [Name_ in Name]: Name_ };
/** Mapping used to determine an error type. It has the format { [Name]: boolean } */
export type Is = { [_ in Name]: boolean };
/** Mapping of all possible Errors. It has the format { [Name]: Error } */
export type Errors = { [_ in Name]: ArsenalError };
// This object is reused constantly through createIs; we store it here
// to avoid recomputation.
const isBase = Object.fromEntries(
Object.keys(rawErrors).map((key) => [key, false])
) as Is;
// This allows us to conditionally add the old behavior of errors to properly
// test the migration.
// Activate CI tests with `ALLOW_UNSAFE_ERROR_COMPARISON=false yarn test`.
// Remove this mechanism in ARSN-176.
export const allowUnsafeErrComp = (
process.env.ALLOW_UNSAFE_ERROR_COMPARISON ?? 'true') === 'true'
// This contains some metaprog. Be careful.
// Proxy can be found on MDN.
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Proxy
// While it could seem better to avoid metaprogramming, this allows us to enforce
// type-checking properly while avoiding all errors that could happen at runtime.
// Even if some errors are made in JavaScript, like using err.is.NonExistingError,
// the Proxy will return false.
const createIs = (type: Name): Is => {
const get = (is: Is, value: string | symbol) => is[value] ?? false;
const final = Object.freeze({ ...isBase, [type]: true });
return new Proxy(final, { get });
};
export class ArsenalError extends Error {
/** HTTP status code. Example: 401, 403, 500, ... */
#code: number;
/** Text description of the error. */
#description: string;
/** Type of the error. */
#type: Name;
/** Object used to determine the error type.
* Example: error.is.InternalError */
#is: Is;
/** A map of error metadata (can be extra fields
* that only show in debug mode) */
#metadata: Map<string, Object[]>;
private constructor(type: Name, code: number, description: string,
metadata?: Map<string, Object[]>) {
super(type);
this.#code = code;
this.#description = description;
this.#type = type;
this.#is = createIs(type);
this.#metadata = metadata ?? new Map<string, Object[]>();
// This restores the old behavior of errors, to make sure they're now
// backward-compatible. Fortunately it's handled by TS, but it cannot
// be type-checked. This means we have to be extremely careful about
// what we're doing when using errors.
// Disables the feature when in CI tests but not in production.
if (allowUnsafeErrComp) {
this[type] = true;
}
}
/** Output the error as a JSON string */
toString() {
const errorType = this.message;
const errorMessage = this.#description;
return JSON.stringify({ errorType, errorMessage });
}
flatten() {
return {
is_arsenal_error: true,
code: this.#code,
description: this.#description,
type: this.#type,
stack: this.stack
}
}
static unflatten(flat_obj) {
if (!flat_obj.is_arsenal_error) {
return null;
}
const err = new ArsenalError(
flat_obj.type,
flat_obj.code,
flat_obj.description
)
err.stack = flat_obj.stack
return err;
}
/** Write the error in an HTTP response */
writeResponse(res: ServerResponse) {
res.writeHead(this.#code);
const asStr = this.toString();
res.end(asStr);
}
/** Clone the error with a new description.*/
customizeDescription(description: string): ArsenalError {
const type = this.#type;
const code = this.#code;
const metadata = new Map(this.#metadata);
const err = new ArsenalError(type, code, description, metadata);
err.stack = this.stack;
return err;
}
/** Clone the error with a new metadata field */
addMetadataEntry(key: string, value: Object[]): ArsenalError {
const type = this.#type;
const code = this.#code;
const description = this.#description;
const metadata = new Map(this.#metadata);
metadata.set(key, value);
const err = new ArsenalError(type, code, description, metadata);
err.stack = this.stack;
return err;
}
/** Used to determine the error type. Example: error.is.InternalError */
get is() {
return this.#is;
}
/** HTTP status code. Example: 401, 403, 500, ... */
get code() {
return this.#code;
}
/** Text description of the error. */
get description() {
return this.#description;
}
/**
* Type of the error, belonging to Name. `is` should be preferred over
* `type` on a day-to-day basis, but type remains accessible for future use. */
get type() {
return this.#type;
}
/** A map of error metadata */
get metadata() {
return this.#metadata;
}
/** Generate all possible errors. An instance is created by default. */
static errors() {
const errors = {};
Object.entries(rawErrors).forEach((value) => {
const name = value[0] as Name;
const error = value[1];
const { code, description } = error;
const get = () => new ArsenalError(name, code, description);
Object.defineProperty(errors, name, { get });
});
return errors as Errors;
}
}
/** Mapping of all possible Errors.
* Use them with errors[error].customizeDescription for any customization. */
export default ArsenalError.errors();
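A short usage sketch of the new error API, assuming the module is imported from this path:

import errors, { ArsenalError } from './lib/errors';

const err = errors.InternalError.customizeDescription('metadata backend unreachable');
err.is.InternalError;        // true
err.is.NoSuchKey;            // false: the Proxy returns false for any other name
err.code;                    // 500
JSON.parse(err.toString());  // { errorType: 'InternalError', errorMessage: '...' }
ArsenalError.unflatten(err.flatten()); // serialization round-trip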


@ -1,6 +1,4 @@
'use strict'; // eslint-disable-line strict
const ciphers = [
export const ciphers = [
'DHE-RSA-AES128-GCM-SHA256',
'ECDHE-ECDSA-AES128-GCM-SHA256',
'ECDHE-RSA-AES256-GCM-SHA384',
@ -28,7 +26,3 @@ const ciphers = [
'!EDH-RSA-DES-CBC3-SHA',
'!KRB5-DES-CBC3-SHA',
].join(':');
module.exports = {
ciphers,
};


@ -29,16 +29,11 @@ c2CNfUEqyRbJF4pE9ZcdQReT5p/llmyhQdvq6cHH+cKJk63C6DHRVoStsnsUcvKe
bLxKsygK77ttjr61cxLoDJeGd5L5h1CPmwIBAg==
-----END DH PARAMETERS-----
*/
'use strict'; // eslint-disable-line strict
const dhparam =
export const dhparam =
'MIIBCAKCAQEAh99T77KGNuiY9N6xrCJ3QNv4SFADTa3CD+1VMTAdRJLHUNpglB+i' +
'AoTYiLDFZgtTCpx0ZZUD+JM3qiCZy0OK5/ZGlVD7sZmxjRtdpVK4qIPtwav8t0J7' +
'c2CNfUEqyRbJF4pE9ZcdQReT5p/llmyhQdvq6cHH+cKJk63C6DHRVoStsnsUcvKe' +
'23PLGZulKg8H3eRBxHamHkmyuEVDtoNhMIoJONsdXSpo5GgcD4EQMM8xb/qsnCxn' +
'6QIGTBvcHskxtlTZOfUPk4XQ6Yb3tQi2TurzkQHLln4U7p/GZs+D+6D3SgSPqr6P' +
'bLxKsygK77ttjr61cxLoDJeGd5L5h1CPmwIBAg==';
module.exports = {
dhparam,
};

lib/https/index.ts (Normal file, 2 lines)

@ -0,0 +1,2 @@
export * as ciphers from './ciphers'
export * as dhparam from './dh2048'


@ -1,83 +0,0 @@
'use strict'; // eslint-disable-line strict
const ipaddr = require('ipaddr.js');
/**
* checkIPinRangeOrMatch checks whether a given ip address is in an ip address
* range or matches the given ip address
* @param {string} cidr - ip address range or ip address
* @param {object} ip - parsed ip address
* @return {boolean} true if in range, false if not
*/
function checkIPinRangeOrMatch(cidr, ip) {
// If there is an exact match of the ip address, no need to check ranges
if (ip.toString() === cidr) {
return true;
}
let range;
try {
range = ipaddr.IPv4.parseCIDR(cidr);
} catch (err) {
try {
// not ipv4 so try ipv6
range = ipaddr.IPv6.parseCIDR(cidr);
} catch (err) {
// range is not valid ipv4 or ipv6
return false;
}
}
try {
return ip.match(range);
} catch (err) {
return false;
}
}
/**
* Parse IP address into object representation
* @param {string} ip - IPV4/IPV6/IPV4-mapped IPV6 address
* @return {object} parsedIp - Object representation of parsed IP
*/
function parseIp(ip) {
if (ipaddr.IPv4.isValid(ip)) {
return ipaddr.parse(ip);
}
if (ipaddr.IPv6.isValid(ip)) {
// also parses IPv6 mapped IPv4 addresses into IPv4 representation
return ipaddr.process(ip);
}
// not valid ip address according to module, so return empty object
// which will obviously not match a range of ip addresses that the parsedIp
// is being tested against
return {};
}
/**
* Checks if an IP address matches a given list of CIDR ranges
* @param {string[]} cidrList - List of CIDR ranges
* @param {string} ip - IP address
* @return {boolean} - true if there is match or false for no match
*/
function ipMatchCidrList(cidrList, ip) {
const parsedIp = parseIp(ip);
return cidrList.some(item => {
let cidr;
// patch the cidr if range is not specified
if (item.indexOf('/') === -1) {
if (item.startsWith('127.')) {
cidr = `${item}/8`;
} else if (ipaddr.IPv4.isValid(item)) {
cidr = `${item}/32`;
}
}
return checkIPinRangeOrMatch(cidr || item, parsedIp);
});
}
module.exports = {
checkIPinRangeOrMatch,
ipMatchCidrList,
parseIp,
};

lib/ipCheck.ts (Normal file, 71 lines)

@ -0,0 +1,71 @@
import ipaddr from 'ipaddr.js';
/**
* checkIPinRangeOrMatch checks whether a given ip address is in an ip address
* range or matches the given ip address
* @param cidr - ip address range or ip address
* @param ip - parsed ip address
* @return true if in range, false if not
*/
export function checkIPinRangeOrMatch(
cidr: string,
ip: ipaddr.IPv4 | ipaddr.IPv6,
): boolean {
// If there is an exact match of the ip address, no need to check ranges
if (ip.toString() === cidr) {
return true;
}
try {
if (ip instanceof ipaddr.IPv6) {
const range = ipaddr.IPv6.parseCIDR(cidr);
return ip.match(range);
} else {
const range = ipaddr.IPv4.parseCIDR(cidr);
return ip.match(range);
}
} catch (error) {
return false;
}
}
/**
* Parse IP address into object representation
* @param ip - IPV4/IPV6/IPV4-mapped IPV6 address
* @return parsedIp - Object representation of parsed IP
*/
export function parseIp(ip: string): ipaddr.IPv4 | ipaddr.IPv6 | {} {
if (ipaddr.IPv4.isValid(ip)) {
return ipaddr.parse(ip);
}
if (ipaddr.IPv6.isValid(ip)) {
// also parses IPv6 mapped IPv4 addresses into IPv4 representation
return ipaddr.process(ip);
}
return {};
}
/**
* Checks if an IP address matches a given list of CIDR ranges
* @param cidrList - List of CIDR ranges
* @param ip - IP address
* @return - true if there is match or false for no match
*/
export function ipMatchCidrList(cidrList: string[], ip: string): boolean {
const parsedIp = parseIp(ip);
return cidrList.some((item) => {
let cidr: string | undefined;
// patch the cidr if range is not specified
if (item.indexOf('/') === -1) {
if (item.startsWith('127.')) {
cidr = `${item}/8`;
} else if (ipaddr.IPv4.isValid(item)) {
cidr = `${item}/32`;
}
}
return (
(parsedIp instanceof ipaddr.IPv4 ||
parsedIp instanceof ipaddr.IPv6) &&
checkIPinRangeOrMatch(cidr || item, parsedIp)
);
});
}
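A quick usage sketch of the helpers above, showing how bare IPs are patched into CIDR ranges:

import { ipMatchCidrList } from './lib/ipCheck';

ipMatchCidrList(['10.0.0.0/8'], '10.1.2.3');     // true
ipMatchCidrList(['127.0.0.1'], '127.255.0.1');   // true: bare 127.x is widened to /8
ipMatchCidrList(['192.168.1.1'], '192.168.1.2'); // false: other bare IPs become /32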


@ -1,32 +0,0 @@
'use strict'; // eslint-disable-line
const debug = require('util').debuglog('jsutil');
// JavaScript utility functions
/**
* force <tt>func</tt> to be called only once, even if actually called
* multiple times. The cached result of the first call is then
* returned (if any).
*
* @note underscore.js provides this functionality, but it is not worth
* adding a new dependency for such a small use case.
*
* @param {function} func function to call at most once
* @return {function} a callable wrapper mirroring <tt>func</tt> but
* only calls <tt>func</tt> at first invocation.
*/
module.exports.once = function once(func) {
const state = { called: false, res: undefined };
return function wrapper(...args) {
if (!state.called) {
state.called = true;
state.res = func.apply(func, args);
} else {
debug('function already called:', func,
'returning cached result:', state.res);
}
return state.res;
};
};

lib/jsutil.ts (Normal file, 33 lines)

@ -0,0 +1,33 @@
import * as util from 'util';
const debug = util.debuglog('jsutil');
// JavaScript utility functions
/**
* force <tt>func</tt> to be called only once, even if actually called
* multiple times. The cached result of the first call is then
* returned (if any).
*
* @note underscore.js provides this functionality, but it is not worth
* adding a new dependency for such a small use case.
*
* @param func function to call at most once
* @return a callable wrapper mirroring <tt>func</tt> but
* only calls <tt>func</tt> at first invocation.
*/
export function once<T>(func: (...args: any[]) => T): (...args: any[]) => T {
type State = { called: boolean; res: any };
const state: State = { called: false, res: undefined };
return function wrapper(...args: any[]) {
if (!state.called) {
state.called = true;
state.res = func.apply(func, args);
} else {
const m1 = 'function already called:';
const m2 = 'returning cached result:';
debug(m1, func, m2, state.res);
}
return state.res;
};
}
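A usage sketch of the wrapper:

import { once } from './lib/jsutil';

const init = once(() => {
    console.log('initializing');
    return 42;
});
init(); // logs 'initializing' and returns 42
init(); // returns the cached 42 without calling the function again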


@ -1,230 +0,0 @@
const Redis = require('ioredis');
class RedisClient {
/**
* @constructor
* @param {Object} config - config
* @param {string} config.host - Redis host
* @param {number} config.port - Redis port
* @param {string} config.password - Redis password
* @param {werelogs.Logger} logger - logger instance
*/
constructor(config, logger) {
this._client = new Redis(config);
this._client.on('error', err =>
logger.trace('error from redis', {
error: err,
method: 'RedisClient.constructor',
redisHost: config.host,
redisPort: config.port,
}),
);
return this;
}
/**
* scan a pattern and return matching keys
* @param {string} pattern - string pattern to match with all existing keys
* @param {number} [count=10] - scan count
* @param {callback} cb - callback (error, result)
* @return {undefined}
*/
scan(pattern, count = 10, cb) {
const params = { match: pattern, count };
const keys = [];
const stream = this._client.scanStream(params);
stream.on('data', resultKeys => {
for (let i = 0; i < resultKeys.length; i++) {
keys.push(resultKeys[i]);
}
});
stream.on('end', () => {
cb(null, keys);
});
}
/**
* increment value of a key by 1 and set a ttl
* @param {string} key - key holding the value
* @param {number} expiry - expiry in seconds
* @param {callback} cb - callback
* @return {undefined}
*/
incrEx(key, expiry, cb) {
return this._client
.multi([['incr', key], ['expire', key, expiry]])
.exec(cb);
}
/**
* increment value of a key by a given amount
* @param {string} key - key holding the value
* @param {number} amount - amount to increase by
* @param {callback} cb - callback
* @return {undefined}
*/
incrby(key, amount, cb) {
return this._client.incrby(key, amount, cb);
}
/**
* increment value of a key by a given amount and set a ttl
* @param {string} key - key holding the value
* @param {number} amount - amount to increase by
* @param {number} expiry - expiry in seconds
* @param {callback} cb - callback
* @return {undefined}
*/
incrbyEx(key, amount, expiry, cb) {
return this._client
.multi([['incrby', key, amount], ['expire', key, expiry]])
.exec(cb);
}
/**
* decrement value of a key by a given amount
* @param {string} key - key holding the value
* @param {number} amount - amount to decrease by
* @param {callback} cb - callback
* @return {undefined}
*/
decrby(key, amount, cb) {
return this._client.decrby(key, amount, cb);
}
/**
* get value stored at key
* @param {string} key - key holding the value
* @param {callback} cb - callback
* @return {undefined}
*/
get(key, cb) {
return this._client.get(key, cb);
}
/**
* Checks if a key exists
* @param {string} key - name of key
* @param {function} cb - callback
* If cb response returns 0, key does not exist.
* If cb response returns 1, key exists.
* @return {undefined}
*/
exists(key, cb) {
return this._client.exists(key, cb);
}
/**
* execute a batch of commands
* @param {string[]} cmds - list of commands
* @param {callback} cb - callback
* @return {undefined}
*/
batch(cmds, cb) {
return this._client.pipeline(cmds).exec(cb);
}
/**
* Add a value and its score to a sorted set. If no sorted set exists, this
* will create a new one for the given key.
* @param {string} key - name of key
* @param {integer} score - score used to order set
* @param {string} value - value to store
* @param {callback} cb - callback
* @return {undefined}
*/
zadd(key, score, value, cb) {
return this._client.zadd(key, score, value, cb);
}
/**
* Get number of elements in a sorted set.
* Note: using this on a key that does not exist will return 0.
* Note: using this on an existing key that isn't a sorted set will
* return an error WRONGTYPE.
* @param {string} key - name of key
* @param {function} cb - callback
* @return {undefined}
*/
zcard(key, cb) {
return this._client.zcard(key, cb);
}
/**
* Get the score for given value in a sorted set
* Note: using this on a key that does not exist will return nil.
* Note: using this on a value that does not exist in a valid sorted set key
* will return nil.
* @param {string} key - name of key
* @param {string} value - value within sorted set
* @param {function} cb - callback
* @return {undefined}
*/
zscore(key, value, cb) {
return this._client.zscore(key, value, cb);
}
/**
* Remove a value from a sorted set
* @param {string} key - name of key
* @param {string|array} value - value within sorted set. Can specify
* multiple values within an array
* @param {function} cb - callback
* The cb response returns number of values removed
* @return {undefined}
*/
zrem(key, value, cb) {
return this._client.zrem(key, value, cb);
}
/**
* Get specified range of elements in a sorted set
* @param {string} key - name of key
* @param {integer} start - start index (inclusive)
* @param {integer} end - end index (inclusive) (can use -1)
* @param {function} cb - callback
* @return {undefined}
*/
zrange(key, start, end, cb) {
return this._client.zrange(key, start, end, cb);
}
/**
* Get range of elements in a sorted set based off score
* @param {string} key - name of key
* @param {integer|string} min - min score value (inclusive)
* (can use "-inf")
* @param {integer|string} max - max score value (inclusive)
* (can use "+inf")
* @param {function} cb - callback
* @return {undefined}
*/
zrangebyscore(key, min, max, cb) {
return this._client.zrangebyscore(key, min, max, cb);
}
/**
* get TTL or expiration in seconds
* @param {string} key - name of key
* @param {function} cb - callback
* @return {undefined}
*/
ttl(key, cb) {
return this._client.ttl(key, cb);
}
clear(cb) {
return this._client.flushdb(cb);
}
disconnect(cb) {
return this._client.quit(cb);
}
listClients(cb) {
return this._client.client('list', cb);
}
}
module.exports = RedisClient;

lib/metrics/RedisClient.ts (Normal file, 218 lines)

@ -0,0 +1,218 @@
import Redis from 'ioredis';
import { Logger } from 'werelogs';
export type Config = { host: string; port: number; password: string };
export type Callback = (error: Error | null, value?: any) => void;
export default class RedisClient {
_client: Redis.Redis;
constructor(config: Config, logger: Logger) {
this._client = new Redis(config);
this._client.on('error', err =>
logger.trace('error from redis', {
error: err,
method: 'RedisClient.constructor',
redisHost: config.host,
redisPort: config.port,
})
);
return this;
}
/**
* scan a pattern and return matching keys
* @param pattern - string pattern to match with all existing keys
* @param [count=10] - scan count
* @param cb - callback (error, result)
*/
scan(pattern: string, count = 10, cb: Callback) {
const params = { match: pattern, count };
const keys: any[] = [];
const stream = this._client.scanStream(params);
stream.on('data', resultKeys => {
for (let i = 0; i < resultKeys.length; i++) {
keys.push(resultKeys[i]);
}
});
stream.on('end', () => {
cb(null, keys);
});
}
/** increment value of a key by 1 and set a ttl
* @param key - key holding the value
* @param expiry - expiry in seconds
* @param cb - callback
*/
incrEx(key: string, expiry: number, cb: Callback) {
const exp = expiry.toString();
return this._client
.multi([['incr', key], ['expire', key, exp]])
.exec(cb);
}
/**
* increment value of a key by a given amount
* @param key - key holding the value
* @param amount - amount to increase by
* @param cb - callback
*/
incrby(key: string, amount: number, cb: Callback) {
return this._client.incrby(key, amount, cb);
}
/** increment value of a key by a given amount and set a ttl
* @param key - key holding the value
* @param amount - amount to increase by
* @param expiry - expiry in seconds
* @param cb - callback
*/
incrbyEx(key: string, amount: number, expiry: number, cb: Callback) {
const am = amount.toString();
const exp = expiry.toString();
return this._client
.multi([['incrby', key, am], ['expire', key, exp]])
.exec(cb);
}
/**
* decrement value of a key by a given amount
* @param key - key holding the value
* @param amount - amount to decrease by
* @param cb - callback
*/
decrby(key: string, amount: number, cb: Callback) {
return this._client.decrby(key, amount, cb);
}
/**
* execute a batch of commands
* @param cmds - list of commands
* @param cb - callback
* @return
*/
batch(cmds: string[][], cb: Callback) {
return this._client.pipeline(cmds).exec(cb);
}
/**
* Checks if a key exists
* @param key - name of key
* @param cb - callback
* If cb response returns 0, key does not exist.
* If cb response returns 1, key exists.
*/
exists(key: string, cb: Callback) {
return this._client.exists(key, cb);
}
/**
* get value stored at key
* @param key - key holding the value
* @param cb - callback
*/
get(key: string, cb: Callback) {
return this._client.get(key, cb);
}
/**
* Add a value and its score to a sorted set. If no sorted set exists, this
* will create a new one for the given key.
* @param key - name of key
* @param score - score used to order set
* @param value - value to store
* @param cb - callback
*/
zadd(key: string, score: number, value: string, cb: Callback) {
return this._client.zadd(key, score, value, cb);
}
/**
* Get number of elements in a sorted set.
* Note: using this on a key that does not exist will return 0.
* Note: using this on an existing key that isn't a sorted set will
* return an error WRONGTYPE.
* @param key - name of key
* @param cb - callback
*/
zcard(key: string, cb: Callback) {
return this._client.zcard(key, cb);
}
/**
* Get the score for given value in a sorted set
* Note: using this on a key that does not exist will return nil.
* Note: using this on a value that does not exist in a valid sorted set key
* will return nil.
* @param key - name of key
* @param value - value within sorted set
* @param cb - callback
*/
zscore(key: string, value: string, cb: Callback) {
return this._client.zscore(key, value, cb);
}
/**
* Remove a value from a sorted set
* @param key - name of key
* @param value - value within sorted set. Can specify
* multiple values within an array
* @param cb - callback
* The cb response returns number of values removed
*/
zrem(key: string, value: string | string[], cb: Callback) {
return this._client.zrem(key, value, cb);
}
/**
* Get specified range of elements in a sorted set
* @param key - name of key
* @param start - start index (inclusive)
* @param end - end index (inclusive) (can use -1)
* @param cb - callback
*/
zrange(key: string, start: number, end: number, cb: Callback) {
return this._client.zrange(key, start, end, cb);
}
/**
* Get range of elements in a sorted set based off score
* @param key - name of key
* @param min - min score value (inclusive)
* (can use "-inf")
* @param max - max score value (inclusive)
* (can use "+inf")
* @param cb - callback
*/
zrangebyscore(
key: string,
min: number | string,
max: number | string,
cb: Callback,
) {
return this._client.zrangebyscore(key, min, max, cb);
}
/**
* get TTL or expiration in seconds
* @param key - name of key
* @param cb - callback
*/
ttl(key: string, cb: Callback) {
return this._client.ttl(key, cb);
}
clear(cb: Callback) {
return this._client.flushdb(cb);
}
disconnect() {
this._client.disconnect();
}
listClients(cb: Callback) {
return this._client.client('list', cb);
}
}
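A minimal usage sketch, assuming a reachable Redis instance and a werelogs logger (host, port, and key names are placeholders):

import { Logger } from 'werelogs';
import RedisClient from './lib/metrics/RedisClient';

const logger = new Logger('example');
const client = new RedisClient({ host: 'localhost', port: 6379, password: '' }, logger);
client.incrEx('requests:s3', 60, (err, res) => {
    // res holds the replies of the MULTI pair [incr, expire]
});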


@ -1,62 +1,68 @@
const async = require('async');
import async from 'async';
import RedisClient from './RedisClient';
import { Logger } from 'werelogs';
export type Callback = (error: Error | null, value?: any) => void;
export default class StatsClient {
_redis: RedisClient;
_interval: number;
_expiry: number;
class StatsClient {
/**
* @constructor
* @param {object} redisClient - RedisClient instance
* @param {number} interval - sampling interval by seconds
* @param {number} expiry - sampling duration by seconds
* @param redisClient - RedisClient instance
* @param interval - sampling interval by seconds
* @param expiry - sampling duration by seconds
*/
constructor(redisClient, interval, expiry) {
constructor(redisClient: RedisClient, interval: number, expiry: number) {
this._redis = redisClient;
this._interval = interval;
this._expiry = expiry;
return this;
}
/*
* Utility function to use when callback is undefined
*/
/** Utility function to use when callback is undefined */
_noop() {}
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
* @param d - Date instance
* @return timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
_normalizeTimestamp(d: Date): number {
const s = d.getSeconds();
return d.setSeconds(s - s % this._interval, 0);
}
/**
* set timestamp to the previous interval
* @param {object} d - Date instance
* @return {number} timestamp - set to the previous interval
* @param d - Date instance
* @return timestamp - set to the previous interval
*/
_setPrevInterval(d) {
_setPrevInterval(d: Date): number {
return d.setSeconds(d.getSeconds() - this._interval);
}
/**
* build redis key to get total number of occurrences on the server
* @param {string} name - key name identifier
* @param {Date} date - Date instance
* @return {string} key - key for redis
* @param name - key name identifier
* @param d - Date instance
* @return key - key for redis
*/
buildKey(name, date) {
return `${name}:${this._normalizeTimestamp(date)}`;
buildKey(name: string, d: Date): string {
return `${name}:${this._normalizeTimestamp(d)}`;
}
/**
* reduce the array of values to a single value
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - array of [error, value] replies from a batch command
* @return {number} count - the reduced count
* @param arr - array of [error, value] replies from a batch command
* @return count - the reduced count
*/
_getCount(arr) {
_getCount(arr: [any, string | null][]): number {
return arr.reduce((prev, a) => {
let num = parseInt(a[1], 10);
let num = parseInt(a[1] ?? '', 10);
num = Number.isNaN(num) ? 0 : num;
return prev + num;
}, 0);
@ -64,18 +70,20 @@ class StatsClient {
/**
* report/record a new request received on the server
* @param {string} id - service identifier
* @param {number} incr - optional param increment
* @param {function} cb - callback
* @return {undefined}
* @param id - service identifier
* @param incr - optional param increment
*/
reportNewRequest(id, incr, cb) {
reportNewRequest(
id: string,
incr?: number | ((error: Error | null, value?: any) => void),
cb?: (error: Error | null, value?: any) => void,
) {
if (!this._redis) {
return undefined;
}
let callback;
let amount;
let callback: (error: Error | null, value?: any) => void;
let amount: number;
if (typeof incr === 'function') {
// In case where optional `incr` is not passed, but `cb` is passed
callback = incr;
@ -92,35 +100,31 @@ class StatsClient {
/**
* Increment the given key by the given value.
* @param {String} key - The Redis key to increment
* @param {Number} incr - The value to increment by
* @param {function} [cb] - callback
* @return {undefined}
* @param key - The Redis key to increment
* @param incr - The value to increment by
* @param [cb] - callback
*/
incrementKey(key, incr, cb) {
incrementKey(key: string, incr: number, cb: Callback) {
const callback = cb || this._noop;
return this._redis.incrby(key, incr, callback);
}
/**
* Decrement the given key by the given value.
* @param {String} key - The Redis key to decrement
* @param {Number} decr - The value to decrement by
* @param {function} [cb] - callback
* @return {undefined}
* @param key - The Redis key to decrement
* @param decr - The value to decrement by
* @param [cb] - callback
*/
decrementKey(key, decr, cb) {
decrementKey(key: string, decr: number, cb: Callback) {
const callback = cb || this._noop;
return this._redis.decrby(key, decr, callback);
}
/**
* report/record a request that ended up being a 500 on the server
* @param {string} id - service identifier
* @param {callback} cb - callback
* @return {undefined}
* @param id - service identifier
*/
report500(id, cb) {
report500(id: string, cb?: (error: Error | null, value?: any) => void) {
if (!this._redis) {
return undefined;
}
@ -131,12 +135,11 @@ class StatsClient {
/**
* wrapper on `getStats` that handles a list of keys
* @param {object} log - Werelogs request logger
* @param {array} ids - service identifiers
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
* @param log - Werelogs request logger
* @param ids - service identifiers
* @param cb - callback to call with the err/result
*/
getAllStats(log, ids, cb) {
getAllStats(log: Logger, ids: string[], cb: Callback) {
if (!this._redis) {
return cb(null, {});
}
@ -150,7 +153,7 @@ class StatsClient {
let errors = 0;
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id, done) => {
return async.eachLimit(ids, 10, (id: string, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
@ -175,19 +178,17 @@ class StatsClient {
/**
* get stats for the last x seconds, x being the sampling duration
* @param {object} log - Werelogs request logger
* @param {string} id - service identifier
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
* @param log - Werelogs request logger
* @param id - service identifier
*/
getStats(log, id, cb) {
getStats(log: Logger, id: string, cb: (error: Error | null, value?: any) => void) {
if (!this._redis) {
return cb(null, {});
}
const d = new Date();
const totalKeys = Math.floor(this._expiry / this._interval);
const reqsKeys = [];
const req500sKeys = [];
const reqsKeys: ['get', string][] = [];
const req500sKeys: ['get', string][] = [];
for (let i = 0; i < totalKeys; i++) {
reqsKeys.push(['get', this.buildKey(`${id}:requests`, d)]);
req500sKeys.push(['get', this.buildKey(`${id}:500s`, d)]);
@ -221,11 +222,9 @@ class StatsClient {
*/
return cb(null, statsRes);
}
statsRes.requests = this._getCount(results[0]);
statsRes['500s'] = this._getCount(results[1]);
statsRes.requests = this._getCount((results as any)[0]);
statsRes['500s'] = this._getCount((results as any)[1]);
return cb(null, statsRes);
});
}
}
module.exports = StatsClient;
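A usage sketch of StatsClient, assuming the RedisClient above and a werelogs request logger (interval and expiry are in seconds):

const stats = new StatsClient(client, 300, 86400);
stats.reportNewRequest('s3', 1, () => {});
stats.getStats(log, 's3', (err, res) => {
    // res is expected to look like { 'requests': <n>, '500s': <n>, 'sampleDuration': 86400 }
});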


@ -1,6 +1,8 @@
const async = require('async');
import StatsClient from './StatsClient';
import { Logger } from 'werelogs';
import async from 'async';
const StatsClient = require('./StatsClient');
export type Callback = (error: Error | null, value?: any) => void;
/**
* @class StatsModel
@ -8,14 +10,14 @@ const StatsClient = require('./StatsClient');
* @classdesc Extend and overwrite how timestamps are normalized by minutes
* rather than by seconds
*/
class StatsModel extends StatsClient {
export default class StatsModel extends StatsClient {
/**
* Utility method to convert 2d array rows to columns, and vice versa
* See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
* @param {array} arrays - 2d array of integers
* @return {array} converted array
* @param arrays - 2d array of integers
* @return converted array
*/
_zip(arrays) {
_zip(arrays: number[][]) {
if (arrays.length > 0 && arrays.every(a => Array.isArray(a))) {
return arrays[0].map((_, i) => arrays.map(a => a[i]));
}
@ -24,10 +26,10 @@ class StatsModel extends StatsClient {
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
* @param d - Date instance
* @return timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
_normalizeTimestamp(d: Date) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
@ -36,19 +38,20 @@ class StatsModel extends StatsClient {
* override the method to get the count as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - each index contains the result of each batch command
* @param arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return {array} array of integers, ordered from most recent interval to
* @return array of integers, ordered from most recent interval to
* oldest interval with length of (expiry / interval)
*/
_getCount(arr) {
// @ts-expect-errors
_getCount(arr: [any, string | null][]): number[] {
const size = Math.floor(this._expiry / this._interval);
const array = arr.reduce((store, i) => {
let num = parseInt(i[1], 10);
let num = parseInt(i[1] ?? '', 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, []);
}, [] as number[]);
if (array.length < size) {
array.push(...Array(size - array.length).fill(0));
@ -59,12 +62,11 @@ class StatsModel extends StatsClient {
/**
* wrapper on `getStats` that handles a list of keys
* override the method to reduce the returned 2d array from `_getCount`
* @param {object} log - Werelogs request logger
* @param {array} ids - service identifiers
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
* @param log - Werelogs request logger
* @param ids - service identifiers
* @param cb - callback to call with the err/result
*/
getAllStats(log, ids, cb) {
getAllStats(log: Logger, ids: string[], cb: Callback) {
if (!this._redis) {
return cb(null, {});
}
@ -75,8 +77,8 @@ class StatsModel extends StatsClient {
'500s': Array(size).fill(0),
'sampleDuration': this._expiry,
};
const requests = [];
const errors = [];
const requests: any[] = [];
const errors: any[] = [];
if (ids.length === 0) {
return cb(null, statsRes);
@ -112,12 +114,11 @@ class StatsModel extends StatsClient {
/**
* Handles getting a list of global keys.
* @param {array} ids - Service identifiers
* @param {object} log - Werelogs request logger
* @param {function} cb - Callback
* @return {undefined}
* @param ids - Service identifiers
* @param log - Werelogs request logger
* @param cb - Callback
*/
getAllGlobalStats(ids, log, cb) {
getAllGlobalStats(ids: string[], log: Logger, cb: Callback) {
const reqsKeys = ids.map(key => (['get', key]));
return this._redis.batch(reqsKeys, (err, res) => {
const statsRes = { requests: 0 };
@ -145,29 +146,29 @@ class StatsModel extends StatsClient {
/**
* normalize date timestamp to the nearest hour
* @param {Date} d - Date instance
* @return {number} timestamp - normalized to the nearest hour
* @param d - Date instance
* @return timestamp - normalized to the nearest hour
*/
normalizeTimestampByHour(d) {
normalizeTimestampByHour(d: Date) {
return d.setMinutes(0, 0, 0);
}
/**
* get previous hour to date given
* @param {Date} d - Date instance
* @return {number} timestamp - one hour prior to date passed
* @param d - Date instance
* @return timestamp - one hour prior to date passed
*/
_getDatePreviousHour(d) {
_getDatePreviousHour(d: Date) {
return d.setHours(d.getHours() - 1);
}
/**
* get list of sorted set key timestamps
* @param {number} epoch - epoch time
* @return {array} array of sorted set key timestamps
* @param epoch - epoch time
* @return array of sorted set key timestamps
*/
getSortedSetHours(epoch) {
const timestamps = [];
getSortedSetHours(epoch: number) {
const timestamps: number[] = [];
let date = this.normalizeTimestampByHour(new Date(epoch));
while (timestamps.length < 24) {
timestamps.push(date);
@ -178,22 +179,26 @@ class StatsModel extends StatsClient {
/**
* get the normalized hour timestamp for given epoch time
* @param {number} epoch - epoch time
* @return {string} normalized hour timestamp for given time
* @param epoch - epoch time
* @return normalized hour timestamp for given time
*/
getSortedSetCurrentHour(epoch) {
getSortedSetCurrentHour(epoch: number) {
return this.normalizeTimestampByHour(new Date(epoch));
}
/**
* helper method to add element to a sorted set, applying TTL if new set
* @param {string} key - name of key
* @param {integer} score - score used to order set
* @param {string} value - value to store
* @param {callback} cb - callback
* @return {undefined}
* @param key - name of key
* @param score - score used to order set
* @param value - value to store
* @param cb - callback
*/
addToSortedSet(key, score, value, cb) {
addToSortedSet(
key: string,
score: number,
value: string,
cb: (error: Error | null, value?: any) => void,
) {
this._redis.exists(key, (err, resCode) => {
if (err) {
return cb(err);
@ -206,14 +211,14 @@ class StatsModel extends StatsClient {
const ttl = Math.ceil(
(msInADay - (Date.now() - nearestHour)) / 1000);
const cmds = [
['zadd', key, score, value],
['expire', key, ttl],
['zadd', key, score.toString(), value],
['expire', key, ttl.toString()],
];
return this._redis.batch(cmds, (err, res) => {
if (err) {
return cb(err);
}
const cmdErr = res.find(r => r[0] !== null);
const cmdErr = res.find((r: any) => r[0] !== null);
if (cmdErr) {
return cb(cmdErr);
}
@ -225,5 +230,3 @@ class StatsModel extends StatsClient {
});
}
}
module.exports = StatsModel;
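The subclass is used the same way, but counts come back as per-interval arrays rather than single totals. A sketch:

const model = new StatsModel(client, 300, 86400);
model.addToSortedSet('myset', Date.now(), 'some-value', (err, res) => {
    // a new set is created with a TTL of roughly one day from the nearest hour
});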


@ -1,40 +0,0 @@
const promClient = require('prom-client');
const collectDefaultMetricsIntervalMs =
process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS !== undefined ?
Number.parseInt(process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS, 10) :
10000;
promClient.collectDefaultMetrics({ timeout: collectDefaultMetricsIntervalMs });
class ZenkoMetrics {
static createCounter(params) {
return new promClient.Counter(params);
}
static createGauge(params) {
return new promClient.Gauge(params);
}
static createHistogram(params) {
return new promClient.Histogram(params);
}
static createSummary(params) {
return new promClient.Summary(params);
}
static getMetric(name) {
return promClient.register.getSingleMetric(name);
}
static asPrometheus() {
return promClient.register.metrics();
}
static asPrometheusContentType() {
return promClient.register.contentType;
}
}
module.exports = ZenkoMetrics;


@ -0,0 +1,35 @@
import promClient from 'prom-client';
export default class ZenkoMetrics {
static createCounter(params: promClient.CounterConfiguration<string>) {
return new promClient.Counter(params);
}
static createGauge(params: promClient.GaugeConfiguration<string>) {
return new promClient.Gauge(params);
}
static createHistogram(params: promClient.HistogramConfiguration<string>) {
return new promClient.Histogram(params);
}
static createSummary(params: promClient.SummaryConfiguration<string>) {
return new promClient.Summary(params);
}
static getMetric(name: string) {
return promClient.register.getSingleMetric(name);
}
static async asPrometheus() {
return promClient.register.metrics();
}
static asPrometheusContentType() {
return promClient.register.contentType;
}
static collectDefaultMetrics() {
return promClient.collectDefaultMetrics();
}
}
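A usage sketch of the wrapper, using standard prom-client configurations:

const counter = ZenkoMetrics.createCounter({
    name: 'example_requests_total',
    help: 'Total requests received',
});
counter.inc();
// asPrometheus() is now async, matching register.metrics() in prom-client v13+
const body = await ZenkoMetrics.asPrometheus();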

lib/metrics/index.ts (Normal file, 4 lines)

@ -0,0 +1,4 @@
export { default as StatsClient } from './StatsClient';
export { default as StatsModel } from './StatsModel';
export { default as RedisClient } from './RedisClient';
export { default as ZenkoMetrics } from './ZenkoMetrics';


@ -1,23 +1,35 @@
const errors = require('../errors');
import errors from '../errors'
const validServices = {
aws: ['s3', 'iam', 'sts', 'ring'],
scality: ['utapi', 'sso'],
};
class ARN {
export default class ARN {
_partition: string;
_service: string;
_region: string | null;
_accountId?: string | null;
_resource: string;
/**
*
* Create an ARN object from its individual components
*
* @constructor
* @param {string} partition - ARN partition (e.g. 'aws')
* @param {string} service - service name in partition (e.g. 's3')
* @param {string} [region] - AWS region
* @param {string} [accountId] - AWS 12-digit account ID
* @param {string} resource - AWS resource path (e.g. 'foo/bar')
* @param partition - ARN partition (e.g. 'aws')
* @param service - service name in partition (e.g. 's3')
* @param [region] - AWS region
* @param [accountId] - AWS 12-digit account ID
* @param resource - AWS resource path (e.g. 'foo/bar')
*/
constructor(partition, service, region, accountId, resource) {
constructor(
partition: string,
service: string,
region: string | undefined | null,
accountId: string | undefined | null,
resource: string,
) {
this._partition = partition;
this._service = service;
this._region = region || null;
@ -25,7 +37,7 @@ class ARN {
this._resource = resource;
}
static createFromString(arnStr) {
static createFromString(arnStr: string) {
const [arn, partition, service, region, accountId,
resourceType, resource] = arnStr.split(':');
@ -102,5 +114,3 @@ class ARN {
.join(':');
}
}
module.exports = ARN;
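A short round-trip sketch, assuming toString() joins the components back with ':' as shown above:

const arn = new ARN('aws', 's3', null, '123456789012', 'mybucket/myobject');
arn.toString(); // 'arn:aws:s3::123456789012:mybucket/myobject'
ARN.createFromString(arn.toString()); // parses back, validating the service against validServices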


@ -1,22 +1,36 @@
const { legacyLocations } = require('../constants');
const escapeForXml = require('../s3middleware/escapeForXml');
import { RequestLogger } from 'werelogs';
import { legacyLocations } from '../constants';
import escapeForXml from '../s3middleware/escapeForXml';
type CloudServerConfig = any;
export default class BackendInfo {
_config: CloudServerConfig;
_requestEndpoint: string;
_objectLocationConstraint?: string;
_bucketLocationConstraint?: string;
_legacyLocationConstraint?: string;
class BackendInfo {
/**
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @constructor
* @param {object} config - CloudServer config containing list of locations
* @param {string | undefined} objectLocationConstraint - location constraint
* @param config - CloudServer config containing list of locations
* @param objectLocationConstraint - location constraint
* for object based on user meta header
* @param {string | undefined } bucketLocationConstraint - location
* @param bucketLocationConstraint - location
* constraint for bucket based on bucket metadata
* @param {string} requestEndpoint - endpoint to which request was made
* @param {string | undefined } legacyLocationConstraint - legacy location
* constraint
* @param requestEndpoint - endpoint to which request was made
* @param legacyLocationConstraint - legacy location constraint
*/
constructor(config, objectLocationConstraint, bucketLocationConstraint,
requestEndpoint, legacyLocationConstraint) {
constructor(
config: CloudServerConfig,
objectLocationConstraint: string | undefined,
bucketLocationConstraint: string | undefined,
requestEndpoint: string,
legacyLocationConstraint: string | undefined,
) {
this._config = config;
this._objectLocationConstraint = objectLocationConstraint;
this._bucketLocationConstraint = bucketLocationConstraint;
@@ -27,15 +41,18 @@ class BackendInfo {
/**
* validate proposed location constraint against config
* @param {object} config - CloudServer config
* @param {string | undefined} locationConstraint - value of user
* @param config - CloudServer config
* @param locationConstraint - value of user
* metadata location constraint header or bucket location constraint
* @param {object} log - werelogs logger
* @return {boolean} - true if valid, false if not
* @param log - werelogs logger
* @return - true if valid, false if not
*/
static isValidLocationConstraint(config, locationConstraint, log) {
if (Object.keys(config.locationConstraints).
indexOf(locationConstraint) < 0) {
static isValidLocationConstraint(
config: CloudServerConfig,
locationConstraint: string | undefined,
log: RequestLogger,
) {
if (!locationConstraint || !(locationConstraint in config.locationConstraints)) {
log.trace('proposed locationConstraint is invalid',
{ locationConstraint });
return false;
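
Beyond the type annotations, the membership test itself changed shape: the old code scanned the array returned by Object.keys(), while the new code guards against an undefined constraint and then uses the in operator. A minimal sketch of the equivalence (the config literal is illustrative):

const config = { locationConstraints: { 'us-east-1': {} } };
// old form: builds a fresh key array on every call, then scans it linearly
const oldStyle = Object.keys(config.locationConstraints).indexOf('us-east-1') >= 0; // true
// new form: direct property lookup; the added guard keeps undefined out of the test
const newStyle = 'us-east-1' in config.locationConstraints; // true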
@@ -45,14 +62,17 @@ class BackendInfo {
/**
* validate that request endpoint is listed in the restEndpoint config
* @param {object} config - CloudServer config
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if present, false if not
* @param config - CloudServer config
* @param requestEndpoint - request endpoint
* @param log - werelogs logger
* @return true if present, false if not
*/
static isRequestEndpointPresent(config, requestEndpoint, log) {
if (Object.keys(config.restEndpoints).
indexOf(requestEndpoint) < 0) {
static isRequestEndpointPresent(
config: CloudServerConfig,
requestEndpoint: string,
log: RequestLogger,
) {
if (!(requestEndpoint in config.restEndpoints)) {
log.trace('requestEndpoint does not match config restEndpoints',
{ requestEndpoint });
return false;
@@ -63,14 +83,18 @@ class BackendInfo {
/**
* validate that locationConstraint for request Endpoint matches
* one config locationConstraint
* @param {object} config - CloudServer config
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if matches, false if not
* @param config - CloudServer config
* @param requestEndpoint - request endpoint
* @param log - werelogs logger
* @return - true if matches, false if not
*/
static isRequestEndpointValueValid(config, requestEndpoint, log) {
if (Object.keys(config.locationConstraints).
indexOf(config.restEndpoints[requestEndpoint]) < 0) {
static isRequestEndpointValueValid(
config: CloudServerConfig,
requestEndpoint: string,
log: RequestLogger,
) {
const restEndpoint = config.restEndpoints[requestEndpoint];
if (!(restEndpoint in config.locationConstraints)) {
log.trace('the default locationConstraint for request' +
'Endpoint does not match any config locationConstraint',
{ requestEndpoint });
@@ -81,11 +105,11 @@ class BackendInfo {
/**
* validate that s3 server is running with a file or memory backend
* @param {object} config - CloudServer config
* @param {object} log - werelogs logger
* @return {boolean} - true if running with file/mem backend, false if not
* @param config - CloudServer config
* @param log - werelogs logger
* @return - true if running with file/mem backend, false if not
*/
static isMemOrFileBackend(config, log) {
static isMemOrFileBackend(config: CloudServerConfig, log: RequestLogger) {
if (config.backends.data === 'mem' || config.backends.data === 'file') {
log.trace('use data backend for the location', {
dataBackend: config.backends.data,
@@ -103,12 +127,16 @@ class BackendInfo {
* data backend for the location.
* - if locationConstraint for request Endpoint does not match
* any config locationConstraint, we will return an error
* @param {object} config - CloudServer config
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if valid, false if not
* @param config - CloudServer config
* @param requestEndpoint - request endpoint
* @param log - werelogs logger
* @return - true if valid, false if not
*/
static isValidRequestEndpointOrBackend(config, requestEndpoint, log) {
static isValidRequestEndpointOrBackend(
config: CloudServerConfig,
requestEndpoint: string,
log: RequestLogger,
) {
if (!BackendInfo.isRequestEndpointPresent(config, requestEndpoint,
log)) {
return BackendInfo.isMemOrFileBackend(config, log);
@@ -119,17 +147,22 @@ class BackendInfo {
/**
* validate controlling BackendInfo Parameter
* @param {object} config - CloudServer config
* @param {string | undefined} objectLocationConstraint - value of user
* @param config - CloudServer config
* @param objectLocationConstraint - value of user
* metadata location constraint header
* @param {string | null} bucketLocationConstraint - location
* @param bucketLocationConstraint - location
* constraint from bucket metadata
* @param {string} requestEndpoint - endpoint of request
* @param {object} log - werelogs logger
* @return {object} - location constraint validity
* @param requestEndpoint - endpoint of request
* @param log - werelogs logger
* @return - location constraint validity
*/
static controllingBackendParam(config, objectLocationConstraint,
bucketLocationConstraint, requestEndpoint, log) {
static controllingBackendParam(
config: CloudServerConfig,
objectLocationConstraint: string | undefined,
bucketLocationConstraint: string | null,
requestEndpoint: string,
log: RequestLogger,
) {
if (objectLocationConstraint) {
if (BackendInfo.isValidLocationConstraint(config,
objectLocationConstraint, log)) {
@@ -175,16 +208,16 @@ class BackendInfo {
/**
* Return legacyLocationConstraint
* @param {object} config CloudServer config
* @return {string | undefined} legacyLocationConstraint;
* @param config CloudServer config
* @return legacyLocationConstraint;
*/
static getLegacyLocationConstraint(config) {
static getLegacyLocationConstraint(config: CloudServerConfig) {
return legacyLocations.find(ll => config.locationConstraints[ll]);
}
/**
* Return objectLocationConstraint
* @return {string | undefined} objectLocationConstraint;
* @return objectLocationConstraint;
*/
getObjectLocationConstraint() {
return this._objectLocationConstraint;
@@ -192,7 +225,7 @@ class BackendInfo {
/**
* Return bucketLocationConstraint
* @return {string | undefined} bucketLocationConstraint;
* @return bucketLocationConstraint;
*/
getBucketLocationConstraint() {
return this._bucketLocationConstraint;
@@ -200,7 +233,7 @@ class BackendInfo {
/**
* Return requestEndpoint
* @return {string} requestEndpoint;
* @return requestEndpoint;
*/
getRequestEndpoint() {
return this._requestEndpoint;
@@ -215,9 +248,9 @@ class BackendInfo {
* (4) default locationConstraint for requestEndpoint if requestEndpoint
* is listed in restEndpoints in config.json
* (5) default data backend
* @return {string} locationConstraint;
* @return locationConstraint;
*/
getControllingLocationConstraint() {
getControllingLocationConstraint(): string {
const objectLC = this.getObjectLocationConstraint();
const bucketLC = this.getBucketLocationConstraint();
const reqEndpoint = this.getRequestEndpoint();
@@ -236,5 +269,3 @@ class BackendInfo {
return this._config.backends.data;
}
}
module.exports = BackendInfo;
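
To make the precedence rules above concrete, a small sketch that constructs a BackendInfo directly (the config literal and names are illustrative; production callers are expected to validate through controllingBackendParam first):

import BackendInfo from './BackendInfo'; // module path depends on the tree layout

const config: any = {
    locationConstraints: { 'us-east-1': {}, 'azure-blob': {} },
    restEndpoints: { 's3.example.com': 'us-east-1' },
    backends: { data: 'file' },
};

const info = new BackendInfo(config, 'azure-blob', undefined, 's3.example.com', undefined);
info.getObjectLocationConstraint();      // 'azure-blob'
info.getControllingLocationConstraint(); // 'azure-blob': the object-level constraint wins (rule 1)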

Some files were not shown because too many files have changed in this diff.