Compare commits


986 Commits

Author SHA1 Message Date
Vitaliy Filippov b5711e9cbf Use fs.readFileSync to read config file instead of require 2024-08-13 11:19:38 +03:00
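A minimal sketch of the pattern this commit describes, assuming a JSON config file (the path and shape are illustrative, not taken from the repo):

```js
// Sketch: load a JSON config with fs.readFileSync instead of require().
// Unlike require(), this does not cache the file in the module registry
// and never executes code from the config file.
const fs = require('fs');

function loadConfig(path) {
    return JSON.parse(fs.readFileSync(path, 'utf8'));
}

const config = loadConfig('./config.json'); // illustrative path
```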
Vitaliy Filippov 36dc6298d2 Use webpack to pack 2024-08-13 02:20:08 +03:00
Vitaliy Filippov bc2d637578 Add installation instructions for Vitastor backend 2024-08-12 01:36:42 +03:00
Vitaliy Filippov b543695048 Add example Vitastor backend configs 2024-08-11 17:24:05 +03:00
Vitaliy Filippov 90024d044d Configure "legacy" werelogs because otherwise MultipleBackendGateway was skipping messages 2024-08-04 01:22:48 +03:00
Vitaliy Filippov 451ab33f68 Use config.workers instead of config.clusters 2024-08-03 14:10:39 +03:00
Vitaliy Filippov c86107e912 Add authdata config file reference to config.json 2024-08-03 01:36:01 +03:00
Vitaliy Filippov 0a5962f256 Require scality kms only if kms backend is scality 2024-08-03 01:29:04 +03:00
Vitaliy Filippov 0e292791c6 Setup backends in config.json 2024-08-02 01:45:38 +03:00
Vitaliy Filippov fc07729bd0 Use ^versions 2024-08-02 01:44:13 +03:00
Vitaliy Filippov 4527dd6795 Do not store actual configs in git 2024-08-01 15:52:02 +03:00
Vitaliy Filippov 05fb581023 Use x-amz-storage-class instead of x-amz-meta-scal-location-constraint
FIXME: Ideally, both locations and storage classes should be supported
2024-07-28 02:00:38 +03:00
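A hedged sketch of the header switch this commit describes; only the two header names come from the message, the lookup logic is an assumption:

```js
// Sketch: pick the backend location from the standard
// x-amz-storage-class header instead of the Scality-specific
// x-amz-meta-scal-location-constraint header.
function locationFromHeaders(headers, locationConstraints) {
    const storageClass = headers['x-amz-storage-class'];
    if (storageClass && locationConstraints[storageClass] !== undefined) {
        // a storage class naming a configured location wins
        return storageClass;
    }
    // fall back to the legacy location header if present (assumption)
    return headers['x-amz-meta-scal-location-constraint'];
}
```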
Vitaliy Filippov 956739a04e Use internal vaultclient for utapi server 2024-07-23 16:32:48 +03:00
Vitaliy Filippov 7ad0888a66 Change git dependency URLs 2024-07-21 17:36:47 +03:00
Vitaliy Filippov bf01ba4ed1 Change git dependency URLs 2024-07-21 15:26:06 +03:00
Vitaliy Filippov ab019e7e50 Make vaultclient dependency optional 2024-07-21 14:19:54 +03:00
Vitaliy Filippov 3797695e74 Make bucketclient dependency optional 2024-07-18 11:17:05 +03:00
Vitaliy Filippov c8084196c4 Remove remote management 2024-07-16 20:34:11 +03:00
bert-e b72e918ff9 Merge branch 'w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.8/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 22887f47d8 Merge branch 'w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 0cd10a73f3 Merge branch 'w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
bert-e e139406612 Merge branch 'bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
Maha Benzekri d91853a38b
processBucketPolicy fixup for objectDelete
Introduced by https://github.com/scality/cloudserver/pull/5580,
we now send a requestContext with no specific resource instead
of "null", which results in a policy evaluation error: the
implicit deny we get for the requestType "objectDelete" causes
the processed result to be false, so an empty array of objects
is sent to Vault, resulting in a deny even when the policy
allows the action on specific objects.

Linked Issue : https://scality.atlassian.net/browse/CLDSRV-555
2024-07-15 14:20:08 +02:00
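A sketch of the fix's idea with hypothetical names (the helper and context shape are assumptions; only the "objectDelete" requestType and the empty-array symptom come from the message):

```js
// Sketch: build one request context per object key for policy
// evaluation, instead of a single context with no specific resource
// (which produced an implicit deny and an empty object list to Vault).
function buildDeleteRequestContexts(bucketName, objectKeys) {
    return objectKeys.map(key => ({
        apiMethod: 'objectDelete',  // requestType from the message
        bucketName,
        specificResource: { key },  // evaluate each key individually
    }));
}
```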
Mickael Bourgois a7e798f909
CLDSRV-544: bump version 8.8.27 2024-07-03 19:08:02 +02:00
Mickael Bourgois 3a1ba29869
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-544-stderr' into w/8.8/improvement/CLDSRV-544-stderr 2024-07-03 19:07:41 +02:00
Mickael Bourgois dbb9b6d787
CLDSRV-544: bump version 8.7.48 2024-07-03 18:52:35 +02:00
Mickael Bourgois fce76f0934
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-544-stderr' into w/8.7/improvement/CLDSRV-544-stderr 2024-07-03 18:52:20 +02:00
Mickael Bourgois 0e39aaac09
CLDSRV: bump version 8.6.27 2024-07-03 18:48:28 +02:00
Mickael Bourgois 0b14c93fac
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-544-stderr' into w/8.6/improvement/CLDSRV-544-stderr 2024-07-03 18:48:12 +02:00
Mickael Bourgois ab2960bbf4
CLDSRV-544: bump version 2024-07-01 12:28:23 +02:00
Mickael Bourgois 7305b112e2
Merge remote-tracking branch 'origin/improvement/CLDSRV-544-stderr' into w/7.70/improvement/CLDSRV-544-stderr 2024-07-01 12:28:07 +02:00
Mickael Bourgois cd9e2e757b
CLDSRV-544: bump version 2024-06-30 21:15:52 +02:00
Mickael Bourgois ca0904f584
CLDSRV-544 Add timestamp on stderr utapi v1 2024-06-30 21:15:52 +02:00
Mickael Bourgois 0dd3dd35e6
CLDSRV-544: Add timestamp on stderr
The previous version would not exit the cluster master process.
Now it exits as it should.
2024-06-30 21:15:52 +02:00
bert-e bf7e4b7e23 Merge branch 'w/8.7/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:30 +00:00
bert-e 92f4794727 Merge branch 'w/8.6/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:29 +00:00
Jonathan Gramain c6ef85e3a1 Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-fixup-version' into w/8.6/bugfix/CLDSRV-547-fixup-version 2024-06-27 14:05:27 -07:00
Jonathan Gramain c0fe0cfbcf CLDSRV-547 [fixup] bump version to 7.70.49
Fixup the version, as 7.70.48 was already tagged
2024-06-27 11:42:37 -07:00
bert-e 9c936f2b83 Merge branch 'w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
bert-e d26bac2ebc Merge branch 'w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
Jonathan Gramain cfb9db5178 Merge branch 'w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:53:41 -07:00
Jonathan Gramain 2ce004751a Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:32:45 -07:00
Jonathan Gramain 539219e046 CLDSRV-547 bump cloudserver version 2024-06-27 10:27:45 -07:00
Jonathan Gramain be49e55db5 bf: CLDSRV-547 update redis config for utapi reindex
Update the redis configuration of utapi reindex to include a list of
sentinels, rather than a single sentinel (previously set to
"localhost" in Federation).

I took this opportunity to cleanup tech debt related to parsing redis
configuration, using "joi" for validation instead and making it common
across the three different places where redis config is parsed. Not
doing so would have required yet another copy-paste of dumb and
error-prone validation code. Added unit tests for the new validation.
2024-06-27 10:25:10 -07:00
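A minimal sketch of the kind of shared joi validation described here; the schema fields are assumptions based on the message, not the actual arsenal/cloudserver code:

```js
// Sketch: validate a redis config that accepts either a single host or
// a list of sentinels, using joi as the message describes.
const joi = require('joi');

const redisSchema = joi.object({
    name: joi.string(),                      // sentinel group name
    sentinels: joi.array().items(joi.object({
        host: joi.string().required(),
        port: joi.number().required(),
    })).min(1),
    host: joi.string(),
    port: joi.number(),
}).xor('sentinels', 'host');                 // exactly one of the two

function parseRedisConfig(conf) {
    // throws a descriptive ValidationError on bad input
    return joi.attempt(conf, redisSchema);
}
```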
bert-e e6b240421b Merge branch 'w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.8/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
bert-e 81739e3ecf Merge branch 'w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
Jonathan Gramain c475503248 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-25 18:40:18 -07:00
bert-e 7acbd5d2fb Merge branch 'bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:39:02 +00:00
Jonathan Gramain 8d726322e5 CLDSRV-549 restore 'git.commit-sha' and 'git.repository' labels
Add back the 'git.commit-sha' and 'git.repository' labels to pushed
images, which were not attached anymore after the change of registry.
2024-06-25 18:26:54 -07:00
williamlardier 4f7aa54886 CLDSRV-541: bump project version 2024-06-13 13:58:54 +02:00
williamlardier 0117a5b0b4 CLDSRV-541: add unit test for deleteobjects authz 2024-06-13 13:58:54 +02:00
williamlardier f679831ba2 CLDSRV-541: update unit tests 2024-06-13 13:56:18 +02:00
williamlardier bb162ca7d3 CLDSRV-541: send request context in deleteobjects to get quota information 2024-06-13 11:58:33 +02:00
williamlardier 0c6dfc7b6e CLDSRV-537: bump project version 2024-05-31 13:47:26 +02:00
williamlardier d608d849df CLDSRV-537: bump checkout version for alerts 2024-05-31 13:47:26 +02:00
williamlardier 2cb63f58d4 CLDSRV-537: bump action-prom-render-test version 2024-05-31 13:44:05 +02:00
williamlardier 51585712f4 CLDSRV-537: do not raise quota error if no quota is defined
This ensures fresh installs, or buckets that get emptied, do
not trigger the alert by mistake
2024-05-31 13:44:05 +02:00
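A sketch of the guard this message implies; all names here (getQuota, currentBytes) are illustrative assumptions:

```js
// Sketch: never raise a quota error when no quota is configured, so
// fresh installs and emptied buckets cannot trip the alert.
function checkQuota(bucket, inflightBytes) {
    const quota = bucket.getQuota();
    if (!quota || quota <= 0) {
        return { exceeded: false }; // no quota defined: nothing to do
    }
    return { exceeded: bucket.currentBytes + inflightBytes > quota };
}
```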
bert-e 61eb24e46f Merge branch 'w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a34b162782 Merge branch 'w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.8/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a9e50fe046 Merge branch 'w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
bert-e 4150a8432e Merge branch 'bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
Taylor McKinnon 7e70ff9cbc Disable git clone protection to work around git bug affecting git-lfs 2024-05-22 10:05:17 -07:00
bert-e 09dc45289c Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:31 +00:00
bert-e 47c628e0e1 Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:30 +00:00
Nicolas Humbert a1f4d3fe8a CLDSRV-529 use shorthand utapi dependency format 2024-05-17 15:10:40 +02:00
williamlardier 926242b077 CLDSRV-553: bump project version 2024-05-17 12:35:59 +02:00
williamlardier aa2aac5db3 CLDSRV-553: functional restore test to simulate cold backend calls 2024-05-17 12:35:59 +02:00
williamlardier f2e2d82e51 CLDSRV-553: unit test the onlyCheckQuota flag 2024-05-17 12:35:59 +02:00
williamlardier 88ad86b0c6 CLDSRV-553: adapt calls to quota evaluation
When the API is being called by a cold backend, the
x-scal-s3-version-id header is set. In this case, the quotas must
be evaluated with a 0 inflight.
2024-05-17 12:35:59 +02:00
bert-e 8f25892247 Merge branch 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:32 +00:00
bert-e 9ac207187b Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:31 +00:00
Anurag Mittal 624a04805f
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-529/bump_utapi' into w/8.6/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:40:00 +02:00
Anurag Mittal ba99933765
Merge remote-tracking branch 'origin/bugfix/CLDSRV-529/bump_utapi' into w/7.70/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:36:36 +02:00
williamlardier 38d1ac1d2c CLDSRV-553: conditionally force evaluating quotas with 0 inflight
A corner case was found, where any PUT from the cold backend would
fail if the quota is already exceeded, as the storage was reserved
for the restore, but the restore itself requires some more bytes
as inflights when evaluating quotas. By passing a flag in the quota
evaluation function, we ensure that we can, in these cases,
evaluate the quotas with 0 inflight.
2024-05-17 08:06:35 +02:00
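A sketch of the flag's effect; only the header name and the onlyCheckQuota flag name appear in the messages, the wiring is an assumption:

```js
// Sketch: requests from the cold backend carry the
// x-scal-s3-version-id header; for them, quotas are evaluated with
// 0 inflight bytes because the storage was already reserved when the
// restore was initiated.
function inflightBytesFor(request, requestedBytes) {
    const onlyCheckQuota =
        request.headers['x-scal-s3-version-id'] !== undefined;
    return onlyCheckQuota ? 0 : requestedBytes;
}
```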
Taylor McKinnon 4f34a34a11 bf(CLDSRV-529): Bump version 2024-05-16 12:19:45 -07:00
Taylor McKinnon 53f2a159fa bf(CLDSRV-529): Bump utapi 2024-05-16 12:18:24 -07:00
Maha Benzekri 63f6a75a86
CLDSRV-530: bump project version 2024-05-10 18:36:01 +02:00
Maha Benzekri 41acc7968e
CLDSRV-530: from accountwithQuota to accountWithQuotaCount 2024-05-10 18:32:07 +02:00
williamlardier c98c5207fc CLDSRV-520: bump project version 2024-05-10 09:51:02 +02:00
williamlardier 615ee393a4 CLDSRV-520: fix federation image with tsc 2024-05-10 09:51:02 +02:00
williamlardier 97dfc699aa CLDSRV-520: bump cloudserver version 2024-05-10 08:12:35 +02:00
williamlardier 76786282d1 CLDSRV-520: deduplicate quota logic 2024-05-10 08:12:35 +02:00
williamlardier a19d6524be CLDSRV-520: generic quota retrieval latency dashboard 2024-05-10 08:12:35 +02:00
williamlardier bbf6dfba22 CLDSRV-520: monitor quota cleanup
The finalization of the quota logic will always be executed. Tests
are added to ensure the inflights are only cleaned up when they are
enabled and an error happened in the API.
In any case, this ensures we monitor quotas in a single place,
for each executed action, and correctly compute the total
duration of the quota impact on the API.
2024-05-10 08:11:27 +02:00
williamlardier f0663fd507 CLDSRV-520: add dashboards 2024-05-10 08:11:27 +02:00
williamlardier d4decbbd6c CLDSRV-520: add alerts 2024-05-10 08:11:27 +02:00
williamlardier 288b2b7b87 CLDSRV-520: observe number of buckets and accounts with quota 2024-05-10 08:11:27 +02:00
williamlardier ccf9b62e59 CLDSRV-520: observe metrics during quota evaluations 2024-05-10 08:11:27 +02:00
williamlardier 9fc2d552ae CLDSRV-520: add metrics for quota 2024-05-07 17:56:24 +02:00
williamlardier d7cc4cf7d5 CLDSRV-515: adapt dockerfile for scubaclient 2024-05-07 16:24:25 +02:00
williamlardier 334d33ef44 CLDSRV-515: unit testing 2024-05-07 16:24:25 +02:00
williamlardier 989b0214d9 CLDSRV-515: functional testing 2024-05-07 16:21:13 +02:00
williamlardier 04d0730f97 CLDSRV-515: clear inflights in case of quota exceeded
- If the quotas are evaluated successfully and inflights are
  enabled, the quota service will store the information and
  persist it until the next update of the utilization metrics.
  In this case, any API that fails after authorization would
  still leave the bytes counted, even though nothing was
  written. To overcome that, we call a function from the quota
  evaluation logic to erase anything that we wrote during the
  authorization.
2024-05-07 16:21:13 +02:00
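A sketch of the cleanup path described above, with hypothetical service methods (addInflights, removeInflights):

```js
// Sketch: if an API fails after authorization while inflights are
// enabled, undo the inflight bytes reserved during the quota check so
// they are not counted until the next utilization-metrics update.
async function runApiWithQuota(api, request, quotaService) {
    const reserved = await quotaService.addInflights(request);
    try {
        return await api(request);
    } catch (err) {
        if (reserved > 0) {
            await quotaService.removeInflights(request, reserved);
        }
        throw err;
    }
}
```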
williamlardier fbc642c022 CLDSRV-515: evaluate quotas
Quotas are evaluated:
- As part of the authorization process, after both the bucket and
  the object are authorized. The checks are skipped if the API does
  not need any quota evaluation, or if the inflight bytes are 0
  (i.e., no data added, so no need to check the quota).
- The Copy APIs will evaluate the quotas when the source object is
  checked. In this particular case, the action is objectGet, so a
  flag is passed to force the quota evaluation logic. A subsequent
  check is done in the logic.
- The restoreObject API has a special case where the extension of
  the restoration duration would still cause the evaluation of the
  quotas, causing a potential increase in the inflights stored. We
  detect this case and remove any added inflight.
2024-05-07 16:21:13 +02:00
williamlardier 104435f0b6 CLDSRV-515: implement the quota logic as a helper file 2024-05-07 16:21:13 +02:00
williamlardier a362ac202e CLDSRV-515: bootstrap scuba on startup 2024-05-07 16:21:13 +02:00
williamlardier 1277e58150 CLDSRV-515: create a wrapper for scubaclient and quota service 2024-05-07 16:21:13 +02:00
williamlardier 7727ccf5f0 CLDSRV-515: add configuration for quotas
- Quota service is generic. We only support scuba backend now,
  but we can add others later, if needed, as long as they share
  the same implementation as the scuba client.
- Scuba configuration is passed to the scubaclient tool.
- Ability to disable the inflights is provided. This changes the
  behavior of the quota checks, so that the inflights won't be
  part of the request to the utilization metrics services. This
  reduces the complexity of the quota evaluation logic in case
  of error, as no cleanup will be needed in this case. This,
  however, requires a backend that can provide up-to-date metrics
  (i.e., <2s).
2024-05-05 15:31:34 +02:00
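A sketch of what such a configuration stanza could look like; the keys are assumptions modeled on the description above, not the actual config.json schema:

```js
// Sketch: generic quota service configuration with a scuba backend.
const quotaConfig = {
    backend: 'scuba',       // only scuba is supported for now
    scuba: {                // passed through to the scubaclient tool
        host: 'localhost',
        port: 8100,         // illustrative port
    },
    // Disabling inflights removes the error-path cleanup entirely, but
    // requires utilization metrics fresher than ~2s.
    enableInflights: true,
};

module.exports = quotaConfig;
```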
williamlardier 71860fc90c CLDSRV-515: do not recreate variable at every authz 2024-05-05 15:31:04 +02:00
williamlardier e504b52de7 CLDSRV-515: bump arsenal and vaultclient, introduce scubaclient 2024-05-02 15:09:23 +02:00
Maha Benzekri b369a47c4d CLDSRV-516: add tests 2024-05-02 14:44:31 +02:00
Maha Benzekri b4fa81e832 CLDSRV-516: implement BucketDeleteQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 1e03d53879 CLDSRV-516: implement BucketGetQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 63e502d419 CLDSRV-516: implement UpdateBucketQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri d2a31dc20a CLDSRV-516: specify the signature version of old auth tests
This is unrelated to the quotas, but blocks the CI.
2024-05-02 14:44:28 +02:00
Maha Benzekri f24411875f CLDSRV-516: introduce quota APIs in router 2024-05-02 14:28:56 +02:00
Maha Benzekri 4fd7faa6a3 CLDSRV-516: bump arsenal version 2024-05-02 14:27:44 +02:00
Francois Ferrand 118aaba702
Use sproxyd from ghcr
Issue: CLDSRV-524
2024-04-18 20:38:37 +02:00
Francois Ferrand e4442fdc52
Merge branch 'w/8.7/improvement/CLDSRV-524' into w/8.8/improvement/CLDSRV-524 2024-04-16 18:36:03 +02:00
Francois Ferrand 7fa199741f
Merge branch 'w/8.6/improvement/CLDSRV-524' into w/8.7/improvement/CLDSRV-524 2024-04-16 18:35:32 +02:00
Francois Ferrand f7f95af78f
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 18:34:49 +02:00
Francois Ferrand 2dc053a784
Merge branch 'w/7.70/improvement/CLDSRV-524' into w/8.6/improvement/CLDSRV-524 2024-04-16 17:57:54 +02:00
Francois Ferrand cc9bb9047e
Merge branch 'improvement/CLDSRV-524' into w/7.70/improvement/CLDSRV-524 2024-04-16 16:58:57 +02:00
Francois Ferrand b824fc0828
Use official docker build steps
The docker-build step from `scality/workflows/` fails to log in to
ghcr, as it picks up the old registry creds.

Issue: CLDSRV-524
2024-04-16 16:54:51 +02:00
Francois Ferrand a2e6d91cf2
Build pykmip image
Issue: CLDSRV-524
2024-04-16 16:54:41 +02:00
Francois Ferrand c1060853dd
Upgrade actions
- artifacts@v4
- cache@v4
- checkout@v4
- codeql@v3
- dependency-review@v4
- login@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5

Issue: CLDSRV-524
2024-04-16 16:54:23 +02:00
Francois Ferrand 227d6edd09
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 16:54:02 +02:00
bert-e b4754c68ea Merge branches 'w/8.8/bugfix/CLDSRV-518/duplication' and 'q/5548/8.7/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.8 2024-03-25 12:56:17 +00:00
bert-e 11aea5d93b Merge branches 'w/8.7/bugfix/CLDSRV-518/duplication' and 'q/5548/8.6/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.7 2024-03-25 12:56:17 +00:00
bert-e 0c50a5952f Merge branches 'w/8.6/bugfix/CLDSRV-518/duplication' and 'q/5548/7.70/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.6 2024-03-25 12:56:16 +00:00
bert-e 4a32e05855 Merge branches 'w/7.70/bugfix/CLDSRV-518/duplication' and 'q/5548/7.10/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/7.70 2024-03-25 12:56:16 +00:00
bert-e 402ed21b14 Merge branch 'bugfix/CLDSRV-518/duplication' into q/7.10 2024-03-25 12:56:16 +00:00
Nicolas Humbert a22719ed47 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-518/duplication' into w/8.8/bugfix/CLDSRV-518/duplication 2024-03-20 08:48:00 +01:00
Nicolas Humbert 41975d539d Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-518/duplication' into w/8.7/bugfix/CLDSRV-518/duplication 2024-03-19 18:12:42 +01:00
Nicolas Humbert c6724eb811 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-518/duplication' into w/8.6/bugfix/CLDSRV-518/duplication 2024-03-19 05:54:35 +01:00
Nicolas Humbert d027006938 Merge remote-tracking branch 'origin/bugfix/CLDSRV-518/duplication' into w/7.70/bugfix/CLDSRV-518/duplication 2024-03-14 20:50:08 +01:00
Nicolas Humbert 92cfd47572 CLDSRV-518 Duplication of version ID in metadata 2024-03-14 16:33:25 +01:00
bert-e 8796bf0f44 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
bert-e 735fcd04ef Merge branch 'w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
Jonathan Gramain c5522685b2 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 14:04:21 -07:00
Jonathan Gramain 48df7df271 Merge remote-tracking branch 'origin/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 14:02:52 -07:00
Jonathan Gramain e028eb227f CLDSRV-513 bump cloudserver 2024-03-13 14:00:55 -07:00
Nicolas Humbert caf3146662 CLDSRV-518 fix Ruby dependency: excon
(cherry picked from commit cc1607eaaecb97ab5c48da15f1b1449fe7a4680f)
2024-03-13 13:58:41 -07:00
bert-e 1dee707eb8 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 17:36:39 +00:00
Jonathan Gramain 2c8d69c20a Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 10:18:39 -07:00
Jonathan Gramain 0b2b6ceeb5 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 09:46:43 -07:00
Jonathan Gramain f4b3f39dc6 Merge remote-tracking branch 'origin/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 09:39:49 -07:00
Jonathan Gramain 84260340d0 CLDSRV-513 bump arsenal dependency 2024-03-13 09:34:30 -07:00
Jonathan Gramain e531abc346 bf: CLDSRV-513 fix request logger for batchDelete
Arsenal's `DataWrapper.batchDelete()` now already creates a request
logger on which it calls `end()` to get the elapsed time. Since
there's no need to create one before the call anymore, remove the
corresponding code.

Note that the main fix is the arsenal version bump which, by creating
a request logger, naturally fixes the forgotten case in
`checkHashMatchMD5`.
2024-03-13 09:31:10 -07:00
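For context, a sketch of the request-logger pattern the message refers to; the werelogs usage follows the library's common pattern, and the batchDelete wiring around it is an assumption:

```js
// Sketch: a werelogs request logger whose end() call logs the elapsed
// time. Arsenal's DataWrapper.batchDelete() now creates this itself,
// which is why CloudServer no longer needs to.
const werelogs = require('werelogs');

const logger = new werelogs.Logger('CloudServer');

function batchDelete(locations) {
    const reqLog = logger.newRequestLogger();
    // ... delete each location ...
    reqLog.end().info('batchDelete done', { count: locations.length });
}
```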
Jonathan Gramain 20f6e3089b CLDSRV-513 bump werelogs dependency 2024-03-13 09:31:10 -07:00
bert-e 9dc34f2155 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:14 +00:00
bert-e 08a4c3ade3 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:13 +00:00
Nicolas Humbert d5c731856b Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:51:36 +01:00
Nicolas Humbert 584c94692b Merge remote-tracking branch 'origin/bugfix/CLDSRV-501/putmetadata' into w/7.70/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:16:03 +01:00
Nicolas Humbert a0e5257c75 CLDSRV-501 bump arsenal 2024-03-07 10:09:28 +01:00
bert-e 5435c14116 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:40 +00:00
bert-e 38c44ea874 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:39 +00:00
Nicolas Humbert 4200346dd2 CLDSRV-501 skip tests related to Backbeat routes for replication 2024-03-01 17:16:36 +01:00
bert-e 5472d0da59 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
bert-e cdc0bb1128 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
Nicolas Humbert 795f8bcf1c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:44:42 +01:00
Nicolas Humbert 9371d8d734 Merge remote-tracking branch 'origin/bugfix/CLDSRV-501/putmetadata' into w/7.70/bugfix/CLDSRV-501/putmetadata 2024-02-29 08:56:30 +01:00
Nicolas Humbert 3f31c7f3a1 CLDSRV-501 PutMetadata should write metadata on top of a null version 2024-02-27 14:29:35 +01:00
KillianG 39cba3ee6c
Merge remote-tracking branch 'origin/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust' into w/8.8/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust 2024-02-27 11:31:55 +01:00
KillianG a00952712f
Bump 8.7.47
Issue: CLDSRV-512
2024-02-27 10:41:34 +01:00
KillianG a246e18e17
Update test for startRestore
Issue: CLDSRV-512
2024-02-27 10:26:19 +01:00
KillianG 3bb3a4d161
Use scaledMsPerDay when restore-adjust
Use scaledMsPerDay when restoring an object that has already been restored, to make time go faster for testing purposes

Issue: CLDSRV-512
2024-02-27 10:26:11 +01:00
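A sketch of the time-scaling idea; the branch name mentions a "TPF variable" (time progression factor), but the exact variable name below is an assumption:

```js
// Sketch: shrink a "day" by a time progression factor so restore
// expiry can be exercised quickly in tests.
const msPerDay = 24 * 60 * 60 * 1000;
const timeProgressionFactor =
    Number(process.env.TIME_PROGRESSION_FACTOR) || 1; // assumed name
const scaledMsPerDay = msPerDay / timeProgressionFactor;

function restoreExpiryDate(startDate, days) {
    return new Date(startDate.getTime() + days * scaledMsPerDay);
}
```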
bert-e c6ba7f981e Merge branches 'w/8.8/bugfix/CLDSRV-498/null' and 'q/5526/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.8 2024-02-21 13:57:14 +00:00
bert-e 69c82da878 Merge branches 'w/8.6/bugfix/CLDSRV-498/null' and 'q/5526/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.6 2024-02-21 13:57:13 +00:00
bert-e 762ae5a0ff Merge branches 'w/8.7/bugfix/CLDSRV-498/null' and 'q/5526/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.7 2024-02-21 13:57:13 +00:00
bert-e 89dfc794a6 Merge branch 'w/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/q/7.70 2024-02-21 13:57:12 +00:00
bert-e 3205d117f5 Merge branches 'w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.8 2024-02-20 13:05:07 +00:00
bert-e 4eafae44d8 Merge branches 'w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/7.70/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.6 2024-02-20 13:05:06 +00:00
bert-e 4cab3c84f3 Merge branches 'w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.7 2024-02-20 13:05:06 +00:00
bert-e e3301a2db9 Merge branch 'bugfix/CLDSRV-508-fix-bucket-tagging' into q/7.70 2024-02-20 13:05:05 +00:00
williamlardier 0dcc93cdbe Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:49:56 +01:00
williamlardier 2f2f91d6e8 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:48:05 +01:00
williamlardier a28b141dfb Merge remote-tracking branch 'origin/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:43:22 +01:00
williamlardier 46fe061895 CLDSRV-508: bump project version 2024-02-20 12:44:23 +01:00
williamlardier 34202eaa62 CLDSRV-508: add tests for bucket tagging APIs 2024-02-20 12:44:07 +01:00
williamlardier 4d343fe468 CLDSRV-508: standardize XML with object tagging API 2024-02-20 12:42:34 +01:00
williamlardier 229e641f88 CLDSRV-508: add missing parameters in bucket tagging APIs 2024-02-20 12:42:18 +01:00
bert-e 1433973e5c Merge branch 'w/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e 201170b1ed Merge branch 'w/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e f13985094e Merge branch 'w/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.6/bugfix/CLDSRV-498/null 2024-02-20 11:24:07 +00:00
Nicolas Humbert 395033acd2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-498/null' into w/7.70/bugfix/CLDSRV-498/null 2024-02-20 12:13:38 +01:00
Nicolas Humbert 632ef26826 CLDSRV-498 Handling isNull master version with no versionId
In certain cases, a master version may not have a versionId and be set as null (isNull:true). For instance, this occurs when a customer:

1. Creates a bucket.
2. Puts an object into it.
3. Enables bucket versioning.
4. Puts metadata (BackbeatClient.putMetadata), which results in the master version being set to null (isNull:true) with no versionId.

Currently, if an object is put after these steps, CloudServer fails to appropriately generate a null version, because it doesn't handle situations where the master version is set to isNull:true with no versionId.

The correct approach when an object is put is to:

1. Create the new version key.
2. Create a new null version key, assigning it a “default non-version version id”.
3. Store this “default non-version version id” in the `nullVersionId` field of the master key.
2024-02-20 12:04:53 +01:00
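A rough sketch of those three steps with hypothetical metadata helpers; the key layout shown is an assumption, only the steps themselves come from the message:

```js
// Sketch: putting an object on top of an isNull:true master that has
// no versionId.
function putOverNullMaster(metadata, master, newVersion) {
    // 1. create the new version key
    metadata.putVersion(newVersion.key, newVersion.value);
    // 2. preserve the old null master under a new null version key,
    //    giving it a "default non-version version id" (placeholder)
    const nullVersionId = 'non-version-id';
    metadata.putVersion(`${master.key}\0${nullVersionId}`, master.value);
    // 3. record that id in the master's nullVersionId field so the
    //    null version stays addressable
    metadata.putMaster(master.key,
        Object.assign({}, newVersion.value, { nullVersionId }));
}
```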
bert-e 242b2ec85a Merge branches 'w/8.8/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.7/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.8 2024-02-19 15:00:59 +00:00
bert-e 3186a97113 Merge branches 'w/8.7/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.6/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.7 2024-02-19 15:00:59 +00:00
bert-e 3861b8d317 Merge branch 'q/5534/7.10/bugfix/CLDSRV-505-ip-handling-fix' into tmp/normal/q/7.70 2024-02-19 15:00:58 +00:00
bert-e bb278f7d7e Merge branches 'w/8.6/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/7.70/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.6 2024-02-19 15:00:58 +00:00
bert-e 3b9309490d Merge branch 'bugfix/CLDSRV-505-ip-handling-fix' into q/7.10 2024-02-19 15:00:57 +00:00
Will Toozs 0118dfabbb
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-505-ip-handling-fix' into w/8.8/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:40:58 +01:00
Will Toozs ff40dfaadf
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-505-ip-handling-fix' into w/8.7/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:25:18 +01:00
Will Toozs 9a31236da0
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-505-ip-handling-fix' into w/8.6/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:22:08 +01:00
Will Toozs 61ebacfbf3
Merge remote-tracking branch 'origin/bugfix/CLDSRV-505-ip-handling-fix' into w/7.70/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 14:26:43 +01:00
Will Toozs aa646ced28
CLDSRV-505: bump CS version 2024-02-19 12:00:41 +01:00
Will Toozs f2ca37b5fb
CLDSRV-505: update ip check tests for arrays 2024-02-19 12:00:41 +01:00
Will Toozs 9d74cedde8
CLDSRV-505: update ip check for arrays 2024-02-19 12:00:41 +01:00
bert-e 9c99a6980f Merge branches 'w/8.8/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.8 2024-02-19 10:16:26 +00:00
bert-e d4e255781b Merge branches 'w/8.7/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.7 2024-02-19 10:16:26 +00:00
bert-e f5763d012e Merge branches 'w/8.6/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/7.70/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.6 2024-02-19 10:16:24 +00:00
bert-e 8fb740cf09 Merge branch 'bugfix/CLDSRV-507-bp-fixes' into q/7.10 2024-02-19 10:16:23 +00:00
bert-e 55c8d89de2 Merge branches 'w/7.70/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/7.10/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/7.70 2024-02-19 10:16:23 +00:00
bert-e 1afaaec0ac Merge branch 'w/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.8/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:24 +00:00
bert-e e20e458971 Merge branch 'w/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.7/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:23 +00:00
williamlardier 56e52de056 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-507-bp-fixes' into w/8.6/bugfix/CLDSRV-507-bp-fixes 2024-02-19 10:01:09 +01:00
williamlardier d9fc4aae50 Merge remote-tracking branch 'origin/bugfix/CLDSRV-507-bp-fixes' into w/7.70/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:54:06 +01:00
williamlardier 08de09a2ab CLDSRV-507: bump arsenal version 2024-02-19 09:48:13 +01:00
bert-e bef9220032 Merge branches 'w/8.8/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.8 2024-02-15 18:43:31 +00:00
bert-e de20f1efdc Merge branches 'w/8.7/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.6/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.7 2024-02-15 18:43:31 +00:00
bert-e 4817f11f36 Merge branches 'w/8.6/bugfix/CLDSRV-497/putmetadata' and 'q/5525/7.70/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.6 2024-02-15 18:43:30 +00:00
bert-e a6b283f5a2 Merge branch 'bugfix/CLDSRV-497/putmetadata' into q/7.10 2024-02-15 18:43:29 +00:00
bert-e 3f810a7596 Merge branches 'w/7.70/bugfix/CLDSRV-497/putmetadata' and 'q/5525/7.10/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/7.70 2024-02-15 18:43:29 +00:00
bert-e b89d19c9f8 Merge branch 'w/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:58:27 +00:00
Nicolas Humbert 4dc9788629 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-497/putmetadata' into w/8.7/bugfix/CLDSRV-497/putmetadata 2024-02-15 18:43:28 +01:00
Nicolas Humbert 65a891d6f8 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-497/putmetadata' into w/8.6/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:51:48 +01:00
bert-e 2ecca4feef Merge branch 'bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/7.70/bugfix/CLDSRV-497/putmetadata 2024-02-15 16:34:04 +00:00
Nicolas Humbert c52a3a6e44 CLDSRV-497 Fix BackbeatClient.putMetadata with versionID
Issue: When Cloudserver BackbeatClient.putMetadata() option fields are sent to Metadata through the query string, they are converted to strings. As a result, Metadata interprets the value undefined in the versionId field as an empty string ('').

Background: Previously, the 'crrExistingObject' script used this bug/behavior as a workaround to generate an internal version ID to replicate null version (= objects created before versioning was enabled). However, this approach has led to inconsistencies, occasionally resulting in the creation of multiple null internal versions.

Resolution: To address this issue, the 'crrExistingObject' workaround will be deprecated. Instead, Backbeat will be enhanced to support the replication of null versions directly, thereby ensuring more reliable and consistent behavior in handling versioning.
2024-02-15 17:31:23 +01:00
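The stringification pitfall described here can be reproduced with plain Node.js (this is standard querystring behavior, not CloudServer code):

```js
// Sketch: an undefined option field becomes an empty string once it
// travels through a query string.
const querystring = require('querystring');

const qs = querystring.stringify({ versionId: undefined }); // 'versionId='
const parsed = querystring.parse(qs);
console.log(parsed.versionId === ''); // true: undefined arrived as ''
```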
williamlardier d82965ff78 CLDSRV-507: normalize request types 2024-02-15 09:26:45 +01:00
williamlardier f488a65f15 CLDSRV-507: support no object metadata for MPU APIs resource authz
The MPU APIs deal with object resources. Back when Bucket
Policies and ACLs were only evaluated when there was an
Allow from IAM, there was no need to handle this case.
Now, however, these APIs evaluate the bucket policies and
ACLs, and because there is no object metadata associated, we
end up allowing requests without any permission by relying
on the existing code, where the permission is changed and becomes
"bucketGet". We must treat the MPU APIs as distinct APIs and check
the right permission. For that, we rely on the updated bucket
policy action map in arsenal with these APIs, and ensure that we
properly map that to the existing logic, where we only checked the
"objectPut" permission, to handle these 3 specific APIs:

- initiate MPU
- upload part
- complete MPU
2024-02-14 15:16:48 +01:00
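A sketch of the permission mapping the commit above describes; the function and permission names are illustrative assumptions:

```js
// Sketch: give the three MPU APIs their own permission check instead
// of letting the no-object-metadata path degrade to "bucketGet".
const mpuApis = new Set([
    'initiateMultipartUpload',
    'uploadPart',
    'completeMultipartUpload',
]);

function permissionFor(api, objectMd) {
    if (mpuApis.has(api)) {
        // historically checked as "objectPut"; keep that mapping
        // explicit even when there is no object metadata
        return 'objectPut';
    }
    if (!objectMd) {
        return 'bucketGet'; // the fallback the fix avoids for MPU APIs
    }
    return api; // assumed: action map keyed by API name
}
```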
williamlardier 40a575a717 CLDSRV-507: use correct action for put part APIs 2024-02-14 15:16:48 +01:00
williamlardier fea82f15ea CLDSRV-507: use correct action for MPU 2024-02-14 15:16:48 +01:00
bert-e 06dc042154 Merge branches 'w/8.8/improvement/CLDSRV-502' and 'q/5528/8.7/improvement/CLDSRV-502' into tmp/octopus/q/8.8 2024-02-08 13:49:18 +00:00
bert-e aa4643644a Merge branches 'w/8.7/improvement/CLDSRV-502' and 'q/5528/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.7 2024-02-08 13:49:18 +00:00
bert-e 89edf7e3d0 Merge branch 'w/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.6 2024-02-08 13:49:18 +00:00
Francois Ferrand 4c7d3ae4bc
Merge branch 'w/8.7/improvement/CLDSRV-502' into w/8.8/improvement/CLDSRV-502 2024-02-05 18:50:27 +01:00
Francois Ferrand 23883dae8b
Merge branch 'w/8.6/improvement/CLDSRV-502' into w/8.7/improvement/CLDSRV-502 2024-02-05 18:50:12 +01:00
Francois Ferrand e616ffa374
gha: fix test alert trigger to match other premerge build
Issue: CLDSRV-502
2024-02-05 18:49:31 +01:00
Francois Ferrand 515c20e4cf
Merge branch 'w/7.70/improvement/CLDSRV-502' into w/8.6/improvement/CLDSRV-502 2024-02-05 18:48:18 +01:00
Francois Ferrand f8eedddebf
Merge branch 'improvement/CLDSRV-502' into w/7.70/improvement/CLDSRV-502 2024-02-05 18:48:01 +01:00
Francois Ferrand f3654e4fb8
Fix trigger for codeql jobs
Build on pull request & bert-e queue build, and skip rebuild when PR
lands on development branch.

Issue: CLDSRV-502
2024-02-05 18:47:30 +01:00
Francois Ferrand 517fb99190
gha: add release name to release job
Issue: CLDSRV-502
2024-02-05 18:46:02 +01:00
Francois Ferrand 531c83a359
Release 8.8.17
Issue: CLDSRV-500
2024-02-05 17:35:43 +01:00
Francois Ferrand b84fa851f7
Merge branch 'w/8.7/bugfix/CLDSRV-500' into w/8.8/bugfix/CLDSRV-500 2024-02-05 17:35:20 +01:00
Francois Ferrand 4cb1a879f7
Release 8.7.44
Issue: CLDSRV-500
2024-02-05 17:34:45 +01:00
Francois Ferrand 7ae55b20e7
Merge branch 'bugfix/CLDSRV-500' into w/8.7/bugfix/CLDSRV-500 2024-02-05 17:32:53 +01:00
Francois Ferrand d0a6fa17a5
Release 8.6.24
Issue: CLDSRV-500
2024-02-05 17:31:36 +01:00
Francois Ferrand 7275459f70
Use rate interval in `Request time` panel
- Should use $__rate_interval, which handles small time ranges.
- Regenerating the dashboard also fixes the 'latency per s3 action'
  panel.

Issue: CLDSRV-500
2024-02-01 15:49:29 +01:00
Hervé Dombya 363afcd17f CLDSRV-473: fix cors issues in getVeeamFile 2024-01-26 15:59:10 +01:00
Frédéric Meinnel 1cf0250ce9 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.8/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:06:05 +01:00
Frédéric Meinnel 20d0b38d0b Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:05:39 +01:00
Frédéric Meinnel 9988a8327a Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 14:06:31 +01:00
Frédéric Meinnel b481d24637 Merge remote-tracking branch 'origin/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/7.70/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 14:01:59 +01:00
Frédéric Meinnel 71625774c1 CLDSRV-494: version bump 2024-01-23 13:42:36 +01:00
Frédéric Meinnel 9b9338f2b8 CLDSRV-494: Fix generateV4Headers for HTTP PUT with body 2024-01-23 13:42:31 +01:00
Frédéric Meinnel 601619f200 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.8/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:24:05 +01:00
Frédéric Meinnel a92e71fd50 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:22:55 +01:00
Frédéric Meinnel 8802ea0617 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:21:42 +01:00
Frédéric Meinnel acc5f74787 Merge remote-tracking branch 'origin/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/7.70/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:20:10 +01:00
Frédéric Meinnel e3c093f352 CLDSRV-493: Version bump 2024-01-17 13:18:32 +01:00
Frédéric Meinnel e17383a678 CLDSRV-493: Fix dates accepted in lifecycle configuration 2024-01-17 13:18:32 +01:00
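AWS only accepts lifecycle configuration dates at midnight UTC in ISO 8601 form; a validation along those lines matches the fix's title, though the exact rule applied by the commit is an assumption:

```js
// Sketch: accept a lifecycle configuration date only if it parses and
// falls exactly on midnight UTC.
function isValidLifecycleDate(value) {
    const d = new Date(value);
    if (Number.isNaN(d.getTime())) {
        return false;
    }
    return d.getUTCHours() === 0 && d.getUTCMinutes() === 0 &&
        d.getUTCSeconds() === 0 && d.getUTCMilliseconds() === 0;
}

console.log(isValidLifecycleDate('2024-01-17T00:00:00Z')); // true
console.log(isValidLifecycleDate('2024-01-17T13:18:00Z')); // false
```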
bert-e 43f62b847c Merge branch 'w/8.7/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.8/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e a031905bba Merge branch 'w/8.6/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.7/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e 13ad6881f4 Merge branch 'bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.6/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:38 +00:00
Mickael Bourgois dea5173075
CLDSRV-492: remove duplicate error monitoring 2024-01-16 21:34:26 +01:00
Mickael Bourgois b3f96198fe
CLDSRV-492: update monitoring head 2024-01-15 14:48:08 +01:00
Mickael Bourgois 5e2dd8cccb
Merge remote-tracking branch 'origin/development/7.70' into bugfix/CLDSRV-492-head-monitoring 2024-01-15 11:56:50 +01:00
bert-e cd2406b827 Merge branches 'w/8.8/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.8 2024-01-15 09:47:24 +00:00
bert-e 62f707caff Merge branches 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.7 2024-01-15 09:47:23 +00:00
bert-e f01ef00a52 Merge branches 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.6 2024-01-15 09:47:23 +00:00
bert-e 30fb64e443 Merge branch 'bugfix/CLDSRV-489-redirect-folder-index' into q/7.10 2024-01-15 09:47:22 +00:00
bert-e 054107d8fb Merge branches 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/7.10/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/7.70 2024-01-15 09:47:22 +00:00
bert-e 848bf318fe Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:46 +00:00
bert-e 0beb48a1fd Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:45 +00:00
bert-e 618d4dffc7 Merge branches 'development/8.6' and 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:44 +00:00
bert-e b5aae192f7 Merge branches 'development/7.70' and 'bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/7.70/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:43 +00:00
Mickael Bourgois 557f3dcde6
CLDSRV-489: fix lint indentation 2024-01-12 10:07:39 +01:00
Mickael Bourgois 3291af36bb
CLDSRV-489: Apply style suggestions
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2024-01-12 09:53:57 +01:00
Will Toozs d274acd8ed
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-436-bump-version' into w/8.8/improvement/CLDSRV-436-bump-version 2024-01-11 13:10:57 +01:00
Will Toozs e6d9e8fc35
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-436-bump-version' into w/8.7/improvement/CLDSRV-436-bump-version 2024-01-11 11:50:25 +01:00
Will Toozs b08edefad6
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bump-version' into w/8.6/improvement/CLDSRV-436-bump-version 2024-01-11 11:24:50 +01:00
Will Toozs e9c353d62a
Merge remote-tracking branch 'origin/improvement/CLDSRV-436-bump-version' into w/7.70/improvement/CLDSRV-436-bump-version 2024-01-11 11:04:53 +01:00
Will Toozs c7c55451a1
CLDSRV-436: bump package version 2024-01-11 10:45:47 +01:00
bert-e 7bb004586d Merge branch 'w/8.7/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.8/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:49 +00:00
bert-e d48de67723 Merge branch 'w/8.6/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.7/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:48 +00:00
Will Toozs fa4dec01cb
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bp-conds' into w/8.6/improvement/CLDSRV-436-bp-conds 2024-01-10 22:00:37 +01:00
Will Toozs 4f79a9c59c
Merge remote-tracking branch 'origin/improvement/CLDSRV-436-bp-conds' into w/7.70/improvement/CLDSRV-436-bp-conds 2024-01-10 21:43:08 +01:00
Will Toozs 05c759110b
CLDSRV-436: update dummyRequest of unit tests 2024-01-10 21:02:15 +01:00
Will Toozs deae294a81
CLDSRV-436: unit test policy condition validation 2024-01-10 21:02:15 +01:00
Will Toozs ab587385e6
CLDSRV-436: add functional test cases for conditions 2024-01-10 21:01:44 +01:00
Will Toozs 6243911072
CLDSRV-436: update tests 2024-01-10 20:59:26 +01:00
Will Toozs da804054e5
CLDSRV-436: update put retention logic 2024-01-10 20:57:38 +01:00
Will Toozs 493a6da773
CLDSRV-436: update put policy logic 2024-01-10 20:57:38 +01:00
Will Toozs 7ecdd11783
CLDSRV-436: add conditions logic 2024-01-10 20:57:37 +01:00
Mickael Bourgois 7e53b67c90
CLDSRV-492: fix monitoring for website head
Match head requests as before the merge done in CLDSRV-482
2024-01-10 20:29:20 +01:00
bert-e b141c59bb7 Merge branch 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 0b79ecd942 Merge branch 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 86ece5c264 Merge branch 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:56 +00:00
Mickael Bourgois 0b79cd6af6
Merge remote-tracking branch 'origin/bugfix/CLDSRV-489-redirect-folder-index' into w/7.70/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 19:32:46 +01:00
Mickael Bourgois a51b5e0af3
CLDSRV-489: test redirect 302 on folder without / 2024-01-10 19:10:57 +01:00
bert-e 10ca6b98fa Merge branch 'w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.8/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
bert-e 171925732f Merge branch 'w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
Taylor McKinnon 6d36f9c867 Merge remote-tracking branch 'origin/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 10:04:49 -08:00
Taylor McKinnon 1a21c4f867 impr(CLDSRV-475): Bump version to 7.70.41 2024-01-10 10:02:36 -08:00
Taylor McKinnon 866dec1b81 impr(CLDSRV-475): Add isDeleteMarker to overhead fields 2024-01-10 10:02:15 -08:00
Mickael Bourgois 9491e82235
CLDSRV-489: redirect 302 on folder without /
If a key is not found, we must check whether key/index.html
is accessible; if so, redirect by appending a trailing /
to the key

@see https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html#IndexDocumentsandFolders
2024-01-10 17:39:13 +01:00
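A sketch of the check described in this message, with hypothetical store helpers (objectExists, serve):

```js
// Sketch: if `key` is missing but `key/index.html` exists, answer 302
// to the key with a trailing slash instead of serving the index
// directly, matching the AWS behavior linked above.
async function websiteGet(store, key, indexDocument) {
    if (await store.objectExists(key)) {
        return store.serve(key);
    }
    if (!key.endsWith('/') &&
        await store.objectExists(`${key}/${indexDocument}`)) {
        return { statusCode: 302, headers: { Location: `/${key}/` } };
    }
    return { statusCode: 404 };
}
```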
bert-e 70e8b20af9 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 0ec5f4fee5 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 6c468a01d9 Merge branch 'w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:19 +00:00
bert-e 3d2b75f344 Merge branch 'bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/7.70/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:19 +00:00
Mickael Bourgois 5811fa5326
CLDSRV-485: fix linter in tests for 8.6 2024-01-10 13:50:11 +01:00
bert-e e600677545 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
bert-e 72e5da10b7 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
Mickael Bourgois de0e7e6449
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 13:15:29 +01:00
Mickael Bourgois 97b5ed6dd3
Merge remote-tracking branch 'origin/bugfix/CLDSRV-485-custom-err-redirect' into w/7.70/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:09:35 +01:00
Mickael Bourgois dad8a3ee37
Merge remote-tracking branch 'origin/development/7.10' into bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:02:54 +01:00
Mickael Bourgois 8aca658c5c
CLDSRV-485: bump arsenal 2024-01-10 11:52:27 +01:00
bert-e 759817c5a0 Merge branch 'w/8.7/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
bert-e 035c7e8d7f Merge branch 'w/8.6/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
Mickael Bourgois b8af1225d5
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-482-head-redirect-index' into w/8.6/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:28:13 +01:00
Mickael Bourgois 40faa5f3fa
Merge remote-tracking branch 'origin/bugfix/CLDSRV-482-head-redirect-index' into w/7.70/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:19:09 +01:00
Mickael Bourgois 1fc8622614
Merge remote-tracking branch 'origin/development/7.10' into bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:01:51 +01:00
Mickael Bourgois a0acefb4a8
CLDSRV-482: apply style suggestion
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2024-01-10 10:13:08 +01:00
bert-e de27a5b88e Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e a4cc5e45f3 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e 621cb33680 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:48 +00:00
bert-e b025443d21 Merge branch 'bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/7.70/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:48 +00:00
Mickael Bourgois d502a81284
CLDSRV-488: fix lint 2024-01-10 09:56:27 +01:00
bert-e 9a8b707e82 Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:32 +00:00
bert-e 002dbe0019 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e 59e52f6df2 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e b52f2356ba Merge branch 'bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/7.70/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:30 +00:00
Mickael Bourgois 60679495b6
CLDSRV-488: apply review suggestion
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2024-01-10 09:53:24 +01:00
Mickael Bourgois 9dfacd0827
CLDSRV-482: factorize website GET and HEAD 2024-01-09 18:45:02 +01:00
Mickael Bourgois 485ef1e9bb
CLDSRV-482: test routing and implicit index 2024-01-09 17:18:07 +01:00
Mickael Bourgois 5e041ca5e7
CLDSRV-482: fix head implicit index
The routing check must be performed before appending the index
key, to prevent matching a routing rule on the index
2024-01-09 17:18:07 +01:00
Mickael Bourgois 52137772d9
Merge branch 'development/7.10' into bugfix/CLDSRV-488-error-type-bp 2024-01-09 16:44:18 +01:00
Mickael Bourgois fcf193d033
CLDSRV-488: move website condition, replace flag 2024-01-09 16:40:55 +01:00
Mickael Bourgois fb61cad786
CLDSRV-485: test website redirect custom error 2024-01-08 18:00:32 +01:00
Mickael Bourgois b6367eb2b8
CLDSRV-485: website redirect from custom error doc 2024-01-08 17:58:09 +01:00
bert-e d803bdcadc Merge branch 'w/8.7/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.8/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:57 +00:00
bert-e 4f1b8f25b7 Merge branch 'w/8.6/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.7/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e 94363482c3 Merge branch 'w/7.70/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.6/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e 6b0a8cb9ed Merge branch 'bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/7.70/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:55 +00:00
Will Toozs 5dbf5d965f
CLDSRV-477: add tests 2024-01-08 14:43:41 +01:00
Will Toozs ebefc4b5b0
CLDSRV-477: change position of ACL check 2024-01-08 14:43:40 +01:00
Mickael Bourgois ac1c75e414
CLDSRV-488: test website 404 with bucket policy 2024-01-05 12:52:51 +01:00
Mickael Bourgois fee4f3a96e
CLDSRV-488: fix website 404 with bucket policy
If the bucket policy authorizes access to a
nonexistent object, there should be a 404
and not a 403
2024-01-05 12:52:50 +01:00
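A sketch of the ordering fix; the names are illustrative:

```js
// Sketch: once the bucket policy authorizes the request, a missing
// object must surface as 404 (NoSuchKey), not 403 (AccessDenied).
function websiteStatus(isAuthorized, objectMd) {
    if (!isAuthorized) {
        return 403;
    }
    if (!objectMd) {
        return 404; // authorized, but the key does not exist
    }
    return 200;
}
```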
bert-e e969eeaa20 Merge branches 'w/8.8/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.8 2024-01-05 11:24:59 +00:00
bert-e 2ee78bcf6a Merge branches 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.7 2024-01-05 11:24:58 +00:00
bert-e 64273365d5 Merge branches 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.6 2024-01-05 11:24:58 +00:00
bert-e 65c6bacd34 Merge branches 'w/7.70/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/7.10/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/7.70 2024-01-05 11:24:57 +00:00
bert-e d60d252eaf Merge branch 'bugfix/CLDSRV-490-bucket-policy-resource' into q/7.10 2024-01-05 11:24:57 +00:00
bert-e f31fe2f2bf Merge branch 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.8/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
bert-e ee47cece90 Merge branch 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.7/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
Mickael Bourgois 7a5cddacbc
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into w/8.6/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 12:08:54 +01:00
Mickael Bourgois baa6203b57
Merge remote-tracking branch 'origin/bugfix/CLDSRV-490-bucket-policy-resource' into w/7.70/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 12:04:25 +01:00
Mickael Bourgois 141056637b
CLDSRV-490: bump version 2024-01-05 11:51:49 +01:00
Mickael Bourgois 0f007e0489
CLDSRV-490: fix linting in tests for 8.6 2024-01-05 11:51:48 +01:00
Mickael Bourgois 2d50a76923
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-486-object-redirect-root' into w/8.8/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:59:20 +01:00
Mickael Bourgois 6b4f10ae56
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-486-object-redirect-root' into w/8.7/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:57:36 +01:00
Mickael Bourgois 23eaf89cc3
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-486-object-redirect-root' into w/8.6/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:55:48 +01:00
Mickael Bourgois d6a2144508
Merge remote-tracking branch 'origin/bugfix/CLDSRV-486-object-redirect-root' into w/7.70/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:49:20 +01:00
Mickael Bourgois 40dd3f37a4
Merge branch 'development/7.10' into bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:36:03 +01:00
Mickael Bourgois d3307654a6
CLDSRV-486: bump cloudserver version 2024-01-04 16:34:10 +01:00
Mickael Bourgois e342a90b48
CLDSRV-486: bump arsenal version 2024-01-04 16:29:04 +01:00
williamlardier dbda5f16a6 CLDSRV-407: bump mongodb to v5.0 in CI 2024-01-04 14:04:20 +01:00
Mickael Bourgois d4a4825668
CLDSRV-490: test bucket policy with request 2024-01-04 10:18:36 +01:00
Mickael Bourgois 83b9e9a775
CLDSRV-490: fix missing request for bucket policy
If the request is missing, the bucket policy ignores the resource
and applies its effect to any matching principal and action
2024-01-03 18:24:54 +01:00
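A sketch of that failure mode and the fix; the evaluation shape is an assumption:

```js
// Sketch: without the request there is no resource to compare, so the
// statement must not match on principal and action alone.
function evaluateStatement(statement, principal, action, request) {
    if (!request) {
        return 'Neutral'; // cannot build the resource ARN: no match
    }
    const resource = `arn:aws:s3:::${request.bucketName}/${request.key}`;
    if (statement.principals.includes(principal) &&
        statement.actions.includes(action) &&
        statement.resources.includes(resource)) {
        return statement.effect; // 'Allow' or 'Deny'
    }
    return 'Neutral';
}
```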
Maha Benzekri 2959c950dd
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.8/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:36:20 +01:00
Maha Benzekri 462ddf7ef1
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:34:44 +01:00
Maha Benzekri fda42e7399
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:32:41 +01:00
Maha Benzekri edbd6caeb4
Merge remote-tracking branch 'origin/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/7.70/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 09:38:25 +01:00
Maha Benzekri 1befaa1f28
CLDSRV-480: CLDSRV version bump 2024-01-03 09:35:19 +01:00
Maha Benzekri 0cefca831d
CLDSRV-480: condition check fix for isImplicit 2024-01-03 09:34:19 +01:00
Jonathan Gramain ea7b69e313 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:26:27 -08:00
Jonathan Gramain 8ec1c2f2db Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:08:40 -08:00
Jonathan Gramain 3af6ca5f6d Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:06:45 -08:00
Jonathan Gramain 997d71df08 Merge remote-tracking branch 'origin/bugfix/CLDSRV-478-bump-arsenal-dep' into w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 14:49:08 -08:00
Jonathan Gramain 275ebcec5c CLDSRV-478 bump cloudserver version 2024-01-02 14:45:56 -08:00
Mickael Bourgois 8b77530b2b
CLDSRV-486: fix object redirect to root / 2024-01-02 19:16:32 +01:00
bert-e 43f9606598 Merge branch 'w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:02 +00:00
bert-e be34e5ad59 Merge branch 'w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:01 +00:00
Jonathan Gramain 5bc64ede43 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 09:41:03 -08:00
Jonathan Gramain 911010376e Merge remote-tracking branch 'origin/bugfix/CLDSRV-478-bump-arsenal-dep' into w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 09:26:30 -08:00
Jonathan Gramain b5ec37b38b bf: CLDSRV-478 bump arsenal dependency 2024-01-02 09:19:15 -08:00
Mickael Bourgois 3ce869cea3
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-479-website-fqdn-index' into w/8.8/bugfix/CLDSRV-479-website-fqdn-index
# Conflicts:
#	package.json
2024-01-02 11:40:28 +01:00
Mickael Bourgois b7960784db
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-479-website-fqdn-index' into w/8.7/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:35:36 +01:00
Mickael Bourgois 5ac10cefa8
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-479-website-fqdn-index' into w/8.6/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:33:49 +01:00
Mickael Bourgois 2dafefd77f
Merge remote-tracking branch 'origin/bugfix/CLDSRV-479-website-fqdn-index' into w/7.70/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:29:47 +01:00
Mickael Bourgois 36f147b441
CLDSRV-479: update test bucket policy index 2024-01-02 11:13:40 +01:00
Mickael Bourgois 8ed447ba63
CLDSRV-479: helper function for index append 2024-01-02 10:27:38 +01:00
bert-e bf235f3335 Merge branch 'w/8.7/bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.8/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:53 +00:00
bert-e 569c9f4368 Merge branch 'bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:52 +00:00
Nicolas Humbert 92cf03254a CLDSRV-483 Improve Ruby test output readability and enable backtrace 2023-12-31 11:08:21 +01:00
Nicolas Humbert c57ae9c8ea CLDSRV-483 Bump ruby patch version to fix malformed header response
More info about the malformed header response: https://github.com/excon/excon/issues/845
2023-12-31 11:08:16 +01:00
Mickael Bourgois 5bec42d051
CLDSRV-479: test index with bucket policy 2023-12-29 17:43:34 +01:00
Mickael Bourgois f427fc9b70
CLDSRV-479: bump version 2023-12-28 15:20:59 +01:00
Mickael Bourgois 9aad4ae3ea
CLDSRV-479: fix error on index using bucket policy
The variable holding the new objectKey with the index suffix
is not propagated to the bucket policy function.
The _checkBucketPolicyResources function extracts the objectKey from the request.
2023-12-28 15:20:48 +01:00
bert-e 1a3cb8108c Merge branch 'q/5495/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 042120b17e Merge branch 'q/5495/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e ba4593592d Merge branch 'w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 6efdb627da Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e 5306bf0b5c Merge branch 'q/5495/7.70/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e 5b22819c3f Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e 126ca3560f Merge branch 'improvement/CLDSRV-451-specific-7.70-apis-update' into q/7.70 2023-12-15 06:44:02 +00:00
bert-e e5b692f3db Merge branch 'w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.8/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:49 +00:00
bert-e 548ae8cd12 Merge branch 'w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:48 +00:00
Taylor McKinnon 80376405df Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 10:30:13 -08:00
Taylor McKinnon a612e5c27c Merge remote-tracking branch 'origin/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into w/7.70/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 10:27:03 -08:00
Taylor McKinnon c3b7662086 impr(CLDSRV-467): Bump Utapi dependency to 7.10.15 2023-12-14 10:17:18 -08:00
Taylor McKinnon 818b1e60d1 impr(CLDSRV-467): Add new Utapi Reindex option `utapi.reindex.onlyCountLatestWhenObjectLocked` 2023-12-14 10:17:18 -08:00
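Judging from the option name above, the setting sits under the utapi.reindex section of the CloudServer configuration; a guessed excerpt, written here as a JavaScript literal (the surrounding keys and the boolean type are assumptions):

    // Assumed shape of the relevant config.json fragment; only the
    // onlyCountLatestWhenObjectLocked key name comes from the commit above.
    const config = {
        utapi: {
            reindex: {
                // when true, reindex counts only the latest version of each
                // object in Object Lock enabled buckets (assumed semantics)
                onlyCountLatestWhenObjectLocked: true,
            },
        },
    };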
bert-e 2a919af071 Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:47 +00:00
bert-e 5c300b8b6c Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:46 +00:00
Maha Benzekri ad3ebd3db2
CLDSRV-451: fix on getTagging 2023-12-14 18:21:24 +01:00
Maha Benzekri 99068e7265
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:36:17 +01:00
Maha Benzekri cd039d8133
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update
In this commit, the only API change compared to 8.6 is
routeVeeam.
2023-12-14 17:33:03 +01:00
Maha Benzekri dd3ec25d74
Merge remote-tracking branch 'origin/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update
In this merge, we have updated the tagging APIs along with the
lifecycle APIs, the metadata search APIs, and objectRestore; the unit test
for objectRestore has been updated as well.
2023-12-14 17:28:46 +01:00
Maha Benzekri 717228bdfc
CLDSRV-451: bump Cloudserver version 2023-12-14 16:59:10 +01:00
Maha Benzekri 836fc80560
CLDSRV-451: updating bucket tagging APIs for impDeny 2023-12-14 16:58:14 +01:00
Maha Benzekri 75b293df8d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.8/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:37:14 +01:00
Maha Benzekri a855e38998
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:35:02 +01:00
Maha Benzekri 51d5666bec
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:32:36 +01:00
Maha Benzekri ecb74a2db3
Merge remote-tracking branch 'origin/improvement/CLDSRV-431-misc-api-implicitDeny' into w/7.70/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:26:57 +01:00
Maha Benzekri cdcdf8eff0
CLDSRV-431: cloudserver version bump 2023-12-14 12:22:42 +01:00
Maha Benzekri dc39b37877
CLDSRV-431: arsenal bump 2023-12-14 12:21:53 +01:00
Maha Benzekri 4897b3c720
CLDSRV-431: changes on misc api for impDeny 2023-12-13 11:14:21 +01:00
Maha Benzekri ffe4ea4afe
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.8/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 14:47:24 +01:00
Maha Benzekri a16cfad0fc
CLDSRV-474: mongodb_image on all jobs 2023-12-12 14:06:02 +01:00
bert-e 556163e3e9 Merge branch 'w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:55:56 +00:00
Maha Benzekri 8fe9f16661
CLDSRV-474: Removing the docker-compose commands from the tests.yaml 2023-12-12 13:53:53 +01:00
Maha Benzekri eb9ff85bd9
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 13:52:50 +01:00
bert-e 52994c0177 Merge branch 'improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/7.70/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:44:56 +00:00
tmacro e109b0fca7
CLDSRV-474: fix CI fail 2023-12-12 10:21:01 +01:00
Maha Benzekri 9940699f9d
CLDSRV-474: fixup on multiObjectDelete 2023-12-12 10:11:18 +01:00
Maha Benzekri 869d554e43
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.8/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:42:25 +01:00
Maha Benzekri 2f8b228595
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:39:20 +01:00
Maha Benzekri 539b2c1630
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:35:11 +01:00
Maha Benzekri 320766e7b2
Merge remote-tracking branch 'origin/improvement/CLDSRV-430-delete-api-implicitDeny' into w/7.70/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:31:56 +01:00
Maha Benzekri 74425d03f8
CLDSRV-430: version bump 2023-12-08 18:29:19 +01:00
Maha Benzekri 91629a0d18
CLDSRV-430: add delete API implicit deny logic
As for multiObjectDelete, a new function was added to
ensure that all actions are allowed.
2023-12-08 18:29:17 +01:00
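The "ensure that all actions are allowed" check described above can be sketched as a single predicate over per-action authorization results; the result shape used here is an assumption, not the actual Vault response format:

    // Sketch: proceed with the delete only if every evaluated action
    // came back allowed.
    function allActionsAllowed(authResults) {
        return authResults.length > 0
            && authResults.every(res => res.isAllowed === true);
    }

    // e.g. allActionsAllowed([
    //     { action: 'objectDelete', isAllowed: true },
    //     { action: 'objectDeleteVersion', isAllowed: false },
    // ]) === false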
Maha Benzekri e44b7ed918
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 12:00:50 +01:00
Maha Benzekri 3cb29f7f8e
CLDSRV-429: version bump for version release 2023-12-05 12:00:09 +01:00
Maha Benzekri 4f08a4dff2
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 11:58:27 +01:00
Maha Benzekri 15a1aa7965
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 10:58:04 +01:00
Maha Benzekri 4470ee9125
CLDSRV-429: version bump for release 2023-12-05 10:55:31 +01:00
Francois Ferrand d8c12597ea
Release cloudserver 8.8.7
Issue: CLDSRV-471
2023-12-01 19:03:38 +01:00
Francois Ferrand c8eb9025fa
Merge remote-tracking branch 'origin/improvement/CLDSRV-471' into w/8.8/improvement/CLDSRV-471 2023-12-01 19:03:17 +01:00
Francois Ferrand 57e0f71e6a
Release cloudserver 8.7.33
Issue: CLDSRV-471
2023-12-01 19:01:30 +01:00
Francois Ferrand f22f920ee2
Bump arsenal 8.1.115
Issue: CLDSRV-471
2023-12-01 18:42:26 +01:00
Maha Benzekri ed1bb6301d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:31:50 +01:00
Maha Benzekri 70dfa5b11b
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:29:14 +01:00
Maha Benzekri f17e7677fa
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:27:44 +01:00
Maha Benzekri 63b00fef55
Merge remote-tracking branch 'origin/improvement/CLDSRV-429-get-apis-implicitDeny' into w/7.70/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:25:04 +01:00
Maha Benzekri b4f0d34abd
CLDSRV-429: version bump 2023-12-01 10:27:58 +01:00
Maha Benzekri e18f83ef0d
CLDSRV-429: update get APIs with impDeny logic 2023-11-30 17:17:30 +01:00
Francois Ferrand a4e6f9d034
Add lifecycle restore duration metrics
Issue: CLDSRV-471
2023-11-30 14:55:01 +01:00
Maha Benzekri cf94b9de6a
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:23:08 +01:00
Maha Benzekri da0492d2bb
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:22:32 +01:00
Maha Benzekri 979b9065ed
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:19:27 +01:00
Maha Benzekri d5a3923f74
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:18:06 +01:00
Maha Benzekri 23cbbdaaed
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-428-put-apis-impDeny' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:17:05 +01:00
Maha Benzekri e506dea140
Merge remote-tracking branch 'origin/development/8.6' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:15:52 +01:00
Maha Benzekri 78721be7f7
Merge remote-tracking branch 'origin/improvement/CLDSRV-428-put-apis-impDeny' into w/7.70/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:10:26 +01:00
Maha Benzekri 02c5a46d14
Merge remote-tracking branch 'origin/development/7.70' into w/7.70/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:06:41 +01:00
Maha Benzekri b138955ef2
Merge remote-tracking branch 'origin/development/7.10' into HEAD 2023-11-29 16:01:54 +01:00
Maha Benzekri 7d10e5d69e
CLDSRV-428: Bump CLDSRV version 2023-11-29 15:59:28 +01:00
bert-e bc291fe3a7 Merge branches 'w/8.8/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.8 2023-11-27 17:16:14 +00:00
bert-e 8dc7432c51 Merge branches 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.7 2023-11-27 17:16:14 +00:00
bert-e 040fe53e53 Merge branches 'w/8.6/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/7.70/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.6 2023-11-27 17:16:13 +00:00
bert-e 60e350a5cf Merge branches 'w/7.70/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/7.10/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/7.70 2023-11-27 17:16:13 +00:00
bert-e 5de00c80f8 Merge branch 'bugfix/CLDSRV-463/bump_cloudserver' into q/7.10 2023-11-27 17:16:13 +00:00
bert-e 6f963bdcd9 Merge branch 'w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:56 +00:00
bert-e cd9024fd32 Merge branch 'w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:55 +00:00
Maha Benzekri 37649bf49b
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-428-put-apis-impDeny' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 17:01:43 +01:00
Maha Benzekri abf5ea33a9
Merge remote-tracking branch 'origin/improvement/CLDSRV-428-put-apis-impDeny' into w/7.70/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:59:09 +01:00
Maha Benzekri 2596f3fda8
CLDSRV-428: put APIs updated for implicit deny
In this commit, the put APIs have been updated to check for the implicit
deny returned by Vault, which is added as a parameter on the request object.
Tests have also been added for the metadataUtils validateBucket function,
and the metadataUtils functions have been updated to check for implicit
deny.
The goal is to implement the same authorization logic as AWS, where, for
example, an implicit deny from IAM combined with an Allow from the bucket
policy should allow the request.
For the delete on objectPutCopyPart and objectPutPart, since we need to
differentiate between the Vault request and the external backend one, the
delete is applied to the request directly, as it is unique per API call;
this value is then added to the request object. Here is the link to the
design doc for more details:
https://github.com/scality/citadel/blob/development/1.0/docs/design/bucket-policies.md?plain=1#L263
2023-11-27 16:47:43 +01:00
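The AWS-style combination this message describes (an explicit deny from either source wins; an implicit IAM deny can be overridden by an explicit bucket-policy Allow) can be sketched as a small decision function. The verdict encoding is illustrative, not the actual cloudserver representation:

    // Verdicts: 'Allow' (explicit), 'Deny' (explicit), 'Implicit' (no match).
    function requestIsAllowed(iamVerdict, bucketPolicyVerdict) {
        if (iamVerdict === 'Deny' || bucketPolicyVerdict === 'Deny') {
            return false; // an explicit deny anywhere always wins
        }
        if (iamVerdict === 'Allow' || bucketPolicyVerdict === 'Allow') {
            return true; // one explicit allow and no explicit deny
        }
        return false; // both implicit: default deny
    }

    requestIsAllowed('Implicit', 'Allow'); // true -- the case called out above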
bert-e dff7610060 Merge branch 'w/8.7/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.8/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:07 +00:00
bert-e 757c2537ef Merge branch 'w/8.6/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.7/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:06 +00:00
Maha Benzekri c445322685
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-427-permissions-checks' into w/8.6/improvement/CLDSRV-427-permissions-checks 2023-11-17 12:28:19 +01:00
bert-e 2344204746 Merge branch 'improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/7.70/improvement/CLDSRV-427-permissions-checks 2023-11-17 10:50:50 +00:00
Maha Benzekri 693ddf8d35
Merge branch 'development/7.10' into improvement/CLDSRV-427-permissions-checks 2023-11-17 11:40:17 +01:00
Maha Benzekri 6caa5cc26a
CLDSRV-427: Improving functions using helper function
- In this commit, I added a helper function (processBucketPolicy)
for the bucket policy checks that are shared between
isBucketAuthorized, isObjAuthorized, and evaluateBucketPolicyWithIAM,
for better code readability and to avoid long functions.

(cherry picked from commit 33d7c99e0c)
2023-11-17 11:36:22 +01:00
bert-e 4515b2adbf Merge branch 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
bert-e 50ffdd260b Merge branch 'w/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
Taylor McKinnon 3836848c05 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-463/bump_cloudserver' into w/8.6/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 15:26:23 -08:00
Taylor McKinnon 813a1553d2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-463/bump_cloudserver' into w/7.70/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 15:25:13 -08:00
Taylor McKinnon 1238cd809c bf(CLDSRV-463): Bump cloudserver to 7.10.34/7.70.31 2023-11-16 15:23:14 -08:00
bert-e b5f22d8c68 Merge branches 'w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.8 2023-11-16 19:43:14 +00:00
bert-e 68ff54d49a Merge branches 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.7 2023-11-16 19:43:13 +00:00
bert-e a74b3eacf8 Merge branches 'w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.6 2023-11-16 19:43:13 +00:00
bert-e f00a2f2d9e Merge branch 'q/5403/7.10/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/normal/q/7.70 2023-11-16 19:43:13 +00:00
bert-e 02bb60253a Merge branch 'bugfix/CLDSRV-463/strictly_check_algo_headers' into q/7.10 2023-11-16 19:43:12 +00:00
bert-e 3fe5579c80 Merge branch 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:08 +00:00
bert-e 3fdd2bce21 Merge branch 'w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:07 +00:00
Taylor McKinnon 44e6eb2550 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers' into w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 11:20:55 -08:00
Taylor McKinnon c148c770ac Merge remote-tracking branch 'origin/bugfix/CLDSRV-463/strictly_check_algo_headers' into w/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 11:17:40 -08:00
Maha Benzekri fa2f877825
CLDSRV-427: linting fixups and retrocompatibility changes
(cherry picked from commit a7396a721c)
2023-11-15 11:26:02 +01:00
Will Toozs 0e323fbefe
CLDSRV-427: update bucket/object perm checks to account for implicit …
…denies

(cherry picked from commit c01898f1a0)
(cherry picked from commit 7aa326cba9)
2023-11-15 11:26:01 +01:00
bert-e c9b512174f Merge branches 'w/8.8/bugfix/CLDSRV-460-forward-system-signals' and 'q/5431/8.7/bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/q/8.8 2023-11-15 10:14:18 +00:00
bert-e 7b48624cf7 Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into q/8.7 2023-11-15 10:14:17 +00:00
bert-e 55b07def2e Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/w/8.8/bugfix/CLDSRV-460-forward-system-signals 2023-11-15 09:43:35 +00:00
bert-e 62ae2b2c69 Merge branch 'w/7.70/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.6/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e fcc9468b63 Merge branch 'w/8.6/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.7/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e efc44a620d Merge branch 'w/8.7/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.8/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
Maha Benzekri 72342f6654
Merge remote-tracking branch 'origin/improvement/CLDSRV-468-version-bump' into w/7.70/improvement/CLDSRV-468-version-bump 2023-11-14 12:06:02 +01:00
Maha Benzekri fa11e58d57
CLDSRV-468: CLDSRV version bump 2023-11-14 11:57:09 +01:00
bert-e 1bc19b39d7 Merge branches 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.7 2023-11-13 17:20:17 +00:00
bert-e b5fa3a1fd3 Merge branches 'w/8.8/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.8 2023-11-13 17:20:17 +00:00
bert-e 68a6fc659c Merge branches 'w/8.6/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/7.70/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.6 2023-11-13 17:20:16 +00:00
bert-e 2624a05018 Merge branches 'w/7.70/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/7.10/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/7.70 2023-11-13 17:20:16 +00:00
bert-e 0882bfffb9 Merge branch 'improvement/CLDSRV-466/timestamps_in_stderr' into q/7.10 2023-11-13 17:20:15 +00:00
bert-e c0fc958365 Merge branch 'w/8.7/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.8/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:26 +00:00
bert-e d3c74d2c16 Merge branch 'w/8.6/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.7/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:25 +00:00
Maha Benzekri 9001285177
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-426-acl-impl-deny' into w/8.6/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 17:02:22 +01:00
bert-e bae6e8ecb3 Merge branch 'improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/7.70/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 15:56:01 +00:00
Will Toozs e0eab954aa
CLDSRV-426: add tests for ACL permission check updates
CLDSRV-426: additional test for ACL permission
2023-11-13 13:10:38 +01:00
Will Toozs 19b4e25373
CLDSRV-426: update ACL permission checks for implicitDeny logic
CLDSRV-426: fixups on ACL permission checks for implicitDeny logic

CLDSRV-426: better readability on ACL permission
2023-11-13 13:10:37 +01:00
Kerkesni 07eda89a3f
forward system signals to the node process using tini
npm run doesn’t handle signal forwarding and crashes
on the SIGTERM signal sent by Kubernetes.

Tini runs as PID 1 and handles forwarding
system signals to all its child processes.

Issue: CLDSRV-460
2023-11-13 12:07:29 +01:00
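The practical effect of running under tini is that SIGTERM actually reaches the node process, so an in-process handler gets a chance to run. A minimal sketch (the entrypoint in the comment is an assumed example, not the repo's actual Dockerfile):

    // Assumed Dockerfile entrypoint: ENTRYPOINT ["tini", "--", "node", "index.js"]
    // This handler is only reachable when PID 1 forwards the signal;
    // under a bare `npm run`, the SIGTERM from Kubernetes never arrived here.
    process.on('SIGTERM', () => {
        console.log('SIGTERM received, shutting down gracefully');
        process.exit(0); // placeholder for real cleanup (close servers, drain requests)
    });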
bert-e 27b4066ca4 Merge branch 'w/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.7/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e 2ee5b356fa Merge branch 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.8/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e 233955a0d3 Merge branch 'w/7.70/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.6/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:44 +00:00
bert-e ab51522110 Merge branch 'improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/7.70/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:44 +00:00
Rahul Padigela b1b2d2ada6 improvement CLDSRV-466 add timestamp for exceptions 2023-11-10 08:17:34 -08:00
bert-e f5d3433413 Merge branches 'w/8.8/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.8 2023-11-09 17:31:36 +00:00
bert-e 62b4b9bc25 Merge branches 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.7 2023-11-09 17:31:35 +00:00
bert-e ce4b2b5a27 Merge branches 'w/8.6/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/7.70/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.6 2023-11-09 17:31:34 +00:00
bert-e 96bd67ee60 Merge branch 'improvement/CLDSRV-464/support_mpu_scuba' into q/7.70 2023-11-09 17:31:34 +00:00
bert-e ec56c77881 Merge branch 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.8/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e d0abde3962 Merge branch 'w/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.7/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e f08a3f434b Merge branch 'improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.6/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:06 +00:00
bert-e fdc682f2db Merge branches 'w/8.8/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.7/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.8 2023-11-07 09:32:43 +00:00
bert-e b184606dc2 Merge branches 'w/8.7/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.6/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.7 2023-11-07 09:32:43 +00:00
bert-e 172ec4a714 Merge branches 'w/8.6/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/7.70/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.6 2023-11-07 09:32:42 +00:00
bert-e ae770d0d3f Merge branch 'improvement/CLDSRV-424-apicall-auth-update' into q/7.10 2023-11-07 09:32:41 +00:00
bert-e 7d2613e9a3 Merge branches 'w/7.70/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/7.10/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/7.70 2023-11-07 09:32:41 +00:00
Maha Benzekri 9ce0f2c2b6
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-424-apicall-auth-update' into w/8.8/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:20:41 +01:00
Maha Benzekri 43b4e0c713
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-424-apicall-auth-update' into w/8.7/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:18:48 +01:00
Maha Benzekri 2bda761518
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-424-apicall-auth-update' into w/8.6/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:16:48 +01:00
Maha Benzekri bfc9ca68c9
Merge remote-tracking branch 'origin/improvement/CLDSRV-424-apicall-auth-update' into w/7.70/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:13:45 +01:00
Maha Benzekri 6abb0d96a9
CLDSRV-424: CLDSRV version bump
Update lib/api/api.js

Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2023-11-07 09:06:23 +01:00
Maha Benzekri 733f424a4b
CLDSRV-424: ARSN version bump 2023-11-03 12:39:09 +01:00
Will Toozs 8d4ff7df5f
CLDSRV-424: API call updated with implicit deny logic
change variable names for clarity

edit: update arsenal package
2023-11-03 12:39:01 +01:00
Taylor McKinnon 59b87479df possible => unsupported 2023-11-01 10:17:20 -07:00
Taylor McKinnon 967ab966fa impr(CLDSRV-464): Add owner-id to mpu part metadata 2023-11-01 09:06:20 -07:00
Taylor McKinnon 212c7f506c impr(CLDSRV-464): Pass overhead fields for complete MPU and subsequent part batch delete 2023-11-01 09:04:32 -07:00
Taylor McKinnon 1e9ee0ef0b bf(CLDSRV-463): Strictly validate checksum algorithm headers 2023-10-30 10:54:34 -07:00
bert-e 9185f16554 Merge branch 'w/8.7/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.8/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
bert-e 2df9a57f9c Merge branch 'w/8.6/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.7/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
Nicolas Humbert c96706ff28 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-462/tags' into w/8.6/bugfix/CLDSRV-462/tags 2023-10-25 20:42:14 +02:00
Nicolas Humbert daa6f46b14 Merge remote-tracking branch 'origin/bugfix/CLDSRV-462/tags' into w/7.70/bugfix/CLDSRV-462/tags 2023-10-25 20:21:40 +02:00
Nicolas Humbert 44315057df CLDSRV-462 bump project version 2023-10-25 19:59:47 +02:00
Nicolas Humbert 61fe64a3ac CLDSRV-462 Expiration header is not compatible with legacy object md
Before the Object Metadata refactor done around May 31, 2017 (c22e44f63d), if no tags were set, the object tag was stored as undefined.

After the commit, if no tags are set, the object tag is stored as an empty object '{}'.

When the expiration response headers were implemented on 812b09afef around Nov 22, 2021, the empty object was handled, but not the undefined tag logic, which made the expiration response headers not backward compatible.

We need to address both cases: the undefined property and the empty object '{}'.
2023-10-25 19:59:07 +02:00
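A backward-compatible tag check therefore has to treat the legacy undefined and the post-refactor empty object the same way; a minimal sketch:

    // Both the legacy encoding (undefined) and the current one ({})
    // mean "no tags on the object".
    function objectHasTags(tags) {
        return tags !== undefined && Object.keys(tags).length > 0;
    }

    objectHasTags(undefined);    // false - pre-2017 object metadata
    objectHasTags({});           // false - post-refactor empty tags
    objectHasTags({ k: 'v' });   // true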
bert-e 68535f83d6 Merge branches 'w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.8 2023-10-24 18:40:33 +00:00
bert-e 41d63650be Merge branches 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.7 2023-10-24 18:40:32 +00:00
bert-e 4ebb5d449a Merge branches 'w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/7.70/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.6 2023-10-24 18:40:32 +00:00
bert-e 48abedc6f7 Merge branch 'bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into q/7.70 2023-10-24 18:40:31 +00:00
bert-e 12185f7c3b Merge branches 'w/8.8/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.7/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.8 2023-10-19 20:36:18 +00:00
bert-e 5f82ee2d0e Merge branches 'w/8.7/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.6/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.7 2023-10-19 20:36:18 +00:00
bert-e 7e0f9c63fe Merge branches 'w/8.6/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/7.70/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.6 2023-10-19 20:36:17 +00:00
bert-e 9f5ac17357 Merge branch 'improvement/CLDSRV-449/pass_overhead_fields' into q/7.70 2023-10-19 20:36:17 +00:00
Taylor McKinnon d72bc5c6b9 Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-449/pass_overhead_fields' into w/8.8/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 13:16:26 -07:00
Taylor McKinnon 0e47810963 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-449/pass_overhead_fields' into w/8.7/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 12:40:23 -07:00
Taylor McKinnon 8d83546ee3 Merge remote-tracking branch 'origin/improvement/CLDSRV-449/pass_overhead_fields' into w/8.6/improvement/CLDSRV-449/pass_overhead_fields 2023-10-16 12:01:03 -07:00
Taylor McKinnon fff4fd5d22 impr(CLDSRV-449): Add unit tests for overheadField param 2023-10-16 11:13:49 -07:00
Taylor McKinnon 1016a6826d impr(CLDSRV-449): Pass overheadField to Metadata in API handlers 2023-10-16 11:13:49 -07:00
bert-e 3b36cef85f Merge branch 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 18:57:46 +00:00
Jonathan Gramain 114b885c7f Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 11:35:23 -07:00
Jonathan Gramain e56d4e3744 Merge remote-tracking branch 'origin/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 11:05:32 -07:00
Jonathan Gramain 15144e4adf CLDSRV-458 bump cloudserver version 2023-10-11 11:03:02 -07:00
Jonathan Gramain 3985e2a712 bf: CLDSRV-458 fix bucketd params on null version update
On in-place updates of "legacy" null versions (those without the
"isNull2" attribute, using the "nullVersionId" chain instead of null
keys), we mustn't pass the "isNull" query parameter when sending the
update request to bucketd. Otherwise, it creates a null key which
causes issues when deleting the null version later.

Use a helper to pass the right set of parameters in all request types
that update versions in-place.
2023-10-11 10:59:56 -07:00
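A sketch of the helper idea described above, built from the attributes the message names (isNull, isNull2); the real parameter plumbing in cloudserver is more involved, so the exact condition here is an inference:

    // Choose query params for an in-place version update sent to bucketd.
    // Legacy null versions (no isNull2 attribute) must NOT get the isNull
    // param, or bucketd creates a null key that breaks later deletion.
    function inPlaceUpdateParams(objMD, versionId) {
        const params = { versionId };
        if (objMD.isNull && objMD.isNull2) {
            params.isNull = true;
        }
        return params;
    }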
williamlardier 3b95c033d2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal' into w/8.8/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal 2023-10-06 17:59:22 +02:00
williamlardier 04091dc316 CLDSRV-457: bump project version 2023-10-06 14:54:35 +02:00
williamlardier 56023a80ed CLDSRV-457: bump arsenal 2023-10-06 14:54:34 +02:00
bert-e 2deaebd89a Merge branch 'w/8.7/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.8/bugfix/CLDSRV-455/skip 2023-10-05 16:41:46 +00:00
bert-e c706ccf9c6 Merge branch 'w/8.6/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.7/bugfix/CLDSRV-455/skip 2023-10-05 16:41:45 +00:00
Nicolas Humbert 4afb2476f8 Merge remote-tracking branch 'origin/bugfix/CLDSRV-455/skip' into w/8.6/bugfix/CLDSRV-455/skip 2023-10-05 18:21:54 +02:00
Nicolas Humbert 91a7e7f24f CLDSRV-455 orphan delete marker list interruption skips processed key
The key marker in the orphan delete marker listing response should match the last key in the response's key array.
This ensures that the next listing begins after the key that has already been returned.
2023-10-05 15:55:45 +02:00
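The invariant stated above, that the returned key marker must equal the last key actually returned, is straightforward to express; a sketch:

    // NextKeyMarker must point at the last key already returned so the
    // next listing resumes strictly after it, skipping nothing.
    function nextKeyMarker(contents, previousMarker) {
        return contents.length > 0
            ? contents[contents.length - 1].key
            : previousMarker;
    }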
Taylor McKinnon 2f344cde70 impr(CLDSRV-449): Pass overheadField through helper functions to MetadataWrapper 2023-10-04 15:04:10 -07:00
Taylor McKinnon ad154085ac impr(CLDSRV-449): Use correct method in log message 2023-10-04 15:04:10 -07:00
Francois Ferrand 583ea8490f
Bump 8.8.3
Issue: CLDSRV-454
2023-10-04 11:18:25 +02:00
bert-e 85a9480793 Merge branch 'w/8.8/improvement/CLDSRV-446/bump' into tmp/octopus/q/8.8 2023-10-03 10:44:50 +00:00
bert-e be2f65b69e Merge branch 'bugfix/CLDSRV-423-test-sproxyd' into q/8.8 2023-10-03 10:12:16 +00:00
bert-e 1ee6d0a87d Merge branch 'w/8.7/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.8/improvement/CLDSRV-446/bump 2023-10-02 15:25:13 +00:00
bert-e 224af9a5d2 Merge branch 'w/8.6/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-446/bump 2023-10-02 15:25:12 +00:00
Nicolas Humbert 9e2ad48c5c Merge remote-tracking branch 'origin/improvement/CLDSRV-446/bump' into w/8.6/improvement/CLDSRV-446/bump 2023-10-02 17:12:32 +02:00
Nicolas Humbert 780971ce10 CLDSRV-446 bump version 2023-10-02 17:08:41 +02:00
bert-e 74f05377f0 Merge branch 'w/8.7/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.8/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:08 +00:00
bert-e 111e14cc89 Merge branch 'w/8.6/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.7/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:07 +00:00
Nicolas Humbert fd6fb5a26c Merge remote-tracking branch 'origin/improvement/CLDSRV-446/listing-scanned-limit' into w/8.6/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 15:30:02 +02:00
Nicolas Humbert 8df540dcc1 CLDSRV-446 Limiting entries scanned during the lifecycle listing 2023-10-02 15:08:37 +02:00
Florent Monjalet 00b20f00d1 Merge remote-tracking branch 'origin/development/8.8' into bugfix/CLDSRV-423-test-sproxyd 2023-10-02 13:45:58 +02:00
Florent Monjalet a91d53a12c CLDSRV-423: test distinct and overwriting PUTs 2023-09-27 11:58:20 +02:00
Florent Monjalet 63d2637046 CLDSRV-423: improve async series usage in test 2023-09-27 11:50:44 +02:00
Maha Benzekri 5d416ad190
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-444-id-resource-policy' into w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:49:03 +02:00
Maha Benzekri ff29cda03f
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-444-id-resource-policy' into w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:47:33 +02:00
Maha Benzekri 5685b2e972
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-444-id-resource-policy' into w/8.6/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:45:19 +02:00
Maha Benzekri 4e4ea2ab84
Merge remote-tracking branch 'origin/bugfix/CLDSRV-444-id-resource-policy' into w/7.70/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:43:20 +02:00
Florent Monjalet cb8baf2dab CLDSRV-423: provide a proper dockerfile for test sproxyd 2023-09-27 11:36:49 +02:00
Maha Benzekri 67e5694d26
CLDSRV-447: CLDSRV version bump 2023-09-27 11:23:26 +02:00
bert-e 22f470c6eb Merge branch 'w/8.7/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:17 +00:00
bert-e e510473116 Merge branch 'w/8.6/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:16 +00:00
Maha Benzekri d046e8a294
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-444-id-resource-policy' into w/8.6/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 10:27:47 +02:00
Maha Benzekri 20a730788a
Merge remote-tracking branch 'origin/bugfix/CLDSRV-444-id-resource-policy' into w/7.70/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 10:15:37 +02:00
Maha Benzekri 47958591ec
CLDSRV-447: ARSN version bump
fixes after reviews
2023-09-26 16:13:14 +02:00
Maha Benzekri 4195b6ae6a
CLDSRV-447: add test for Principal && ARSN bump 2023-09-25 15:13:58 +02:00
Maha Benzekri feefd13b68
CLDSRV-444: unit test with Id and arsenal version 2023-09-12 22:51:04 +02:00
Florent Monjalet 17a6808fe4 CLDSRV-423: bump arsenal and sproxydclient to fix SPRXCLT-12 2023-08-31 19:07:44 +02:00
Florent Monjalet df646e4802 CLDSRV-423: disable failing tests that have just been reenabled
They had been disabled for a long while and cannot be reenabled yet
because they don't pass, so keep on skipping them for now.

Tickets have been created to take care of them:

- CLDSRV-440
- CLDSRV-441
- CLDSRV-442
- CLDSRV-443
2023-08-31 19:06:34 +02:00
Florent Monjalet 267770d256 CLDSRV-423: reproduce SPRXCLT-12 more often 2023-08-31 19:06:34 +02:00
Florent Monjalet 1b92dc2c05 CLDSRV-423: perform two successive puts in multiple backend tests
This tests for the SPRXCLT-12 issue
2023-08-31 19:06:34 +02:00
Florent Monjalet f80bb2f34b CLDSRV-423: don't run sproxyd test when testing Ceph 2023-08-31 19:06:34 +02:00
Florent Monjalet 4f89b67bb9 CLDSRV-423: Add missing mock logger method 2023-08-31 19:06:34 +02:00
Florent Monjalet 8b5630923c CLDSRV-423: refactor multiple backend put tests to avoid duplication 2023-08-31 19:06:34 +02:00
Florent Monjalet 9ff5e376e5 CLDSRV-423: reenable a good chunk of multiple backend tests 2023-08-31 19:06:34 +02:00
Florent Monjalet a9b5a2e3a4 CLDSRV-423: add put test for sproxyd 2023-08-31 19:06:34 +02:00
Florent Monjalet 7e9ec22ae3 CLDSRV-423: deploy sproxyd for multiple backend tests 2023-08-31 19:06:34 +02:00
bert-e 9d4664ae06 Merge branch 'w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.8/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
bert-e 662265ba2e Merge branch 'w/8.6/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
bert-e c7da82dda7 Merge branch 'w/7.70/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.6/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:29 +00:00
Taylor McKinnon 960b4b2dd4 Merge remote-tracking branch 'origin/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into w/7.70/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 09:41:53 -07:00
Taylor McKinnon 1e9af343b9 bf(CLDSRV-439): Bump version to 7.10.30 2023-08-30 09:25:48 -07:00
Taylor McKinnon 8bb7338080 bf(CLDSRV-439): Bump arsenal to 7.10.47 2023-08-30 09:24:17 -07:00
Taylor McKinnon 17e4f14f9c Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-413/bump_version' into w/8.8/bugfix/CLDSRV-413/bump_version 2023-08-18 10:10:01 -07:00
Taylor McKinnon 014b071536 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-413/bump_version' into w/8.7/bugfix/CLDSRV-413/bump_version 2023-08-18 10:07:14 -07:00
Taylor McKinnon 9130f323d4 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-413/bump_version' into w/8.6/bugfix/CLDSRV-413/bump_version 2023-08-18 10:05:33 -07:00
Taylor McKinnon c09d3282dc Merge remote-tracking branch 'origin/bugfix/CLDSRV-413/bump_version' into w/7.70/bugfix/CLDSRV-413/bump_version 2023-08-18 09:54:43 -07:00
Taylor McKinnon fb9175579f bf(CLDSRV-413): Bump cloudserver version 2023-08-18 09:44:53 -07:00
bert-e 2d45f92ae1 Merge branches 'w/8.8/feature/CLDSRV-420/backport' and 'q/5268/8.7/feature/CLDSRV-420/backport' into tmp/octopus/q/8.8 2023-08-18 14:53:18 +00:00
bert-e 48452496fa Merge branches 'w/8.7/feature/CLDSRV-420/backport' and 'q/5268/8.6/feature/CLDSRV-420/backport' into tmp/octopus/q/8.7 2023-08-18 14:53:18 +00:00
bert-e b89773eba6 Merge branch 'q/5268/7.70/feature/CLDSRV-420/backport' into tmp/normal/q/8.6 2023-08-18 14:53:18 +00:00
bert-e c738e0924e Merge branch 'feature/CLDSRV-420/backport' into q/7.70 2023-08-18 14:53:16 +00:00
bert-e 18bf6b8d4a Merge branch 'w/8.7/feature/CLDSRV-420/backport' into tmp/octopus/w/8.8/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
bert-e 858c31a542 Merge branch 'w/8.6/feature/CLDSRV-420/backport' into tmp/octopus/w/8.7/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
Nicolas Humbert 75a759de27 Merge remote-tracking branch 'origin/feature/CLDSRV-420/backport' into w/8.6/feature/CLDSRV-420/backport 2023-08-18 12:57:48 +02:00
bert-e 19d3e0bc9d Merge branch 'w/8.7/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.8/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:03:00 +00:00
bert-e bac044dc8f Merge branch 'w/8.6/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.7/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:02:59 +00:00
Taylor McKinnon 8f77cd18c8 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-413/crr_existing_null_version' into w/8.6/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 10:02:29 -07:00
bert-e cb7609b173 Merge branch 'bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/7.70/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:00:57 +00:00
Taylor McKinnon 2926048735 bf(CLDSRV-413): Set isNull in objectMD in backbeat putMetadata route for current null versions 2023-08-17 09:59:58 -07:00
Taylor McKinnon 656ef3fcee bf(CLDSRV-413): improve backbeat route testing setup cleanup 2023-08-16 14:25:31 -07:00
bert-e 8c0f709014 Merge branch 'bugfix/CLDSRV-422' into tmp/octopus/w/8.8/bugfix/CLDSRV-422 2023-08-16 11:46:43 +00:00
Francois Ferrand ce92d33a5d
Fix use of http_requests_total metrics
It was missed when metric names were updated. In addition, the dashboard
was not up-to-date with the python source, and needed to be regenerated.

Issue: CLDSRV-422
2023-08-14 14:36:14 +02:00
Kerkesni 0381cce85c
Merge remote-tracking branch 'origin/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring' into w/8.8/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring 2023-08-10 16:07:42 +02:00
Kerkesni 20a08a2a4e
bump version to 8.7.26 2023-08-10 16:04:25 +02:00
Kerkesni ff73d8ab12
add tests for keeping object properties after restore
Issue: CLDSRV-408
2023-08-10 16:03:58 +02:00
Kerkesni 1ee44bc6d3
keep same object properties after a restore of a cold object
Object properties such as ACLs and custom user metadata should
not be removed after the restore of a cold object.

Issue: CLDSRV-408
2023-08-10 12:58:18 +02:00
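Preserving those properties means the restore write has to start from the existing cold-object metadata rather than from a fresh record; a sketch with assumed field names:

    // Keep ACLs, x-amz-meta-* and other attributes from the cold object;
    // only layer the restore-specific state on top. 'archive' is an
    // assumed name for that state, not necessarily the real field.
    function buildRestoredMD(coldObjectMD, restoreState) {
        return Object.assign({}, coldObjectMD, { archive: restoreState });
    }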
bert-e 614e876536 Merge branches 'w/8.8/improvement/CLDSRV-400' and 'q/5191/8.7/improvement/CLDSRV-400' into tmp/octopus/q/8.8 2023-08-09 16:42:42 +00:00
bert-e b40a77d94b Merge branch 'improvement/CLDSRV-400' into q/8.7 2023-08-09 16:42:42 +00:00
bert-e 3a3a73b756 Merge branch 'improvement/CLDSRV-400' into tmp/octopus/w/8.8/improvement/CLDSRV-400 2023-08-09 16:19:33 +00:00
Nicolas Humbert 6789959109 CLDSRV-420 add us-east-2 location 2023-08-09 10:14:47 -04:00
Nicolas Humbert bf9b53eea9 CLDSRV-420 fix linter 2023-08-09 10:14:47 -04:00
Nicolas Humbert aa04d23e68 CLDSRV-420 test v1 and v0 bucket format 2023-08-09 10:14:47 -04:00
Nicolas Humbert e08aaa7bcc CLDSRV-412 Test null version in Lifecycle list of non-current versions
(cherry picked from commit 98f855f997)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 1d9c44126a CLDSRV-375 Exclude already transitioned keys from lifecycle listings
(cherry picked from commit 4c189b2d9e)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 70a28ab620 CLDSRV-420 fix import metrics 2023-08-09 10:14:47 -04:00
Nicolas Humbert 550451eefa CLDSRV-372 Current lifecycle versions should include version id
(cherry picked from commit f7f77c6cd2)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 96befd3f28 CLDSRV-371 ETag should be surrounded by double quotes
(cherry picked from commit f20bf1becf)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 75288f1b56 CLDSRV-366 Clear list orphan delete markers response
(cherry picked from commit f2292f1ca3)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 6847f2b0c4 CLDSRV-363 ETag instead of Etag for lifecycle listings Contents
(cherry picked from commit 049f52bf95)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 050059548e CLDSRV-317 Implement listLifecycleOrphans
(cherry picked from commit ec9ed94555)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 9b2a557a05 CLDSRV-316 Implement listLifecycleNonCurrents
(cherry picked from commit 41cc399d85)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 7a7e2f4b91 CLDSRV-314 Implement listLifecycleCurrents
(cherry picked from commit 6b8a2581b6)
2023-08-09 10:14:42 -04:00
bert-e 3f6e85590d Merge branches 'w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.8 2023-08-07 17:27:19 +00:00
bert-e de589a07e8 Merge branches 'w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.6 2023-08-07 17:27:18 +00:00
bert-e bc009945d2 Merge branches 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.7 2023-08-07 17:27:18 +00:00
bert-e 8db04f4486 Merge branches 'w/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/7.10/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/7.70 2023-08-07 17:27:17 +00:00
bert-e 328b7bc335 Merge branch 'bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into q/7.10 2023-08-07 17:27:17 +00:00
bert-e 3ac30d9bab Merge branch 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e 32204fbfbf Merge branch 'w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e b1eda2a73a Merge branch 'w/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:45 +00:00
bert-e 0249ad9bcf Merge branch 'bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:45 +00:00
bert-e 5a26e1a80d Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 507a2d4ff5 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 8cdd35950b Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:26:59 +00:00
bert-e bfa366cd27 Merge branch 'improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/7.70/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:26:59 +00:00
Dimitrios Vasilas d132757696 CLDSRV-411: Add tests for imposing last-modified in testing mode 2023-07-20 09:58:09 +02:00
Alexander Chan 2a4be31b8a CLDSRV-196: create new werelogs object over using global werelogs
(cherry picked from commit 7fd547db24)
2023-07-19 12:25:55 -07:00
bert-e 1207a6fb70 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e 5883286864 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e b206728342 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:27 +00:00
bert-e 347a7391b9 Merge branch 'improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/7.70/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:27 +00:00
Dimitrios Vasilas 6273eebe66
CLDSRV-411: Use method to set last-modified
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2023-07-19 10:45:17 +02:00
bert-e 2a37e809d9 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:22 +00:00
bert-e 86ce7691cd Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:21 +00:00
bert-e c04f663480 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:21 +00:00
Dimitrios Vasilas f2493e982f CLDSRV-411: Remove double import 2023-07-18 14:15:44 +02:00
bert-e e466b5e92a Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:16 +00:00
bert-e a4bc10f730 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:15 +00:00
bert-e e826033bf0 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:15 +00:00
Dimitrios Vasilas c23dad6fb8 Merge remote-tracking branch 'origin/improvement/CLDSRV-411-impose-last-modified' into w/7.70/improvement/CLDSRV-411-impose-last-modified 2023-07-17 18:50:30 +02:00
Dimitrios Vasilas 5fcdaa5a97 CLDSRV-411: Add mechanism for imposing the last-modified
When the configuration parameter "testingMode" is set to true,
a put can specify a custom last-modified date using the header
x-amz-meta-x-scal-last-modified.

This is intended to be used in tests only.
2023-07-17 18:40:18 +02:00
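Given the header named above, the mechanism presumably reduces to a guarded header lookup; the function name in this sketch is an assumption, though the config flag and header come from the message:

    // Returns the imposed last-modified date, or null when not applicable.
    function imposedLastModified(requestHeaders, config) {
        if (!config.testingMode) {
            return null; // never honored outside testing mode
        }
        return requestHeaders['x-amz-meta-x-scal-last-modified'] || null;
    }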
Dimitrios Vasilas 9f61ef9a3b CLDSRV-411: Add testing mode 2023-07-17 12:05:16 +02:00
Nicolas Humbert c480301e95 Merge remote-tracking branch 'origin/improvement/CLDSRV-414/bump' into w/8.8/improvement/CLDSRV-414/bump 2023-07-14 15:52:57 -04:00
Nicolas Humbert 276be285cc CLDSRV-414 bump version 2023-07-14 15:47:13 -04:00
bert-e 897d41392a Merge branch 'w/8.7/bugfix/CLDSRV-412/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-412/null 2023-07-14 14:08:38 +00:00
bert-e f4e3a19d61 Merge branch 'bugfix/CLDSRV-412/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-412/null 2023-07-14 14:08:37 +00:00
Nicolas Humbert ee84a03d2c bump arsenal 2023-07-14 09:49:30 -04:00
Nicolas Humbert 98f855f997 CLDSRV-412 Test null version in Lifecycle list of non-current versions 2023-07-14 09:48:47 -04:00
williamlardier 7c52fcbbb0
CLDSRV-402: bump project version 2023-07-13 17:45:06 +02:00
bert-e da52688a39 Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:26 +00:00
bert-e 1cb54a66f8 Merge branch 'w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:25 +00:00
williamlardier 0bb61ddb5b
Merge remote-tracking branch 'origin/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 17:12:05 +02:00
williamlardier 68e4b0610a
CLDSRV-402: bump project version 2023-07-13 17:10:06 +02:00
bert-e d9fffdad9e Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 13:08:26 +00:00
williamlardier 389c32f819
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:06:34 +02:00
williamlardier c2df0bd3eb
Merge remote-tracking branch 'origin/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 14:22:48 +02:00
williamlardier af0436f1cd
CLDSRV-402: bump project version 2023-07-13 09:54:35 +02:00
williamlardier f7593d385e
CLDSRV-402: bump arsenal dependency 2023-07-13 09:54:35 +02:00
williamlardier 84068b205e
CLDSRV-402: test multi object delete optimization 2023-07-13 09:54:34 +02:00
williamlardier 9774d31b03
CLDSRV-402: optimize multiObjectDelete API
- Parallelism is increased to reduce latency
- If the backend supports it, batching is used
- Batch the deletion of objects from storage
- Flag to disable or enable the optimization, as well as
  a way to tune it.
2023-07-13 09:54:34 +02:00
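The bullet points above amount to deleting keys in bounded-parallel batches behind a tunable switch; a sketch of that shape (the function name, defaults, and option names are illustrative):

    // deleteBatch(keys) is assumed to remove a batch of keys from storage.
    async function batchedDelete(keys, deleteBatch,
        { enabled = true, batchSize = 50, concurrency = 5 } = {}) {
        if (!enabled) {
            for (const key of keys) {
                await deleteBatch([key]); // unoptimized: one key at a time
            }
            return;
        }
        const batches = [];
        for (let i = 0; i < keys.length; i += batchSize) {
            batches.push(keys.slice(i, i + batchSize));
        }
        while (batches.length > 0) {
            // run up to `concurrency` batches at a time, wave by wave
            await Promise.all(batches.splice(0, concurrency).map(b => deleteBatch(b)));
        }
    }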
Kerkesni d26b8bcfcc
test keeping same storage class when restoring a cold object
Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
Kerkesni e4634621ee
keep storage class as cold for restored objects
To be compliant with the AWS S3 standard, the storage class
of restored objects should be left as the cold location

Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
williamlardier 0b58b3ad2a
CLDSRV390: bump mongodb to 4.4 2023-06-22 16:56:53 +02:00
bert-e 652bf92536 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:43 +00:00
bert-e c5b1ef63ee Merge branch 'w/7.70/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:42 +00:00
bert-e 227de16bca Merge branch 'improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/7.70/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:41 +00:00
williamlardier c57a6e3c57
CLDSRV-409: fix s3cmd test 2023-06-22 15:14:24 +02:00
bert-e 344ee8a014 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:44:35 +00:00
bert-e 5d7a434306 Merge branch 'w/7.70/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:44:35 +00:00
williamlardier 852ae72a13
Merge remote-tracking branch 'origin/improvement/CLDSRV-409-fix-python-version' into w/7.70/improvement/CLDSRV-409-fix-python-version 2023-06-22 14:43:18 +02:00
williamlardier 507782bd17
CLDSRV-409: remove virtualenv 2023-06-22 14:42:09 +02:00
bert-e b7e7f65d52 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:37:53 +00:00
williamlardier d00320a8ba
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-409-fix-python-version' into w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 14:36:28 +02:00
williamlardier 4cf07193d9
Merge remote-tracking branch 'origin/improvement/CLDSRV-409-fix-python-version' into w/7.70/improvement/CLDSRV-409-fix-python-version 2023-06-22 14:34:03 +02:00
williamlardier aef272ea3c
CLDSRV-409: remove python 2.7 2023-06-22 14:30:43 +02:00
williamlardier 31d1734d5c
CLDSRV-409: use latest s3cmd with python3 2023-06-22 13:52:14 +02:00
bert-e c5b7450a4d Merge branches 'w/8.7/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.7 2023-06-12 22:01:22 +00:00
bert-e eb5affdced Merge branches 'w/8.6/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/8.5/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.6 2023-06-12 22:01:21 +00:00
bert-e cdaf6db929 Merge branches 'w/8.5/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/7.70/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.5 2023-06-12 22:01:21 +00:00
bert-e 91ada795d0 Merge branches 'w/7.70/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/7.10/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/7.70 2023-06-12 22:01:20 +00:00
bert-e 2b420a85e0 Merge branch 'w/7.10/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/7.10 2023-06-12 22:01:19 +00:00
Nicolas Humbert 18c8d4ecac CLDSRV-404 bump version 2023-06-09 11:48:56 -04:00
Nicolas Humbert c8150c6857 CLDSRV-397 Introduce the time-progression-factor flag
The "time-progression-factor" variable serves as a testing-specific feature that accelerates the progression of time within a system.
By reducing the significance of each day, it enables the swift execution of specific actions, such as expiration, transition, and object locking, which are typically associated with longer timeframes.

This capability allows for efficient testing and evaluation of outcomes, optimizing the observation of processes that would normally take days or even years.
It's important to note that this variable is intended exclusively for testing purposes and is not employed in live production environments, where real-time progression is crucial for accurate results.
2023-06-08 12:14:36 -04:00
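A minimal sketch of how such a factor could be applied (the variable names are assumptions, not the actual implementation):

    // With a factor of 86400, one "day" in expiration/transition
    // computations lasts one second.
    const timeProgressionFactor =
        Number(process.env.TIME_PROGRESSION_FACTOR) || 1;
    const scaledMsPerDay = (24 * 60 * 60 * 1000) / timeProgressionFactor;

    function daysElapsedSince(timestampMs) {
        return Math.floor((Date.now() - timestampMs) / scaledMsPerDay);
    }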
bert-e 399a2a53ab Merge branch 'improvement/CLDSRV-399/addWorkflowDispatch' into q/8.7 2023-06-05 20:39:18 +00:00
Alexander Chan bbad049b5f CLDSRV-399: add workflow_dispatch 2023-06-05 11:30:35 -07:00
bert-e 2a4e2e1584 Merge branch 'w/8.6/improvement/CLDSRV-398/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-398/bump 2023-06-02 20:19:28 +00:00
bert-e 08e43f5084 Merge branch 'w/8.5/improvement/CLDSRV-398/bump' into tmp/octopus/w/8.6/improvement/CLDSRV-398/bump 2023-06-02 20:19:27 +00:00
Nicolas Humbert cc153c99d6 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-398/bump' into w/8.5/improvement/CLDSRV-398/bump 2023-06-02 15:58:58 -04:00
Nicolas Humbert d3f9870389 Merge remote-tracking branch 'origin/improvement/CLDSRV-398/bump' into w/7.70/improvement/CLDSRV-398/bump 2023-06-02 15:14:08 -04:00
Nicolas Humbert 0fa264693d CLDSRV-398 bump version 2023-06-02 15:05:47 -04:00
bert-e b304d05614 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:27 +00:00
bert-e 751f6ce559 Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:26 +00:00
bert-e 0330597679 Merge branch 'w/7.70/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:26 +00:00
Nicolas Humbert 27cacc9552 CLDSRV-396 add nullVersionCompatMode condition 2023-06-02 14:14:05 -04:00
bert-e 004bd63368 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 16:12:33 +00:00
bert-e e047ae6fbb Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 16:12:32 +00:00
Nicolas Humbert ebca8dd05e Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-396/put-metadata-null' into w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 12:09:50 -04:00
bert-e 52535fb44b Merge branch 'bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/7.70/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 15:56:03 +00:00
Nicolas Humbert 1ed32b2cae CLDSRV-396 If put metadata for a null version, set options.isNull to true 2023-06-02 11:55:35 -04:00
Nicolas Humbert 960d736962 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-396/put-metadata-null' into w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 11:24:34 -04:00
bert-e 11098dd113 Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 15:13:36 +00:00
Nicolas Humbert 9cc7362fbd Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-396/put-metadata-null' into w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 08:27:34 -04:00
KillianG 32401c9a83
bump 8.7.23 2023-05-30 09:40:36 +00:00
KillianG 5f05b676cc
Merge remote-tracking branch 'origin/development/8.7' into HEAD 2023-05-26 09:46:21 +00:00
KillianG fd662a8c2c
Bump arsenal 8.1.101 and test that delete markers are not listed when bucket versioning is suspended
Issue: CLDSRV-347
2023-05-26 08:46:42 +00:00
bert-e a843d53939 Merge branch 'bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/7.70/bugfix/CLDSRV-396/put-metadata-null 2023-05-25 21:48:52 +00:00
Nicolas Humbert f889100798 CLDSRV-396 putMetadata API route is not updating null version properly 2023-05-25 17:42:47 -04:00
bert-e 5d54dd58be Merge branch 'bugfix/CLDSRV-393' into q/8.7 2023-05-25 19:47:24 +00:00
Nicolas Humbert 1bd0deafcf CLDSRV-395 bump to 8.7.21 2023-05-25 14:02:47 -04:00
Francois Ferrand 7c788d3dbf Bump github actions
Issue: CLDSRV-393
2023-05-25 14:02:47 -04:00
Nicolas Humbert 50cb6a2bf1 CLDSRV-374 putMetadata API route is not updating null version properly
Instead of using the provided "null" value, the metadata "null version id" is now used when updating the metadata of a null version.
2023-05-25 09:40:20 -04:00
bert-e 58f7bb2877 Merge branch 'w/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.7/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:13 +00:00
bert-e f899337284 Merge branch 'w/8.5/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.6/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:13 +00:00
bert-e b960a913ec Merge branch 'w/7.70/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.5/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:12 +00:00
bert-e 5436c0698e Merge branch 'w/7.10/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/7.70/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:12 +00:00
bert-e 3ff7856a94 Merge branch 'improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/7.10/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:11 +00:00
gaspardmoindrot 57fb5f1403 [CLDSRV-388] Implement GHAS 2023-05-24 22:39:31 +00:00
Francois Ferrand ea284508d7
Update x-amz-restore when updating the expiry date
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 0981fa42f3
Add version name in release runs
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 7e63064a52
Bump github actions
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 71b21e40ca
Add eslint rule to prevent exclusive tests
Lint will fail if it finds any `describe.only` or `it.only`.

Issue: CLDSRV-393
2023-05-24 17:14:24 +02:00
Francois Ferrand ff894bb545
Remove describe.only
This should never have been committed, as it disables most unit tests from
CI.

This caused some tests to actually fail:
* bad import of refactored `objectDelete` api
* getting an object while transitioning (archiving) is allowed

Issue: CLDSRV-393
2023-05-24 17:09:33 +02:00
Francois Ferrand ae9f24e1bb
Update expiry date on s3:restore on restored object
If the object is already restored, we simply need to update the expiry
date, as per AWS docs:
> After restoring an archived object, you can update the restoration
> period by reissuing the request with a new period. Amazon S3 updates
> the restoration period relative to the current time.

Issue: CLDSRV-393
2023-05-24 16:52:45 +02:00
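A sketch of the expiry update under assumed metadata field names (the actual x-amz-restore representation in cloudserver may differ):

    // Re-issuing a restore on an already-restored object only moves the
    // expiry date, computed relative to the current time per the AWS docs.
    function updateRestoreExpiry(objMD, days) {
        const expiryDate = new Date(Date.now() + days * 24 * 60 * 60 * 1000);
        objMD['x-amz-restore'] = {
            'ongoing-request': false,
            'expiry-date': expiryDate.toUTCString(),
        };
        return objMD;
    }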
bert-e 2dc01ce3ed Merge branch 'w/8.7/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/q/8.7 2023-05-15 16:39:05 +00:00
Kerkesni 9bd9bef6c7
bump version in package.json to 8.7.20
Issue: CLDSRV-386
2023-05-11 10:34:27 +02:00
bert-e 3a8bbefb6c Merge branch 'w/8.5/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.6/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:27:25 +00:00
bert-e a6a5c273d5 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:27:25 +00:00
Dimitrios Vasilas c329d9684d Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-370-build-dev-img-release' into w/8.5/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 16:23:51 +02:00
bert-e ec5baf1f85 Merge branch 'improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/7.70/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:23:30 +00:00
Dimitrios Vasilas d844fb4fa1 CLDSRV-370: Push non-Federation image as cloudserver/cloudserver:<tag> 2023-05-10 16:19:18 +02:00
Kerkesni 6479076fec
bump node version to 16.20 in Dockerfile
Issue: CLDSRV-386
2023-05-10 13:35:54 +02:00
bert-e c436e2657c Merge branch 'w/8.5/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.6/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:22:48 +00:00
bert-e df45f481d0 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:22:48 +00:00
Dimitrios Vasilas 406f3f0093 Revert "CLDSRV-370: docker-entrypoint: make bucketd bootstrap configurable"
This reverts commit 1d76f61d88.
2023-05-09 19:19:47 +02:00
Dimitrios Vasilas 6952b91539 CLDSRV-370: Pin virtualenv version to 20.21.0
Virtualenv setup fails with the latest version (20.23)
2023-05-09 19:18:40 +02:00
Dimitrios Vasilas eea1ebb5ec Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-370-build-dev-img-release' into w/8.5/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 19:17:35 +02:00
bert-e dae5b7dc28 Merge branch 'improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/7.70/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:12:54 +00:00
Dimitrios Vasilas 1d76f61d88 CLDSRV-370: docker-entrypoint: make bucketd bootstrap configurable 2023-05-09 19:07:50 +02:00
Dimitrios Vasilas 8abe809141 CLDSRV-370: Build dev docker image on release 2023-05-09 19:07:50 +02:00
Dimitrios Vasilas 94b14a258e CLDSRV-370: Pin virtualenv version to 20.21
Virtualenv setup fails with the latest version (20.23)
2023-05-09 19:07:46 +02:00
bert-e cd8c589eba Merge branch 'improvement/CLDSRV-375/exclude-keys' into tmp/octopus/w/8.7/improvement/CLDSRV-375/exclude-keys 2023-04-28 18:20:48 +00:00
williamlardier daec2661ae
CLDSRV-385: use mongodb v4.2 for the CI 2023-04-21 15:03:01 +02:00
Francois Ferrand 0f266371a0
Bump version 8.7.18
Issue: CLDSRV-383
2023-04-17 23:36:28 +02:00
Francois Ferrand 73e56963bf
Fix originOp when deleting a version
DeleteMarkerCreated was sent instead of the expected Delete, which breaks
bucket notifications.

Issue: CLDSRV-383
2023-04-17 23:14:49 +02:00
Nicolas Humbert 4c189b2d9e CLDSRV-375 Exclude already transitioned keys from lifecycle listings 2023-04-16 21:54:16 -07:00
Alexander Chan fb11d0f42e Merge remote-tracking branch 'origin/feature/CLDSRV-368/addBackbeatRouteForIndexingOps' into w/8.7/feature/CLDSRV-368/addBackbeatRouteForIndexingOps 2023-04-14 18:35:38 -07:00
Alexander Chan fe6690da92 bump arsenal 2023-04-14 18:08:42 -07:00
williamlardier 9cbd9f7be8
CLDSRV-381: bump project version 2023-04-14 22:29:03 +02:00
williamlardier c2fc8873cb
CLDSRV-381: bump arsenal 2023-04-14 22:28:47 +02:00
Francois Ferrand bee1ae04bf
Bump version 8.7.15
Issue: CLDSRV-380
2023-04-14 09:06:04 +02:00
Francois Ferrand eb86552a57
Allow reading transition-in-progress objects
This “transition in progress” state does not exist in AWS S3 (so we have no reference), and we need to access the data for the cold storage framework.

When the transition has been performed, the archive id and storage class will be updated first (as well as clearing the ‘transitioning’ flag) before triggering the “GC” to remove the (local) data.

So we are sure that data is available in this state, and that simply checking that the object is in cold storage is enough.

Issue: CLDSRV-380
2023-04-14 09:02:32 +02:00
Alexander Chan 80fbf78d62 CLDSRV-368: add indexing routes 2023-04-13 15:17:03 -07:00
bert-e f5d8f2fac5 Merge branch 'w/8.6/feature/CLDSRV-359-passGetDeleteMarkerFlag' into tmp/octopus/w/8.7/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 19:07:07 +00:00
bert-e b1e13d6efa Merge branch 'w/8.5/feature/CLDSRV-359-passGetDeleteMarkerFlag' into tmp/octopus/w/8.6/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 19:07:07 +00:00
Jonathan Gramain e7ef437b27 Merge remote-tracking branch 'origin/feature/CLDSRV-359-passGetDeleteMarkerFlag' into w/8.5/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 11:42:08 -07:00
Jonathan Gramain 55f652ecc4 CLDSRV-359 bump cloudserver version 2023-04-13 11:40:29 -07:00
Jonathan Gramain 77f56d1fa1 feat: CLDSRV-359 pass getDeleteMarker flag to metadata when needed
Pass the `getDeleteMarker` flag to the Metadata backend when the
Cloudserver operation requires distinguishing whether the target is a
delete marker or does not exist, in order to set the
"x-amz-delete-marker" response header or return a specific error code.
2023-04-13 11:39:36 -07:00
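A sketch of the caller-side logic (the call shape and field names are assumptions, not the exact cloudserver signature):

    // Ask metadata to distinguish "is a delete marker" from "does not
    // exist", so the API can set x-amz-delete-marker or pick an error code.
    function getObjectMDWithDeleteMarker(metadata, bucket, key, log, cb) {
        metadata.getObjectMD(bucket, key, { getDeleteMarker: true }, log,
            (err, objMD) => {
                if (err) {
                    return cb(err);
                }
                if (objMD && objMD.isDeleteMarker) {
                    // caller sets the x-amz-delete-marker response header
                    return cb(null, objMD, true);
                }
                return cb(null, objMD, false);
            });
    }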
bert-e 36e841b542 Merge branches 'w/8.7/feature/CLDSRV-355-activateNullKeys' and 'q/5069/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/q/8.7 2023-04-13 18:35:42 +00:00
bert-e a2404ed622 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/q/8.6 2023-04-13 18:35:41 +00:00
williamlardier 1d12a430a0
CLDSRV-376: bump to 8.7.13 2023-04-13 16:59:28 +02:00
williamlardier bea27b4fb4
CLDSRV-376: update mongoClient used in functional tests 2023-04-13 16:59:13 +02:00
williamlardier 76405d9179
CLDSRV-376: bump mongodb driver 2023-04-13 16:59:12 +02:00
Alexander Chan 31b7f1e71c bump version 2023-04-12 15:36:00 -07:00
Alexander Chan 8674cac9f8 CLDSRV-379: bump arsenal 2023-04-12 15:35:27 -07:00
KillianG d5b666a246
Better indentation; use the isExpiration boolean only in the first function, and use the originOp string directly afterwards
Issue: CLDSRV-367
2023-04-11 14:59:02 +00:00
KillianG 4360772971
Improve the way we pass originOp to make it clearer
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 6e152e33d5
Use a boolean parameter instead of a hardcoded originOp
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 94f34979a5
add origin op to all delete object calls
Issue: CLDSRV-367
2023-04-11 13:43:36 +00:00
bert-e 4be430c313 Merge branch 'improvement/CLDSRV-372/vid' into q/8.6 2023-04-07 18:35:02 +00:00
bert-e 4b0f165b46 Merge branches 'w/8.7/improvement/CLDSRV-372/vid' and 'q/5109/8.6/improvement/CLDSRV-372/vid' into tmp/octopus/q/8.7 2023-04-07 18:35:02 +00:00
Nicolas Humbert 3590377554 Merge remote-tracking branch 'origin/improvement/CLDSRV-372/vid' into w/8.7/improvement/CLDSRV-372/vid 2023-04-07 07:58:01 -04:00
Nicolas Humbert f7f77c6cd2 CLDSRV-372 Current lifecycle versions should include version id 2023-04-06 19:09:04 -04:00
bert-e 8a08f97492 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 18:16:48 +00:00
bert-e a908d09cc8 Merge branch 'w/8.5/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-355-activateNullKeys 2023-04-05 18:16:47 +00:00
Jonathan Gramain 170a68a4f8 CLDSRV-355 [8.5+] fixup problematic automerge
Restore missing `require('../Config')` in lib/api/objectDelete.js
2023-04-05 10:57:42 -07:00
bert-e 448afa50e3 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:47 +00:00
bert-e a0fff19611 Merge branch 'w/8.5/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:46 +00:00
bert-e 6ad1643ba8 Merge branch 'w/8.4/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:45 +00:00
Jonathan Gramain 5ce253ef62 Merge remote-tracking branch 'origin/feature/CLDSRV-355-activateNullKeys' into w/8.4/feature/CLDSRV-355-activateNullKeys 2023-04-04 17:27:11 -07:00
Jonathan Gramain 72f4c36077 CLDSRV-355 set 'isNull2' attr in copied null key
In order for other logic to properly detect null keys written from
non-compat Cloudservers, we also need to set the 'isNull2' param in
those keys when we copy them from the master key.
2023-04-04 16:23:25 -07:00
Jonathan Gramain e534af856f feat: CLDSRV-355 activate null keys behavior
Activate the use of null keys in place of null versioned keys by Cloudserver:

- allow processVersioningState() and preprocessingVersioningDelete()
  helpers to return the associated fields for null key handling, which
  tells Cloudserver to set its behavior to create/delete null keys,
  by sending PUT/DELETE requests with `versionId="null"` to the
  Metadata backend

- pass 'isNull' parameter in version-specific requests to hint the
  Metadata backend on what to do (most useful for V1 backend, but also
  to hint V0 backend that it should handle null keys appropriately)

- set "isNull2" metadata attribute when writing a null master, for
  optimization purpose (allows to avoid checking the null versioned
  key on update)
2023-04-04 16:23:25 -07:00
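A condensed sketch of the parameters involved (the request and metadata shapes are assumed from the description above):

    // Null keys are addressed with a literal "null" versionId; 'isNull'
    // hints the V0/V1 metadata backends, and 'isNull2' marks masters
    // written by non-compat Cloudservers.
    const nullKeyPutParams = { versionId: 'null', isNull: true };
    const masterMD = {
        isNull: true,
        isNull2: true, // allows skipping the legacy null versioned key check
    };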
bert-e 5dd8d9057a Merge branch 'w/8.5/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 50b738cfff Merge branch 'w/8.6/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 2be3ce21c7 Merge branch 'w/8.4/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 70ff6fc4ee Merge branch 'feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.4/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:48 +00:00
bert-e c5214d19a6 Merge branch 'w/8.5/feature/CLDSRV-378-forceEnableNullCompatMode' into tmp/octopus/w/8.6/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 22:27:20 +00:00
bert-e 951a98fcaf Merge branch 'w/8.6/feature/CLDSRV-378-forceEnableNullCompatMode' into tmp/octopus/w/8.7/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 22:27:20 +00:00
Jonathan Gramain ebb0fed48a Merge remote-tracking branch 'origin/feature/CLDSRV-378-forceEnableNullCompatMode' into w/8.5/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 15:17:14 -07:00
Jonathan Gramain 5f85c14ab9 CLDSRV-378 [8.x] force null version compat mode
force null version compatibility mode to be enabled, so that
Cloudserver stays compatible with MongoDB backend not supporting null
keys.

Remove the associated aws-sdk functional test suite related to
compatibility mode
2023-04-04 14:39:00 -07:00
bert-e 8ca770dcb7 Merge branch 'w/8.6/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into tmp/octopus/w/8.7/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 21:28:15 +00:00
bert-e 7923977300 Merge branch 'w/8.5/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into tmp/octopus/w/8.6/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 21:28:14 +00:00
Jonathan Gramain 3597c146d7 CLDSRV-358 fix issue deleting master null version
Fix an issue that occurred when deleting a null version that was the
current version AND that had a null key. This may happen in various
cases, e.g. if the repair process repaired the master from the null
version, in which case it would not delete the null versioned key
(this has been fixed with null keys).

The fix is to not send the `isNull: true` parameter to Metadata merely
because Cloudserver is not in compatibility mode; instead, only send
this parameter if the master key has the `isNull2: true` attribute set
(meaning it was put by a non-compat Cloudserver).
2023-04-04 14:14:56 -07:00
Jonathan Gramain c81e49ba9b CLDSRV-358 fix deletion of null key in null compat mode
A version-specific DELETE of the null version did not work if:

- the request comes from a compat-mode Cloudserver

- the null version had been put by a non-compat mode Cloudserver

To handle this case properly, we look at the "isNull2" field of the
fetched null version, which is only set by non-compat Cloudservers;
when it is set, we send the "isNull" param to Metadata to instruct it
to delete the null key instead of a null versioned key.
2023-04-04 14:14:56 -07:00
Jonathan Gramain e93c064b5f feat: CLDSRV-358 preprocessingVersioningDelete() update for null keys
Add support for null keys in the preprocessingVersioningDelete()
helper; essentially, set an 'isNull' boolean param that gets passed to
Metadata, which tells whether the version to delete is a null version.

NOTE: The null version compatibility mode is still enforced for now
until all pieces are updated to make functional tests pass without the
compatibility mode.
2023-04-04 14:14:55 -07:00
Jonathan Gramain 2b3774600d CLDSRV-358 [test] fix error code checking
In functional tests of 'objectDelete', an "afterAll" cleanup can crash
because it checks an error code before checking if there's an error
object.

It does not crash in normal circumstances because there is an actual
error due to the last unit test trying to clean the bucket, but any
change in the unit tests that left the bucket existing would have
triggered this issue.
2023-04-04 14:14:55 -07:00
Jonathan Gramain a6951f2ef8 CLDSRV-358 [cleanup] objectDelete: remove unused assignment
Remove unused assignment of 'deleteInfo.isNull'
2023-04-04 14:14:55 -07:00
Jonathan Gramain 9fb232861f Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into w/8.5/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 14:03:37 -07:00
Jonathan Gramain 6cf4e291fa Merge remote-tracking branch 'origin/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into w/8.4/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 13:21:30 -07:00
Jonathan Gramain 06b4320e7d CLDSRV-357 fix behavior change causing issue with mongo backend
When looking for a null version to delete, the code had changed its
behavior: it triggered a delete on the metadata even when the null
version had already been deleted. While technically not an issue for
S3C, as Metadata returns a 200 when attempting to delete a
non-existing version (albeit creating a useless request), the MongoDB
backend returns a NoSuchKey error in that case, leading to an issue.

Revert the behavior so as not to trigger a delete of the null version
when it does not exist.
2023-04-04 13:17:24 -07:00
bert-e 3585b8d5eb Merge branch 'w/8.6/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
bert-e 9331c0a375 Merge branch 'w/8.5/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
bert-e 70f368408d Merge branch 'w/8.4/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
Jonathan Gramain a63762ae71 CLDSRV-357 honor 'delOptions.deleteData' even if master is null
Move the check of 'delOptions.deleteData' in prepareNullVersionDeletion()
earlier, so that it is honored even if the master key is a null
version.

This goes with the new possibility to return 'delOptions' without
deleteData in order to delete an existing null key for the master key
(done as part of CLDSRV-353).
2023-04-04 09:52:22 -07:00
Jonathan Gramain f0420572c8 feat: CLDSRV-357 pass deleteNullKey param to backend
Pass the 'deleteNullKey' param that processVersioningState() may set
to the Metadata backend, which tells it to delete the null key as the
PUT is executed.
2023-04-04 09:52:22 -07:00
Jonathan Gramain b1fd915ba3 feat: CLDSRV-357 update versioningPreprocessing() helper for null keys
Modify the code flow of versioningPreprocessing() to support null keys
in addition to the legacy "null versioned keys".

NOTE: The null version compatibility mode is still enforced for now
until all pieces are updated to make functional tests pass without the
compatibility mode.
2023-04-04 09:52:22 -07:00
Jonathan Gramain 4285c18e44 Merge remote-tracking branch 'origin/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into w/8.4/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 09:49:28 -07:00
Jonathan Gramain 71ffd004df CLDSRV-353 bump arsenal dep 2023-04-04 09:46:07 -07:00
Jonathan Gramain f674104825 CLDSRV-353 + case of delete null versioned key
Add a case in processVersioningState() to delete the null versioned
key when the key is a legacy null version (we know this because
`isNull2` is not set) and we are going to write it as a null key,
because with older Cloudservers the master null may have an associated
null versioned key in certain circumstances.
2023-04-03 18:11:15 -07:00
Jonathan Gramain 9c9d4b3e7c CLDSRV-353 remove conversion of null version key to null key
Fix an issue that occurred when converting a null versioned key into a
null key (that would occur when a non-compat mode Cloudserver updates
a compat-mode object having a noncurrent null version). The issue was
that the null key was updated with the master's contents instead of
the original null version contents.

The fix consists of keeping backward compatibility by setting a
`nullVersionId` instead, which avoids having to read the null version
metadata first. It is not important to convert those keys as the
migration from V0 to V1 will necessarily have to convert existing
legacy null versions anyway.
2023-04-03 18:11:15 -07:00
Jonathan Gramain 13265a3d6e CLDSRV-353 [optim] no legacy null version key deletion
In case the master has been updated by a null-key-enabled
Cloudserver, there can no longer be an associated versioned key (as the new
behavior guarantees that a null master cannot have an associated null
versioned key, see S3C-7526).

Thanks to this, we can avoid having to check the versioned key for
deletion when a null master version is updated on a
versioning-suspended bucket, which is arguably a rather common use
case.

To implement that, we will add an "isNull2" attribute to the master key
in a subsequent commit (part of CLDSRV-355) when Cloudserver is not in
null version compatibility mode.

This commit implements the optimization by checking the new
"isNull2" metadata attribute and skipping the null version check in
case the flag is set.
2023-04-03 18:10:07 -07:00
Jonathan Gramain 31c5316a7e feat: CLDSRV-353 processVersioningState() null key support
Add support for null key handling for the helper
processVersioningState(), and maintain the null version compatibility
mode to keep the old behavior. For now, the calling code sets this
flag to "true" without using the config, to maintain current behavior;
it will be changed with CLDSRV-355.

One slight behavior change in compatibility mode is that when an old
null versioned key is deleted due to a PUT overwriting the null
version, we do not send the "replayId" with the DELETE request, but
instead rely on the "oldReplayId" sent by the PUT, because this is
the normal way of letting metadata know how to get rid of the existing
replayId for the existing null version on versioning-suspended buckets.
2023-04-03 18:10:07 -07:00
bert-e 0a1489ee46 Merge branch 'w/8.6/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.7/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:59 +00:00
bert-e 71f80544ac Merge branch 'w/8.5/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.6/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:58 +00:00
bert-e 270080a75b Merge branch 'w/8.4/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.5/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:58 +00:00
bert-e 74717b2acb Merge branch 'w/7.70/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.4/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:57 +00:00
bert-e ef81f3e58f Merge branch 'improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/7.70/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:57 +00:00
Jonathan Gramain aa55a87a65 impr: CLDSRV-373 new func test for S3C-5139
Add a new functional test in aws-node-sdk that reproduces the steps
that formerly triggered S3C-5139, which was silently fixed since it
was opened.

It is also useful for S3C-7352 because it covers a corner case for
both old and new null version handling in v0 and v1 metadata formats.
2023-04-03 09:30:35 -07:00
Xin LI de5b4331e2 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 11:00:24 +02:00
Xin LI e1a4f1ef8c bugfix: CLDSRV-365 bump 2023-03-31 10:58:07 +02:00
bert-e 46dff0321d Merge branch 'w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:07 +00:00
bert-e f3c7580510 Merge branch 'w/8.4/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.5/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:06 +00:00
bert-e 2145bb3ae3 Merge branch 'w/8.5/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:06 +00:00
Xin LI 468162c81c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/8.4/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 10:53:21 +02:00
Xin LI 89f9139203 Merge remote-tracking branch 'origin/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/7.70/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 10:48:44 +02:00
Xin LI 8153554a4c bugfix: CLDSRV-365 bump version 2023-03-31 10:46:47 +02:00
Xin LI fb9063bccc bugfix: CLDSRV-365 fix legal hold can be deleted issue and add more tests 2023-03-31 10:46:47 +02:00
bert-e ddc6ea72be Merge branch 'improvement/CLDSRV-371/etag' into tmp/octopus/w/8.7/improvement/CLDSRV-371/etag 2023-03-29 20:22:38 +00:00
Nicolas Humbert f20bf1becf CLDSRV-371 ETag should be surrounded by double quotes 2023-03-29 16:16:52 -04:00
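For reference, the S3 wire format expects the quotes to be part of the ETag value itself, e.g.:

    // `md5` is the hex digest of the object data
    function formatETag(md5) {
        return `"${md5}"`;
    }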
bert-e d31c773e77 Merge branch 'w/8.4/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.5/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e d266ff4e9f Merge branch 'w/8.6/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.7/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e 6ff21996f5 Merge branch 'w/8.5/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.6/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e 15d1b3ba86 Merge branch 'w/7.70/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.4/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:16 +00:00
bert-e 827c752e9a Merge branch 'improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/7.70/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:15 +00:00
Jonathan Gramain 82dc837610 impr: CLDSRV-369 new version-specific DELETE functional test
Add a new functional test in the aws-node-sdk test suite that:

- creates a nonversioned key

- then enables versioning and creates 5 more versioned keys

- then deletes the 5 versioned keys in a batch delete

- expects the null version to be the only remaining version

Its primary purpose is to check version-specific DELETE with a null
version in V1 metadata format (MD-847), but it is a good addition to
the existing test suite in V0 format as well (a condensed sketch
follows this commit).
2023-03-28 14:09:10 -07:00
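A condensed sketch of the scenario with the aws-sdk v2 client (the endpoint and names are placeholders):

    const AWS = require('aws-sdk');
    const s3 = new AWS.S3({
        endpoint: 'http://127.0.0.1:8000',
        s3ForcePathStyle: true,
    });

    async function scenario() {
        const Bucket = 'null-version-test';
        const Key = 'obj';
        await s3.createBucket({ Bucket }).promise();
        await s3.putObject({ Bucket, Key, Body: 'null' }).promise();
        await s3.putBucketVersioning({
            Bucket,
            VersioningConfiguration: { Status: 'Enabled' },
        }).promise();
        const versionIds = [];
        for (let i = 0; i < 5; i++) {
            const res = await s3.putObject({ Bucket, Key, Body: `v${i}` }).promise();
            versionIds.push(res.VersionId);
        }
        // batch delete of the 5 versioned keys
        await s3.deleteObjects({
            Bucket,
            Delete: { Objects: versionIds.map(VersionId => ({ Key, VersionId })) },
        }).promise();
        // expect a single remaining version, with VersionId === 'null'
        const { Versions } = await s3.listObjectVersions({ Bucket }).promise();
        console.log(Versions);
    }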
bert-e 7dc2f07cb6 Merge branch 'w/8.7/improvement/CLDSRV-366/clear' into tmp/octopus/q/8.7 2023-03-28 13:25:15 +00:00
Kerkesni 6c22d87c55
bump version to 8.7.11
Issue: CLDSRV-362
2023-03-28 12:25:11 +02:00
Kerkesni 310f67d3a7
throw error when getting a transitioning object
Issue: CLDSRV-362
2023-03-28 12:24:50 +02:00
Kerkesni 49841c5e0e
throw error when copying parts from a cold object
A cold object should not be allowed to get copied as the data
is not accessible.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
Kerkesni b5334baca8
throw error when copying a cold or transitioning object
A cold object should not be allowed to get copied as the data
is not accessible.

The same issue happens when copying an object that is transitioning:
the data might get deleted while the copy is still in progress.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
Kerkesni e592671b54
add helper to check if object is in cold storage
Issue: CLDSRV-362
2023-03-28 12:24:48 +02:00
bert-e 6e0b66849d Merge branch 'improvement/CLDSRV-366/clear' into tmp/octopus/w/8.7/improvement/CLDSRV-366/clear 2023-03-28 03:45:02 +00:00
Nicolas Humbert f2292f1ca3 CLDSRV-366 Clear list orphan delete markers response 2023-03-27 15:52:49 -04:00
bert-e 18a1bfd325 Merge branch 'w/8.6/improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.7/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 23:39:13 +00:00
bert-e c2b54702f6 Merge branch 'w/8.5/improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.6/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 23:39:12 +00:00
Jonathan Gramain 13a5e14da5 impr: CLDSRV-356 [8.5+] adapt overwritingVersioning() for archive
Due to the change in what processVersioningState() returns
(nullVersionId embedded in an "extraMD" field for clarity), modify the
overwritingVersioning() helper, which needs to have the same contract
as the former function.
2023-03-24 16:37:24 -07:00
Jonathan Gramain 891913fd16 Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-356-enhanceProcessVersioningState' into w/8.5/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 15:52:32 -07:00
bert-e 7baa2501e6 Merge branch 'improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.4/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 22:50:02 +00:00
Jonathan Gramain 8e808afec9 impr: CLDSRV-356 enhance processVersioningState() and fix replayId
- enhance general flow of processVersioningState(), to make it easier
  to read and update for null key handling

- fix an issue related to passing the uploadId for nonversioned
  buckets (linked to S3C-7361): remove a check "master.isNull" to also
  pass the uploadId as replayId when the master is non-versioned, so
  that it can be deleted by passing it to the metadata DELETE request

- make processVersioningState() return a 'nullVersionId' param rather
  than a "storeOptions", as it is always used to copy master to a null
  version, it simplifies a bit the API

- make processVersioningState() return extra metadata params to set in
  the object ("nullVersionId" and "nullUploadId") in their own
  "extraMD" object, for clarity

- remove undefined params returned by the function to have clean unit
  tests
2023-03-24 12:51:48 -07:00
bert-e 2c999f4c10 Merge branch 'w/8.6/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:54 +00:00
bert-e b23472a754 Merge branch 'w/8.5/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:54 +00:00
bert-e a4999c1bfb Merge branch 'w/8.4/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:53 +00:00
bert-e fe0b0f8b2f Merge branch 'feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.4/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:53 +00:00
Jonathan Gramain c2bee23fd1 ft: CLDSRV-354 handle null keys in metadata GET helpers
Update the helpers in metadataUtils to handle null keys, while
keeping backward compatibility with null versioned keys.

The main change in logic for null keys is that, instead of fetching
first the master key then the null versioned key, we first attempt to
fetch the null key, and if not found, we fetch the master key (we may
then also have to fetch the null versioned key for backward
compatibility).

Take the chance to reduce tech debt by reorganizing the helpers'
responsibilities in a better way, and by using the "validateBucket"
helper.
2023-03-24 12:45:07 -07:00
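A pseudocode-level sketch of the new lookup order (the helper names and call shapes are assumptions):

    // 1. try the null key directly; 2. fall back to the master key;
    // 3. for backward compatibility, follow nullVersionId to the legacy
    //    null versioned key.
    async function getNullVersionMD(metadata, bucket, key) {
        let md = await metadata.getObjectMD(bucket, key, { versionId: 'null' });
        if (md) {
            return md;
        }
        md = await metadata.getObjectMD(bucket, key, {});
        if (md && md.isNull) {
            return md;
        }
        if (md && md.nullVersionId) {
            return metadata.getObjectMD(bucket, key,
                { versionId: md.nullVersionId });
        }
        return null;
    }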
Jonathan Gramain e87c2a4e5f CLDSRV-354 [cleanup] new helper metadataUtils.validateBucket()
Factorize the logic to validate a bucket and return the relevant error
code in a helper function (checks on bucketShield, bucket policies,
then bucket authorization)
2023-03-24 12:42:02 -07:00
Jonathan Gramain db943cd634 CLDSRV-354 [optim] remove unnecessary check in getVersionIdResHeader() 2023-03-24 12:42:02 -07:00
bert-e bf7a643d45 Merge branch 'w/8.6/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into tmp/octopus/w/8.7/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 19:07:15 +00:00
bert-e 874a53c767 Merge branch 'w/8.5/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into tmp/octopus/w/8.6/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 19:07:14 +00:00
Jonathan Gramain c7e1c6921b Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into w/8.5/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 11:43:47 -07:00
Jonathan Gramain 6d2d56bc1e Merge remote-tracking branch 'origin/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into w/8.4/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 11:04:47 -07:00
bert-e 3f3bf0fdf0 Merge branches 'w/8.7/bugfix/CLDSRV-363/etag' and 'q/5078/8.6/bugfix/CLDSRV-363/etag' into tmp/octopus/q/8.7 2023-03-24 18:01:38 +00:00
bert-e 1922facb7b Merge branch 'bugfix/CLDSRV-363/etag' into q/8.6 2023-03-24 18:01:38 +00:00
Jonathan Gramain fff03d3320 CLDSRV-349 [tests] func test stage for ENABLE_NULL_VERSION_COMPAT_MODE
Turn the "file-ft-tests" job into a matrix, to duplicate the suite
with and without the ENABLE_NULL_VERSION_COMPAT_MODE environment variable.

This will make sure Cloudserver behaves correctly (versioning, null
version handling, etc.) when the compatibility mode is active.
2023-03-24 10:36:39 -07:00
Jonathan Gramain 6e79d3f1a4 ft: CLDSRV-349 support ENABLE_NULL_VERSION_COMPAT_MODE env var
Cloudserver sets a flag in its configuration when the
ENABLE_NULL_VERSION_COMPAT_MODE environment variable is set to "true".
2023-03-24 10:36:39 -07:00
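A minimal sketch of that wiring (only the environment variable name comes from the commit; the flag name is an assumption):

    // assumed config flag name
    const nullVersionCompatMode =
        process.env.ENABLE_NULL_VERSION_COMPAT_MODE === 'true';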
bert-e 2a44949048 Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:06 +00:00
bert-e 1576352613 Merge branch 'w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:06 +00:00
bert-e 74978f423e Merge branch 'w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:05 +00:00
bert-e 6f4cd75d6f Merge branch 'w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:05 +00:00
bert-e 00906d04f5 Merge branch 'bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:04 +00:00
Jonathan Gramain 270339f2bb CLDSRV-361 guard before accessing err.is field 2023-03-24 09:51:22 -07:00
bert-e 6660626190 Merge branch 'bugfix/CLDSRV-363/etag' into tmp/octopus/w/8.7/bugfix/CLDSRV-363/etag 2023-03-24 13:23:06 +00:00
Nicolas Humbert 049f52bf95 CLDSRV-363 ETag instead of Etag for lifecycle listings Contents 2023-03-23 16:51:12 -04:00
williamlardier 58fc0b7146
CLDSRV-350: bump to 8.7.10 2023-03-21 13:52:26 +01:00
williamlardier 11e3d7ecb2
CLDSRV-350: update veeam put and delete routes with new arsenal methods
We must ensure that concurrent updates of the bucket metadata won't conflict
with each other, by separately updating the capabilities fields. This change
ensures that two files can be uploaded at the same without any problem,
regardless of the number of cloudserver instances.
2023-03-21 13:52:25 +01:00
williamlardier 1bab851ce3
CLDSRV-350: bump arsenal version 2023-03-21 13:52:25 +01:00
bert-e 0bc0341f33 Merge branch 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:37 +00:00
bert-e b5af652dc8 Merge branch 'w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:37 +00:00
bert-e 6c29be5137 Merge branch 'w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:36 +00:00
Jonathan Gramain 2967f327ed Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 15:08:01 -07:00
Jonathan Gramain 0f8a56e9b5 Merge remote-tracking branch 'origin/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 14:25:03 -07:00
Jonathan Gramain c1d2601237 CLDSRV-361 [8.x fix] backbeat multiple backend route fix
Fix handling of response of metadataGetObject() in multiple backend
PutTagging and DeleteTagging when target object does not exist: return
error code NoSuchKey in this case.

Note: NoSuchKey used to be returned by the metadataGetObject() helper,
but not always; now it is never returned, and instead the returned
"objMD" value is null.
2023-03-20 13:25:09 -07:00
Jonathan Gramain 885f95606c bugfix: CLDSRV-361 fix exception with batch delete of null version
- Fix an exception in Cloudserver when doing a batch delete containing a
  deletion of a null version that does not exist on a versioned object

- The changes also fix the return code when fetching a noncurrent null
  version that was deleted: in such a case it was returning NoSuchKey;
  it now returns NoSuchVersion per spec.

The change consists of:

- consolidate and fix the API contract of metadataGetObject() and
  metadataValidateBucketAndObj() for "not found" objects or object
  versions: there was a mix of return codes from Metadata (NoSuchKey)
  in some cases and an OK response with no metadata in others, depending on
  the exact scenario and object state. Fixed by always returning an OK
  response and no metadata if the target version is not found, to let
  the caller set the appropriate error code in the API response

- for multiObjectDelete, fix by specifically handling an OK response
  with empty object metadata as a "not found" case, like for other API
  calls (see the sketch after this commit)
2023-03-20 13:12:02 -07:00
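A sketch of the consolidated contract from the caller's side (the call shape is assumed; dependencies are passed in to keep the sketch self-contained):

    // "not found" is no longer an error from the helper; the caller maps a
    // null objMD to the API-appropriate code itself.
    function fetchObjectMD(metadataGetObject, errors, params, log, cb) {
        const { bucketName, objectKey, versionId } = params;
        metadataGetObject(bucketName, objectKey, versionId, log,
            (err, objMD) => {
                if (err) {
                    return cb(err); // genuine metadata errors only
                }
                if (!objMD) {
                    return cb(versionId ?
                        errors.NoSuchVersion : errors.NoSuchKey);
                }
                return cb(null, objMD);
            });
    }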
bert-e b5b0f6482b Merge branch 'feature/CLDSRV-317/listLifecycleOrphans' into tmp/octopus/w/8.7/feature/CLDSRV-317/listLifecycleOrphans 2023-03-20 13:53:09 +00:00
Nicolas Humbert ec9ed94555 CLDSRV-317 Implement listLifecycleOrphans 2023-03-20 09:52:42 -04:00
bert-e 755f282f8e Merge branch 'feature/CLDSRV-316/listLifecycleNonCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-316/listLifecycleNonCurrents 2023-03-17 18:00:21 +00:00
Nicolas Humbert 41cc399d85 CLDSRV-316 Implement listLifecycleNonCurrents 2023-03-17 13:58:22 -04:00
bert-e c4dc928de2 Merge branch 'feature/CLDSRV-314/listLifecycleCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-314/listLifecycleCurrents 2023-03-17 16:20:16 +00:00
Nicolas Humbert 6b8a2581b6 CLDSRV-314 Implement listLifecycleCurrents 2023-03-17 11:48:02 -04:00
Killian Gardahaut a0087e8d77
Bump 8.7.9
Issue: ZKOP-219
2023-03-17 09:58:21 +01:00
KillianG 8e5bea56b6
Refactor tests for more readability
Issue: CLDSRV-337
2023-03-17 09:58:21 +01:00
KillianG 976e349036
Add tests
Add a test for the azureArchiveLocationConstraintAssert function

Issue: CLDSRV-337
2023-03-17 09:58:16 +01:00
KillianG de1c23ac1b
Add a test on location constraints to ensure the location is correctly configured
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 0b4d04a2a3
Add location azure archive to cold storage locations
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 049d396c8d
Add azure_archive location type
ISSUE: CLDSRV-337
2023-03-17 09:56:35 +01:00
Naren 5c04cbe6d1 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-327-cloudserver-metrics' into w/8.7/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 22:36:59 -07:00
Naren d3e538087a Merge remote-tracking branch 'origin/w/8.5/improvement/CLDSRV-327-cloudserver-metrics' into w/8.6/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 22:05:26 -07:00
bert-e 7cc37c7f3d Merge branch 'w/8.4/improvement/CLDSRV-327-cloudserver-metrics' into tmp/octopus/w/8.5/improvement/CLDSRV-327-cloudserver-metrics 2023-03-17 03:50:44 +00:00
Naren 399d081d68 impr: CLDSRV-327 upgrade arsenal, bucketclient, prom-client, utapi, vaultclient 2023-03-16 20:33:03 -07:00
Naren c3fac24366 Merge remote-tracking branch 'origin/improvement/CLDSRV-327-cloudserver-metrics' into w/8.4/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 20:23:37 -07:00
Naren 82687aa1a7 impr: CLDSRV-327 minor updates
add info log on metric server start.
refactor unit tests.
2023-03-16 17:35:28 -07:00
Naren 820ada48ce impr: CLDSRV-327 bump version to 7.70.18 2023-03-16 15:43:56 -07:00
Naren df73cc7ebc impr: CLDSRV-327 upgrade arsenal, bucketclient, prom-client, utapi, vaultclient 2023-03-16 15:43:07 -07:00
Naren 429c62087d impr: CLDSRV-327 refactor metrics server
prom-client does not aggregate metrics across workers on its own, so
the metrics server was moved to the master, which aggregates worker
metrics using AggregatorRegistry. Metrics are moved to a separate
file to avoid confusion with the metrics handler.
2023-03-16 15:39:13 -07:00
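A sketch of the aggregation pattern with prom-client (the port and server shape are illustrative, not cloudserver's actual setup):

    const cluster = require('cluster');
    const http = require('http');
    const { AggregatorRegistry } = require('prom-client');

    if (cluster.isMaster) {
        const registry = new AggregatorRegistry();
        // The master serves the metrics endpoint, pulling and merging the
        // metrics of every worker process.
        http.createServer((req, res) => {
            registry.clusterMetrics()
                .then(metrics => {
                    res.setHeader('Content-Type', registry.contentType);
                    res.end(metrics);
                })
                .catch(() => {
                    res.statusCode = 500;
                    res.end();
                });
        }).listen(8002);
    }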
Naren 13fa26986d impr: CLDSRV-327 linter corrections 2023-03-02 09:52:39 -08:00
bert-e 5cb63991a8 Merge branch 'w/8.6/improvement/CLDSRV-328-adapt-prescribed-metric-names' into tmp/octopus/w/8.7/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 16:30:18 +00:00
Naren c45dac7ffc impr: CLDSRV-327 add monitoring tests 2023-02-28 11:33:12 -08:00
Naren 878fc6819f impr: CLDSRV-327 generate cloudserver metrics 2023-02-28 11:32:37 -08:00
Naren 43592f9392 impr: CLDSRV-327 setup cloudserver monitoring 2023-02-28 11:32:13 -08:00
Naren dbd1383c32 impr: CLDSRV-327 add prom-client 2023-02-28 11:32:13 -08:00
Alexander Chan c310cb3dd1 Merge remote-tracking branch 'origin/w/8.6/feature/CLDSRV-336/supportNewerNoncurrentVersions' into w/8.7/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-26 18:47:15 -08:00
bert-e 22cda51944 Merge branch 'w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/q/8.7 2023-02-22 16:39:53 +00:00
williamlardier 408d0de732
CLDSRV-343: bump cloudserver to the next version 2023-02-22 09:59:09 +01:00
williamlardier 83916c91fb
CLDSRV-343: enable back some CEPH backend tests
These tests also cover the ObjectTagging API with multiple backends.
Re-enabling them will help us better avoid issues like this
in the future.
2023-02-17 14:24:59 +01:00
bert-e 110b2a35ed Merge branch 'w/8.6/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:38 +00:00
williamlardier a8117ca037
CLDSRV-343: use bucket name for backend tagging operations 2023-02-16 15:51:49 +01:00
bert-e 9145d1cf79 Merge branches 'w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.7 2023-02-15 20:43:46 +00:00
bert-e ae1b6dc3d1 Merge branch 'w/8.6/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.7/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
bert-e b1304b5f7f Merge branches 'w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.7 2023-02-10 12:57:22 +00:00
bert-e 6b1f8c61ec Merge branch 'w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:09 +00:00
bert-e 335bfabed1 Merge branch 'w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e 3398db3c0f Merge branch 'w/8.6/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.7/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:19 +00:00
bert-e 836e9fb22d Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:46 +00:00
bert-e ead7f5f7c2 Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:42 +00:00
bert-e c17059dc77 Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:51 +00:00
bert-e 8ace5b24a5 Merge branches 'development/8.7' and 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:48 +00:00
bert-e 39f7035dbd Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 14:13:42 +00:00
williamlardier bb62ed4fa7
CLDSRV-334: bump cloudserver to 8.7.7 2023-01-24 12:33:37 +01:00
williamlardier c95368858d
CLDSRV-334: bump arsenal to 8.1.82 2023-01-24 12:33:17 +01:00
bert-e d8ff1377fc Merge branch 'w/8.6/feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.7/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:31 +00:00
Jonathan Gramain 28f4c5baee Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.7/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:49:44 -08:00
bert-e 0a8f846f4b Merge branch 'w/8.6/feature/CLDSRV-244/migrateToGithubActions' into tmp/octopus/w/8.7/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 22:54:04 +00:00
Jonathan Gramain ac5de47ca1 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-312-bumpArsenal' into w/8.7/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 16:03:10 -08:00
williamlardier c147785464
CLDSRV-322: bump cloudserver version 2023-01-06 09:04:04 +01:00
williamlardier ca8c788757
CLDSRV-322: code improvements 2023-01-06 09:04:03 +01:00
williamlardier cb2af364bb
CLDSRV-322: Implement test for custom routes
Unit and functional tests are implemented to test the custom routes.
The LISTing is not yet tested, as it requires more changes to
generate a valid signature from Mocha.
2023-01-05 15:31:33 +01:00
williamlardier 1eb27d610b
CLDSRV-322: Support custom files for MultiObjectDelete
MultiObjectDelete is implemented by the product UI to delete the
files in buckets. This method is a POST that relies on the request
body to filter the objects; hence, it is not possible to filter
it with an ingress rule in nginx.

The implementation tries to avoid adding any complexity
by extending existing loops and implementing a new step if eligible
files are found.

These files are extracted from the Veeam route list of accepted files,
but this implementation might change if more custom APIs are supported
in the future.
2023-01-05 15:31:33 +01:00
williamlardier 73b295c91d
CLDSRV-322: Implement LIST for SOSAPI routes
Listing of objects is needed for consistent user experience in the
product's User Interface.

Listing is implemented as a `GET` request with a specific query parameter
`list-type` and folder `.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c`.

This API:
- Handles both versioned and non-versioned listing
- Relies on predefined templates to fill the response content
- Extracts the system.xml and capacity.xml files from the bucket
  metadata
- Computes the listing response based on the input query parameters
  and files currently in the bucket md capabilities
- Handles errors if any unsupported query parameter is used. As any GET
  request is routed to this method, we return InvalidRequest if the requested
  action is not supported (i.e., not a listing v2)
2023-01-05 15:31:32 +01:00
williamlardier 8186c84bf9
CLDSRV-322: Implement DELETE for SOSAPI routes
Deletion of Veeam SOSAPI files is required to ensure a consistent
user experience. This API is only exposed to API/CLI clients, not
User Interface. The User Interface relies on MultiObjectDelete to
perform the deletions, and is handled in a separate commit.

This API:
- Checks that the requested file exists
- Erases the bucket metadata according to the file
- Updates the bucket metadata with the updated values
- Handles errors if the Veeam capability is not yet enabled for the bucket
2023-01-05 15:31:32 +01:00
williamlardier 93ef2d0545
CLDSRV-322: Implement HEAD for SOSAPI routes
HEAD object is not formally required by Veeam SOSAPI, but Veeam
relies on the last-modified date value of the capacity.xml file.
To support any change in a future SOSAPI standard, the HEAD method
is implemented; it is similar to the GET method, but only the metadata
is returned.
2023-01-05 15:31:31 +01:00
williamlardier d7d0a31bb1
CLDSRV-322: Implement PUT for SOSAPI routes
In the SOSAPI context, the user is requested to pre-create two files,
system.xml and capacity.xml, under the veeam folder to enable the feature.

This API:
- Extracts the XML from the provided file and converts it to JSON
- Validates the JSON against joi schemas, if applicable
- Updates the bucket metadata, including the last-modified date
- Updates the bucket metadata in the database
- Returns the standard success code response
- Handles invalid XML or XML structure, and returns an error accordingly
2023-01-05 15:31:31 +01:00
williamlardier 4c69b82508
CLDSRV-322: Implement GET for SOSAPI
The GET method is used by SOSAPI to determine if SOS API is enabled
or not on a bucket.

Two files are supported: system.xml and capacity.xml.

This API:
- Gets the bucket metadata
- Dynamically recomputes a valid XML document based on the bucket md
  content, using xml2js with the headless option, to enforce the same
  XML as the SOSAPI standard
- Rejects the request with an error if the bucket metadata does not
  exist
- Handles the `?tagging` request, required for versioned buckets, to
  return static content.

Output stream relies on the utils file.
2023-01-05 15:31:30 +01:00
williamlardier ca13284da3
CLDSRV-322: implement common util functions
Custom SOSAPI routes might either retrieve or stream data. The utils file
re-implements, with support for this particular context, some functions
from the standard API paths in Arsenal.

These changes mostly introduce ways to compute the right HTTP headers as
well as input or output streams to handle GET or PUT request types.
2023-01-05 15:31:30 +01:00
williamlardier c6ed75a1d7
CLDSRV-322: implement SOSAPI scheme validator
SOSAPI relies on standard XML files for both the system and the capacity.
It is used by Veeam12+ to determine what capabilities and/or
configuration should be enforced for a given S3-integrated Bucket used
for backups.

The commit introduces schema validation for JSON objects, as the XML
will first be converted using xml2js.

The system.xml file includes the protocol version of SOSAPI: if the
version is not known, no validation is performed, to allow for future
changes without a formal need to update the product.

Note: a maximum XML file size will be enforced in case of an
unsupported protocol version, to avoid space issues with the database.
2023-01-05 15:31:30 +01:00
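A sketch of such a validator, assuming hypothetical schema contents and a hypothetical known-version list:

    const joi = require('joi');
    const { parseStringPromise } = require('xml2js');

    // hypothetical: accept any SystemInfo carrying a ProtocolVersion element
    const systemSchema = joi.object({
        SystemInfo: joi.object({
            ProtocolVersion: joi.array().items(joi.string()).required(),
        }).unknown(true).required(),
    });
    const KNOWN_VERSIONS = ['1.0']; // hypothetical

    async function validateSystemXml(xmlBody) {
        const parsed = await parseStringPromise(xmlBody);
        const version = parsed && parsed.SystemInfo
            && parsed.SystemInfo.ProtocolVersion
            && parsed.SystemInfo.ProtocolVersion[0];
        if (!KNOWN_VERSIONS.includes(version)) {
            return parsed; // unknown protocol version: skip validation
        }
        return joi.attempt(parsed, systemSchema); // throws if invalid
    }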
williamlardier 402d0dea1a
CLDSRV-322: Create a new route for Veeam12 SOS API.
This new route is exposed through special nginx rules
from Zenko-Operator, to redirect any call to the veeam
folder, located under .system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c
at the root of the bucket. The goal is to store files in
the bucket metadata, to ease their update by internal jobs.
To avoid impacting the standard API, we rely on custom routes
with dedicated logic to handle these files in a generic
way.

This commit introduces a new route that manages, in turn:
- Handling of the incoming request.
- Validity checks, including the list of supported APIs according
  to the HTTP verb and query parameters.
- Authentication and authorization with Vault, in the same
  way as for usual files.
- Checking the targeted bucket and/or keys, to extract the
  bucket metadata.
- Routing of the request to the right API handler.
2023-01-05 15:31:29 +01:00
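A sketch of the final dispatch step (handler names hypothetical), once authentication and the bucket metadata checks have passed:

```js
const { errors } = require('arsenal');

// Pick the SOSAPI handler from the HTTP verb; anything else is rejected.
function routeVeeam(request, response, log, handlers) {
    const supported = {
        GET: handlers.getVeeamFile,
        PUT: handlers.putVeeamFile,
        HEAD: handlers.headVeeamFile,
        DELETE: handlers.deleteVeeamFile,
    };
    const handler = supported[request.method];
    if (!handler) {
        return handlers.sendError(errors.MethodNotAllowed, response, log);
    }
    return handler(request, response, log);
}
```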
williamlardier 95faec1db0
CLDSRV-322: bump arsenal version 2023-01-05 15:31:29 +01:00
Jonathan Gramain ca9d53f430 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-321-version-bump' into w/8.7/bugfix/CLDSRV-321-version-bump 2022-12-26 11:19:03 -08:00
bert-e b1ee1f8ef7 Merge branch 'w/8.6/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.7/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:26 +00:00
williamlardier e882cb6781
Merge remote-tracking branch 'origin/bugfix/CLDSRV-320-bump-arsenal' into w/8.7/bugfix/CLDSRV-320-bump-arsenal 2022-12-20 17:44:48 +01:00
Francois Ferrand cb7303636c
Release bump 8.7.1
Issue: CLDSRV-306
2022-12-16 19:56:19 +01:00
Francois Ferrand 6d0f889c23
Merge remote-tracking branch 'origin/feature/CLDSRV-306' into w/8.7/feature/CLDSRV-306 2022-12-16 19:54:23 +01:00
Francois Ferrand c13f2ae6a5
Merge remote-tracking branch 'origin/improvement/CLDSRV-305' into w/8.7/improvement/CLDSRV-305 2022-12-16 18:08:52 +01:00
bert-e b6611c4711 Merge branch 'w/8.6/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.7/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:48 +00:00
bert-e ae4ece471b Merge branch 'w/8.7/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/q/8.7 2022-12-14 21:19:55 +00:00
williamlardier 15b61cd947
CLDSRV-297: bump cloudserver to 8.7.0 2022-12-14 18:16:55 +01:00
williamlardier 91536c575f
CLDSRV-297: bump projects versions 2022-12-14 18:16:52 +01:00
297 changed files with 21430 additions and 10427 deletions

View File

@ -1,5 +1,8 @@
{ {
"extends": "scality", "extends": "scality",
"plugins": [
"mocha"
],
"rules": { "rules": {
"import/extensions": "off", "import/extensions": "off",
"lines-around-directive": "off", "lines-around-directive": "off",
@ -42,7 +45,8 @@
"no-restricted-properties": "off", "no-restricted-properties": "off",
"new-parens": "off", "new-parens": "off",
"no-multi-spaces": "off", "no-multi-spaces": "off",
"quote-props": "off" "quote-props": "off",
"mocha/no-exclusive-tests": "error",
}, },
"parserOptions": { "parserOptions": {
"ecmaVersion": 2020 "ecmaVersion": 2020

View File

@ -16,30 +16,28 @@ runs:
run: |- run: |-
set -exu; set -exu;
mkdir -p /tmp/artifacts/${JOB_NAME}/; mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v2 - uses: actions/setup-node@v4
with: with:
node-version: '16' node-version: '16'
cache: 'yarn' cache: 'yarn'
- name: install dependencies - name: install dependencies
shell: bash shell: bash
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1 run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
- uses: actions/cache@v2 - uses: actions/cache@v3
with: with:
path: ~/.cache/pip path: ~/.cache/pip
key: ${{ runner.os }}-pip key: ${{ runner.os }}-pip
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: | python-version: 3.9
2.7
3.9
- name: Install python deps
shell: bash
run: pip install docker-compose
- name: Setup python2 test environment - name: Setup python2 test environment
shell: bash shell: bash
run: | run: |
sudo apt-get install -y libdigest-hmac-perl sudo apt-get install -y libdigest-hmac-perl
pip install virtualenv pip install 's3cmd==2.3.0'
virtualenv -p $(which python2) ~/.virtualenv/py2 - name: fix sproxyd.conf permissions
source ~/.virtualenv/py2/bin/activate shell: bash
pip install 's3cmd==1.6.1' run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
- name: ensure fuse kernel module is loaded (for sproxyd)
shell: bash
run: sudo modprobe fuse

View File

@ -39,6 +39,12 @@ services:
- MONGODB_RS=rs0 - MONGODB_RS=rs0
- DEFAULT_BUCKET_KEY_FORMAT - DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS - METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file: env_file:
- creds.env - creds.env
depends_on: depends_on:
@ -66,14 +72,21 @@ services:
pykmip: pykmip:
network_mode: "host" network_mode: "host"
profiles: ['pykmip'] profiles: ['pykmip']
image: registry.scality.com/cloudserver-dev/pykmip image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
volumes: volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts - /tmp/artifacts/${JOB_NAME}:/artifacts
mongo: mongo:
network_mode: "host" network_mode: "host"
profiles: ['mongo', 'ceph'] profiles: ['mongo', 'ceph']
image: scality/ci-mongo:3.6.8 image: ${MONGODB_IMAGE}
ceph: ceph:
network_mode: "host" network_mode: "host"
profiles: ['ceph'] profiles: ['ceph']
image: ghcr.io/scality/cloudserver/ci-ceph image: ghcr.io/scality/cloudserver/ci-ceph
sproxyd:
network_mode: "host"
profiles: ['sproxyd']
image: sproxyd-standalone
build: ./sproxyd
user: 0:0
privileged: yes

28
.github/docker/mongodb/Dockerfile vendored Normal file
View File

@ -0,0 +1,28 @@
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \
CONF_DIR=/conf \
DATA_DIR=/data
# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
mkdir /logs; \
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
chown -R ${USER} ${CONF_DIR} && \
chown -R ${USER} ${DATA_DIR}
# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js
EXPOSE 27017/tcp
EXPOSE 27018
# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]

View File

@ -0,0 +1,4 @@
rs.initiate({
_id: "rs0",
members: [{ _id: 0, host: "127.0.0.1:27018" }]
});

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -exo pipefail
init_RS() {
sleep 5
mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &
mongod --bind_ip_all --config=/conf/mongod.conf

15
.github/docker/mongodb/conf/mongod.conf vendored Normal file
View File

@ -0,0 +1,15 @@
storage:
journal:
enabled: true
engine: wiredTiger
dbPath: "/data/db"
processManagement:
fork: false
net:
port: 27018
bindIp: 0.0.0.0
replication:
replSetName: "rs0"
enableMajorityReadConcern: true
security:
authorization: disabled

3
.github/docker/sproxyd/Dockerfile vendored Normal file
View File

@ -0,0 +1,3 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@ -0,0 +1,26 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

88
.github/docker/sproxyd/conf/nginx.conf vendored Normal file
View File

@ -0,0 +1,88 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}

View File

@ -0,0 +1,12 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
},
}

View File

@ -0,0 +1,43 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root

View File

@ -1,7 +1,10 @@
name: Test alerts name: Test alerts
on: on:
push push:
branches-ignore:
- 'development/**'
- 'q/*/**'
jobs: jobs:
run-alert-tests: run-alert-tests:
@ -17,13 +20,16 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
- name: Render and test ${{ matrix.tests.name }} - name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.1 uses: scality/action-prom-render-test@1.0.3
with: with:
alert_file_path: monitoring/alerts.yaml alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }} test_file_path: ${{ matrix.tests.file }}
alert_inputs: >- alert_inputs: |
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3 namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }} github_token: ${{ secrets.GITHUB_TOKEN }}

25
.github/workflows/codeql.yaml vendored Normal file
View File

@ -0,0 +1,25 @@
---
name: codeQL
on:
push:
branches: [w/**, q/*]
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
workflow_dispatch:
jobs:
analyze:
name: Static analysis with CodeQL
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v3

View File

@ -0,0 +1,16 @@
---
name: dependency review
on:
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4

View File

@ -1,5 +1,6 @@
--- ---
name: release name: release
run-name: release ${{ inputs.tag }}
on: on:
workflow_dispatch: workflow_dispatch:
@ -9,58 +10,69 @@ on:
required: true required: true
env: env:
REGISTRY_NAME: registry.scality.com
PROJECT_NAME: ${{ github.event.repository.name }} PROJECT_NAME: ${{ github.event.repository.name }}
jobs: jobs:
build-federation-image: build-federation-image:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1 runs-on: ubuntu-20.04
secrets: inherit steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
uses: docker/build-push-action@v5
with: with:
push: true push: true
registry: registry.scality.com
namespace: ${{ github.event.repository.name }}
name: ${{ github.event.repository.name }}
context: . context: .
file: images/svc-base/Dockerfile file: images/svc-base/Dockerfile
tag: ${{ github.event.inputs.tag }}-svc-base tags: |
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
release: release:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
- name: Set up Docker Buildk - name: Set up Docker Buildk
uses: docker/setup-buildx-action@v1 uses: docker/setup-buildx-action@v3
- name: Login to Registry - name: Login to Registry
uses: docker/login-action@v1 uses: docker/login-action@v3
with: with:
registry: ${{ env.REGISTRY_NAME }} registry: ghcr.io
username: ${{ secrets.REGISTRY_LOGIN }} username: ${{ github.repository_owner }}
password: ${{ secrets.REGISTRY_PASSWORD }} password: ${{ github.token }}
- name: Push dashboards into the production namespace - name: Push dashboards into the production namespace
run: | run: |
oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \ oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
dashboard.json:application/grafana-dashboard+json \ dashboard.json:application/grafana-dashboard+json \
alerts.yaml:application/prometheus-alerts+yaml alerts.yaml:application/prometheus-alerts+yaml
working-directory: monitoring working-directory: monitoring
- name: Build and push - name: Build and push
uses: docker/build-push-action@v2 uses: docker/build-push-action@v5
with: with:
context: . context: .
push: true push: true
tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }} tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
cache-from: type=gha cache-from: type=gha
cache-to: type=gha,mode=max cache-to: type=gha,mode=max
- name: Create Release - name: Create Release
uses: softprops/action-gh-release@v1 uses: softprops/action-gh-release@v2
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ github.token }}
with: with:
name: Release ${{ github.event.inputs.tag }} name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }} tag_name: ${{ github.event.inputs.tag }}

View File

@ -2,6 +2,8 @@
name: tests name: tests
on: on:
workflow_dispatch:
push: push:
branches-ignore: branches-ignore:
- 'development/**' - 'development/**'
@ -65,23 +67,24 @@ env:
ENABLE_LOCAL_CACHE: "true" ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1" REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1" REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs: jobs:
linting-coverage: linting-coverage:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
- uses: actions/setup-node@v2 - uses: actions/setup-node@v4
with: with:
node-version: '16' node-version: '16'
cache: yarn cache: yarn
- name: install dependencies - name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1 run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v4 - uses: actions/setup-python@v5
with: with:
python-version: '3.9' python-version: '3.9'
- uses: actions/cache@v2 - uses: actions/cache@v4
with: with:
path: ~/.cache/pip path: ~/.cache/pip
key: ${{ runner.os }}-pip key: ${{ runner.os }}-pip
@ -114,7 +117,7 @@ jobs:
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";" find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always() if: always()
- name: Upload files to artifacts - name: Upload files to artifacts
uses: scality/action-artifacts@v2 uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -125,61 +128,88 @@ jobs:
build: build:
runs-on: ubuntu-20.04 runs-on: ubuntu-20.04
permissions:
contents: read
packages: write
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v2 uses: actions/checkout@v4
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0 uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry - name: Login to GitHub Registry
uses: docker/login-action@v1.10.0 uses: docker/login-action@v3
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ github.token }}
- name: Login to Registry
uses: docker/login-action@v1
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Build and push cloudserver image - name: Build and push cloudserver image
uses: docker/build-push-action@v3 uses: docker/build-push-action@v5
with: with:
push: true push: true
context: . context: .
provenance: false provenance: false
tags: | tags: |
ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} ghcr.io/${{ github.repository }}:${{ github.sha }}
registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }} labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
uses: docker/build-push-action@v5
with:
push: true
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend: multiple-backend:
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build needs: build
env: env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple S3DATA: multiple
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker-compose up -d run: docker compose --profile sproxyd up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run multiple backend test - name: Run multiple backend test
run: |- run: |-
set -o pipefail; set -o pipefail;
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env: env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v3 uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -198,26 +228,26 @@ jobs:
S3KMS: file S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0 DEFAULT_BUCKET_KEY_FORMAT: v0
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker-compose --profile mongo up -d run: docker compose --profile mongo up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run functional tests - name: Run functional tests
run: |- run: |-
set -o pipefail; set -o pipefail;
source ~/.virtualenv/py2/bin/activate
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env: env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v3 uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -237,27 +267,27 @@ jobs:
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v1 DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1 METADATA_MAX_CACHED_BUCKETS: 1
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker-compose --profile mongo up -d run: docker compose --profile mongo up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run functional tests - name: Run functional tests
run: |- run: |-
set -o pipefail; set -o pipefail;
source ~/.virtualenv/py2/bin/activate
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
env: env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v3 uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -267,30 +297,40 @@ jobs:
if: always() if: always()
file-ft-tests: file-ft-tests:
strategy:
matrix:
include:
- job-name: file-ft-tests
name: ${{ matrix.job-name }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: build needs: build
env: env:
S3BACKEND: file S3BACKEND: file
S3VAULT: mem S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes" MPU_TESTING: "yes"
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ matrix.job-name }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
shell: bash
run: |
set -exu
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
- name: Setup CI services - name: Setup CI services
run: docker-compose up -d run: docker compose up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run file ft tests - name: Run file ft tests
run: |- run: |-
set -o pipefail; set -o pipefail;
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
source ~/.virtualenv/py2/bin/activate yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v3 uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -306,15 +346,16 @@ jobs:
ENABLE_UTAPI_V2: t ENABLE_UTAPI_V2: t
S3BACKEND: mem S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Setup CI services - name: Setup CI services
run: docker-compose up -d run: docker compose up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run file utapi v2 tests - name: Run file utapi v2 tests
run: |- run: |-
@ -322,7 +363,51 @@ jobs:
bash wait_for_local_port.bash 8000 40 bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v3 uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -338,18 +423,20 @@ jobs:
S3BACKEND: file S3BACKEND: file
S3VAULT: mem S3VAULT: mem
MPU_TESTING: "yes" MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- name: Copy KMIP certs - name: Copy KMIP certs
run: cp -r ./certs /tmp/ssl-kmip run: cp -r ./certs /tmp/ssl-kmip
working-directory: .github/pykmip working-directory: .github/pykmip
- name: Setup CI services - name: Setup CI services
run: docker-compose --profile pykmip up -d run: docker compose --profile pykmip up -d
working-directory: .github/docker working-directory: .github/docker
- name: Run file KMIP tests - name: Run file KMIP tests
run: |- run: |-
@ -358,7 +445,7 @@ jobs:
bash wait_for_local_port.bash 5696 40 bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v3 uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net
@ -377,30 +464,31 @@ jobs:
CI_CEPH: 'true' CI_CEPH: 'true'
MPU_TESTING: "yes" MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }} MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }} JOB_NAME: ${{ github.job }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: Login to GitHub Registry - name: Login to GitHub Registry
uses: docker/login-action@v1.10.0 uses: docker/login-action@v3
with: with:
registry: ghcr.io registry: ghcr.io
username: ${{ github.repository_owner }} username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ github.token }}
- name: Setup CI environment - name: Setup CI environment
uses: ./.github/actions/setup-ci uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1 - uses: ruby/setup-ruby@v1
with: with:
ruby-version: '2.5.0' ruby-version: '2.5.9'
- name: Install Ruby dependencies - name: Install Ruby dependencies
run: | run: |
gem install nokogiri:1.12.5 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5 gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
- name: Install Java dependencies - name: Install Java dependencies
run: | run: |
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
- name: Setup CI services - name: Setup CI services
run: docker-compose --profile ceph up -d run: docker compose --profile ceph up -d
working-directory: .github/docker working-directory: .github/docker
env: env:
S3METADATA: mongodb S3METADATA: mongodb
@ -422,12 +510,11 @@ jobs:
- name: Run Ruby tests - name: Run Ruby tests
run: |- run: |-
set -ex -o pipefail; set -ex -o pipefail;
rspec tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
working-directory: tests/functional/fog working-directory: tests/functional/fog
- name: Run Javascript AWS SDK tests - name: Run Javascript AWS SDK tests
run: |- run: |-
set -ex -o pipefail; set -ex -o pipefail;
source ~/.virtualenv/py2/bin/activate
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log; yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log; yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
env: env:
@ -436,7 +523,7 @@ jobs:
S3VAULT: mem S3VAULT: mem
S3METADATA: mongodb S3METADATA: mongodb
- name: Upload logs to artifacts - name: Upload logs to artifacts
uses: scality/action-artifacts@v3 uses: scality/action-artifacts@v4
with: with:
method: upload method: upload
url: https://artifacts.scality.net url: https://artifacts.scality.net

View File

@ -1,4 +1,4 @@
ARG NODE_VERSION=16.17.1-bullseye-slim ARG NODE_VERSION=16.20-bullseye-slim
FROM node:${NODE_VERSION} as builder FROM node:${NODE_VERSION} as builder
@ -23,6 +23,7 @@ RUN apt-get update \
ENV PYTHON=python3 ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/ COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1 RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
################################################################################ ################################################################################
@ -42,6 +43,7 @@ EXPOSE 8002
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
jq \ jq \
tini \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app WORKDIR /usr/src/app
@ -53,6 +55,6 @@ COPY --from=builder /usr/src/app/node_modules ./node_modules/
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"] VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"] ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ] CMD [ "yarn", "start" ]

175
README.md
View File

@ -1,10 +1,7 @@
# Zenko CloudServer # Zenko CloudServer with Vitastor Backend
![Zenko CloudServer logo](res/scality-cloudserver-logo.png) ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
## Overview ## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -14,137 +11,71 @@ Scalitys Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud. backend data storage both on-premise or public in the cloud.
CloudServer is useful for Developers, either to run as part of a This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
continous integration test environment to emulate the AWS S3 service locally backend support.
or as an abstraction layer to develop object storage enabled
application on the go.
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/) ## Quick Start with Vitastor
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/) Vitastor Backend is in experimental status, however you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
it works too 😊.
## Docker Installation instructions:
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/) ### Install Vitastor
## Contributing Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
In order to contribute, please follow the ### Install Zenko with Vitastor Backend
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
## Installation - Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
### Dependencies ### Install and Configure MongoDB
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
### Clone source code ### Setup Zenko
```shell - Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
git clone https://github.com/scality/S3.git - Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
in this file.
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
``` ```
### Install js dependencies ```
AWS_ACCESS_KEY_ID=accessKey1 \
Go to the ./S3 folder, AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```shell
yarn install --frozen-lockfile
``` ```
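The same access works from code; for example, a minimal AWS SDK for JavaScript (v2) client, assuming the default keys above (path-style addressing is needed because the bucket is not a DNS subdomain of the endpoint):

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true, // required for a plain host:port endpoint
});

s3.createBucket({ Bucket: 'testbucket' }, err => {
    if (err) {
        throw err;
    }
    s3.putObject({ Bucket: 'testbucket', Key: 'hello.txt', Body: 'hi' },
        (e, data) => console.log(e || data));
});
```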
If you get an error regarding installation of the diskUsage module, # Author & License
please install g++.
If you get an error regarding level-down bindings, try clearing your yarn cache: - [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
```shell (a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it with Vault user management
Note: Vault is proprietary and must be accessed separately.
```shell
export S3VAULT=vault
yarn start
```
This starts a Zenko CloudServer using Vault for user management.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae

View File

@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -4,6 +4,7 @@
"metricsPort": 8002, "metricsPort": 8002,
"metricsListenOn": [], "metricsListenOn": [],
"replicationGroupId": "RG001", "replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": { "restEndpoints": {
"localhost": "us-east-1", "localhost": "us-east-1",
"127.0.0.1": "us-east-1", "127.0.0.1": "us-east-1",
@ -101,6 +102,14 @@
"readPreference": "primary", "readPreference": "primary",
"database": "metadata" "database": "metadata"
}, },
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": { "externalBackends": {
"aws_s3": { "aws_s3": {
"httpAgent": { "httpAgent": {

71
config.json.vitastor Normal file
View File

@ -0,0 +1,71 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@ -116,7 +116,8 @@ const constants = {
], ],
// user metadata header to set object locationConstraint // user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint', objectLocationConstraintHeader: 'x-amz-storage-class',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'], legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties // declare here all existing service accounts and their properties
// (if any, otherwise an empty object) // (if any, otherwise an empty object)
@ -129,7 +130,7 @@ const constants = {
}, },
}, },
/* eslint-disable camelcase */ /* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true }, externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
// some of the available data backends (if called directly rather // some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided // than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods. // as a string as first parameter of the get/delete methods.
@ -175,6 +176,8 @@ const constants = {
'objectDeleteTagging', 'objectDeleteTagging',
'objectGetTagging', 'objectGetTagging',
'objectPutTagging', 'objectPutTagging',
'objectPutLegalHold',
'objectPutRetention',
], ],
// response header to be sent when there are invalid // response header to be sent when there are invalid
// user metadata in the object's metadata // user metadata in the object's metadata
@ -195,11 +198,51 @@ const constants = {
'user', 'user',
'bucket', 'bucket',
], ],
arrayOfAllowed: [
'objectPutTagging',
'objectPutLegalHold',
'objectPutRetention',
],
allowedUtapiEventFilterStates: ['allow', 'deny'], allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'], allowedRestoreObjectRequestTierValues: ['Standard'],
validStorageClasses: [ lifecycleListing: {
'STANDARD', CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',
ORPHAN_DM_TYPE: 'orphan',
},
multiObjectDeleteConcurrency: 50,
maxScannedLifecycleListingEntries: 10000,
overheadField: [
'content-length',
'owner-id',
'versionId',
'isNull',
'isDeleteMarker',
], ],
unsupportedSignatureChecksums: new Set([
'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
]),
supportedSignatureChecksums: new Set([
'UNSIGNED-PAYLOAD',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
]),
ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
actionsToConsiderAsObjectPut: [
'initiateMultipartUpload',
'objectPutPart',
'completeMultipartUpload',
],
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
}; };
module.exports = constants; module.exports = constants;

View File

@ -199,6 +199,10 @@ if [[ -n "$BUCKET_DENY_FILTER" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]" JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
fi fi
if [[ "$TESTING_MODE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
fi
if [[ $JQ_FILTERS_CONFIG != "." ]]; then if [[ $JQ_FILTERS_CONFIG != "." ]]; then
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
mv config.json.tmp config.json mv config.json.tmp config.json

View File

@ -2,11 +2,12 @@
## Docker Image Generation ## Docker Image Generation
Docker images are hosted on [registry.scality.com](registry.scality.com). Docker images are hosted on [ghcri.io](https://github.com/orgs/scality/packages).
CloudServer has two namespaces there: CloudServer has a few images there:
* Production Namespace: registry.scality.com/cloudserver * Cloudserver container image: ghcr.io/scality/cloudserver
* Dev Namespace: registry.scality.com/cloudserver-dev * Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
With every CI build, the CI will push images, tagging the With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash. content with the developer branch's short SHA-1 commit hash.
@ -18,8 +19,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images ## How to Pull Docker Images
```sh ```sh
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash> docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull registry.scality.com/cloudserver/cloudserver:<tag> docker pull ghcr.io/scality/cloudserver:<tag>
``` ```
## Release Process ## Release Process

View File

@ -1,4 +1,4 @@
FROM registry.scality.com/federation/nodesvc-base:7.10.6.0 FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
@ -14,8 +14,10 @@ RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \ git config --global --add safe.directory . && \
git lfs install && \ git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \ GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \ yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all yarn cache clean --all && \
yarn global remove typescript
# run symlinking separately to avoid yarn installation errors # run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed! # we might have to check if the symlinking is really needed!

View File

@ -1,3 +1,10 @@
'use strict'; // eslint-disable-line strict 'use strict'; // eslint-disable-line strict
require('werelogs').stderrUtils.catchAndTimestampStderr(
undefined,
// Do not exit as workers have their own listener that will exit
// But primary don't have another listener
require('cluster').isPrimary ? 1 : null,
);
require('./lib/server.js')(); require('./lib/server.js')();

View File

@@ -8,16 +8,18 @@ const crypto = require('crypto');
 const { v4: uuidv4 } = require('uuid');
 const cronParser = require('cron-parser');
 const joi = require('@hapi/joi');
+const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
-const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
-const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
+const { isValidBucketName } = s3routes.routesUtils;
+const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig;
 const { buildAuthDataAccount } = require('./auth/in_memory/builder');
 const validExternalBackends = require('../constants').externalBackends;
 const { azureAccountNameRegex, base64Regex,
     allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
 } = require('../constants');
 const { utapiVersion } = require('utapi');
+const { scaleMsPerDay } = s3middleware.objectUtils;
+const constants = require('../constants');
 // config paths
 const configSearchPaths = [
@@ -105,6 +107,47 @@ function parseSproxydConfig(configSproxyd) {
     return joi.attempt(configSproxyd, joiSchema, 'bad config');
 }
+function parseRedisConfig(redisConfig) {
+    const joiSchema = joi.object({
+        password: joi.string().allow(''),
+        host: joi.string(),
+        port: joi.number(),
+        retry: joi.object({
+            connectBackoff: joi.object({
+                min: joi.number().required(),
+                max: joi.number().required(),
+                jitter: joi.number().required(),
+                factor: joi.number().required(),
+                deadline: joi.number().required(),
+            }),
+        }),
+        // sentinel config
+        sentinels: joi.alternatives().try(
+            joi.string()
+                .pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
+                .custom(hosts => hosts.split(',').map(item => {
+                    const [host, port] = item.split(':');
+                    return { host, port: Number.parseInt(port, 10) };
+                })),
+            joi.array().items(
+                joi.object({
+                    host: joi.string().required(),
+                    port: joi.number().required(),
+                })
+            ).min(1),
+        ),
+        name: joi.string(),
+        sentinelPassword: joi.string().allow(''),
+    })
+    .and('host', 'port')
+    .and('sentinels', 'name')
+    .xor('host', 'sentinels')
+    .without('sentinels', ['host', 'port'])
+    .without('host', ['sentinels', 'sentinelPassword']);
+    return joi.attempt(redisConfig, joiSchema, 'bad config');
+}
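For reference, a minimal sketch of the two shapes this schema accepts; the hostnames, ports, and sentinel group name below are illustrative, not defaults:

```js
// Standalone: host and port must appear together (.and('host', 'port'))
// and exclude any sentinel settings (.xor('host', 'sentinels')).
const standalone = parseRedisConfig({ host: 'localhost', port: 6379 });

// Sentinel: 'sentinels' requires 'name' (.and('sentinels', 'name'));
// a "host:port,host:port" string is expanded by the custom rule into
// an array of { host, port } objects.
const sentinel = parseRedisConfig({
    sentinels: 'redis-a.example:16379,redis-b.example:16379',
    name: 'mymaster',
});
```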
 function restEndpointsAssert(restEndpoints, locationConstraints) {
     assert(typeof restEndpoints === 'object',
         'bad config: restEndpoints must be an object of endpoints');
@@ -237,6 +280,60 @@ function hdClientLocationConstraintAssert(configHd) {
     return hdclientFields;
 }
+function azureArchiveLocationConstraintAssert(locationObj) {
+    const checkedFields = [
+        'azureContainerName',
+        'azureStorageEndpoint',
+    ];
+    if (Object.keys(locationObj.details).length === 0 ||
+        !checkedFields.every(field => field in locationObj.details)) {
+        return;
+    }
+    const {
+        azureContainerName,
+        azureStorageEndpoint,
+    } = locationObj.details;
+    const stringFields = [
+        azureContainerName,
+        azureStorageEndpoint,
+    ];
+    stringFields.forEach(field => {
+        assert(typeof field === 'string',
+            `bad config: ${field} must be a string`);
+    });
+    let hasAuthMethod = false;
+    if (locationObj.details.sasToken !== undefined) {
+        assert(typeof locationObj.details.sasToken === 'string',
+            `bad config: ${locationObj.details.sasToken} must be a string`);
+        hasAuthMethod = true;
+    }
+    if (locationObj.details.azureStorageAccountName !== undefined &&
+        locationObj.details.azureStorageAccessKey !== undefined) {
+        assert(typeof locationObj.details.azureStorageAccountName === 'string',
+            `bad config: ${locationObj.details.azureStorageAccountName} must be a string`);
+        assert(typeof locationObj.details.azureStorageAccessKey === 'string',
+            `bad config: ${locationObj.details.azureStorageAccessKey} must be a string`);
+        assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
+        hasAuthMethod = true;
+    }
+    if (locationObj.details.tenantId !== undefined &&
+        locationObj.details.clientId !== undefined &&
+        locationObj.details.clientKey !== undefined) {
+        assert(typeof locationObj.details.tenantId === 'string',
+            `bad config: ${locationObj.details.tenantId} must be a string`);
+        assert(typeof locationObj.details.clientId === 'string',
+            `bad config: ${locationObj.details.clientId} must be a string`);
+        assert(typeof locationObj.details.clientKey === 'string',
+            `bad config: ${locationObj.details.clientKey} must be a string`);
+        assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
+        hasAuthMethod = true;
+    }
+    assert(hasAuthMethod, 'Missing authentication method');
+}
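To make the branches above concrete, here is a sketch of the three mutually exclusive credential shapes this assert accepts (container, endpoint, and credential values are placeholders); note the check only runs when both azureContainerName and azureStorageEndpoint are present in `details`:

```js
// Option 1: SAS token only.
const sas = { details: {
    azureContainerName: 'archive',
    azureStorageEndpoint: 'https://acct.blob.core.windows.net/',
    sasToken: '?sv=2021-06-08&sig=placeholder',
} };
// Option 2: storage account name + access key (both required together).
const accountKey = { details: {
    azureContainerName: 'archive',
    azureStorageEndpoint: 'https://acct.blob.core.windows.net/',
    azureStorageAccountName: 'acct',
    azureStorageAccessKey: 'base64-placeholder',
} };
// Option 3: service principal (tenantId + clientId + clientKey).
const servicePrincipal = { details: {
    azureContainerName: 'archive',
    azureStorageEndpoint: 'https://acct.blob.core.windows.net/',
    tenantId: 'tenant-id',
    clientId: 'client-id',
    clientKey: 'client-key',
} };
```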
 function dmfLocationConstraintAssert(locationObj) {
     const checkedFields = [
         'endpoint',
@@ -280,7 +377,7 @@ function dmfLocationConstraintAssert(locationObj) {
 function locationConstraintAssert(locationConstraints) {
     const supportedBackends =
         ['mem', 'file', 'scality',
-        'mongodb', 'dmf'].concat(Object.keys(validExternalBackends));
+        'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
     assert(typeof locationConstraints === 'object',
         'bad config: locationConstraints must be an object');
     Object.keys(locationConstraints).forEach(l => {
@@ -391,6 +488,9 @@ function locationConstraintAssert(locationConstraints) {
         if (locationConstraints[l].type === 'dmf') {
             dmfLocationConstraintAssert(locationConstraints[l]);
         }
+        if (locationConstraints[l].type === 'azure_archive') {
+            azureArchiveLocationConstraintAssert(locationConstraints[l]);
+        }
         if (locationConstraints[l].type === 'pfs') {
             assert(typeof details.pfsDaemonEndpoint === 'object',
                 'bad config: pfsDaemonEndpoint is mandatory and must be an object');
@@ -402,26 +502,33 @@
             locationConstraints[l].details.connector.hdclient);
         }
     });
-    assert(Object.keys(locationConstraints)
-        .includes('us-east-1'), 'bad locationConfig: must ' +
-        'include us-east-1 as a locationConstraint');
 }
-function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
+function parseUtapiReindex(config) {
+    const {
+        enabled,
+        schedule,
+        redis,
+        bucketd,
+        onlyCountLatestWhenObjectLocked,
+    } = config;
     assert(typeof enabled === 'boolean',
         'bad config: utapi.reindex.enabled must be a boolean');
-    assert(typeof sentinel === 'object',
-        'bad config: utapi.reindex.sentinel must be an object');
-    assert(typeof sentinel.port === 'number',
-        'bad config: utapi.reindex.sentinel.port must be a number');
-    assert(typeof sentinel.name === 'string',
-        'bad config: utapi.reindex.sentinel.name must be a string');
+    const parsedRedis = parseRedisConfig(redis);
+    assert(Array.isArray(parsedRedis.sentinels),
+        'bad config: utapi reindex redis config requires a list of sentinels');
     assert(typeof bucketd === 'object',
         'bad config: utapi.reindex.bucketd must be an object');
     assert(typeof bucketd.port === 'number',
         'bad config: utapi.reindex.bucketd.port must be a number');
     assert(typeof schedule === 'string',
         'bad config: utapi.reindex.schedule must be a string');
+    if (onlyCountLatestWhenObjectLocked !== undefined) {
+        assert(typeof onlyCountLatestWhenObjectLocked === 'boolean',
+            'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean');
+    }
     try {
         cronParser.parseExpression(schedule);
     } catch (e) {
@@ -429,6 +536,13 @@ function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
         'bad config: utapi.reindex.schedule must be a valid ' +
         `cron schedule. ${e.message}.`);
     }
+    return {
+        enabled,
+        schedule,
+        redis: parsedRedis,
+        bucketd,
+        onlyCountLatestWhenObjectLocked,
+    };
 }
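A hypothetical `utapi.reindex` block that passes this parser (all values illustrative). Note the redis config must resolve to a sentinel-style setup; a plain host/port standalone config would fail the `Array.isArray(parsedRedis.sentinels)` check:

```js
const reindex = parseUtapiReindex({
    enabled: true,
    schedule: '0 0 * * Sun',              // any valid cron expression
    redis: {
        sentinels: 'redis1.local:16379',  // expanded into [{ host, port }]
        name: 'mymaster',
    },
    bucketd: { host: 'localhost', port: 9000 },
    onlyCountLatestWhenObjectLocked: false,
});
```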
 function requestsConfigAssert(requestsConfig) {
@@ -516,7 +630,6 @@ class Config extends EventEmitter {
         // Read config automatically
         this._getLocationConfig();
         this._getConfig();
-        this._configureBackends();
     }
     _getLocationConfig() {
@@ -728,11 +841,11 @@
         this.websiteEndpoints = config.websiteEndpoints;
     }
-        this.clusters = false;
-        if (config.clusters !== undefined) {
-            assert(Number.isInteger(config.clusters) && config.clusters > 0,
-                'bad config: clusters must be a positive integer');
-            this.clusters = config.clusters;
+        this.workers = false;
+        if (config.workers !== undefined) {
+            assert(Number.isInteger(config.workers) && config.workers > 0,
+                'bad config: workers must be a positive integer');
+            this.workers = config.workers;
         }
         if (config.usEastBehavior !== undefined) {
@@ -970,8 +1083,7 @@ class Config extends EventEmitter {
         assert(typeof config.localCache.port === 'number',
             'config: bad port for localCache. port must be a number');
         if (config.localCache.password !== undefined) {
-            assert(
-                this._verifyRedisPassword(config.localCache.password),
+            assert(typeof config.localCache.password === 'string',
                 'config: bad password for localCache. password must' +
                 ' be a string');
         }
@@ -997,56 +1109,46 @@
         }
         if (config.redis) {
-            if (config.redis.sentinels) {
-                this.redis = { sentinels: [], name: null };
-                assert(typeof config.redis.name === 'string',
-                    'bad config: redis sentinel name must be a string');
-                this.redis.name = config.redis.name;
-                assert(Array.isArray(config.redis.sentinels) ||
-                    typeof config.redis.sentinels === 'string',
-                    'bad config: redis sentinels must be an array or string');
-                if (typeof config.redis.sentinels === 'string') {
-                    config.redis.sentinels.split(',').forEach(item => {
-                        const [host, port] = item.split(':');
-                        this.redis.sentinels.push({ host,
-                            port: Number.parseInt(port, 10) });
-                    });
-                } else if (Array.isArray(config.redis.sentinels)) {
-                    config.redis.sentinels.forEach(item => {
-                        const { host, port } = item;
-                        assert(typeof host === 'string',
-                            'bad config: redis sentinel host must be a string');
-                        assert(typeof port === 'number',
-                            'bad config: redis sentinel port must be a number');
-                        this.redis.sentinels.push({ host, port });
-                    });
-                }
-                if (config.redis.sentinelPassword !== undefined) {
-                    assert(
-                        this._verifyRedisPassword(config.redis.sentinelPassword));
-                    this.redis.sentinelPassword = config.redis.sentinelPassword;
-                }
-            } else {
-                // check for standalone configuration
-                this.redis = {};
-                assert(typeof config.redis.host === 'string',
-                    'bad config: redis.host must be a string');
-                assert(typeof config.redis.port === 'number',
-                    'bad config: redis.port must be a number');
-                this.redis.host = config.redis.host;
-                this.redis.port = config.redis.port;
-            }
-            if (config.redis.password !== undefined) {
-                assert(
-                    this._verifyRedisPassword(config.redis.password),
-                    'bad config: invalid password for redis. password must ' +
-                    'be a string');
-                this.redis.password = config.redis.password;
-            }
+            this.redis = parseRedisConfig(config.redis);
         }
+        if (config.scuba) {
+            this.scuba = {};
+            if (config.scuba.host) {
+                assert(typeof config.scuba.host === 'string',
+                    'bad config: scuba host must be a string');
+                this.scuba.host = config.scuba.host;
+            }
+            if (config.scuba.port) {
+                assert(Number.isInteger(config.scuba.port)
+                    && config.scuba.port > 0,
+                    'bad config: scuba port must be a positive integer');
+                this.scuba.port = config.scuba.port;
+            }
+        }
+        if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
+            assert(typeof process.env.SCUBA_HOST === 'string',
+                'bad config: scuba host must be a string');
+            assert(Number.isInteger(Number(process.env.SCUBA_PORT))
+                && Number(process.env.SCUBA_PORT) > 0,
+                'bad config: scuba port must be a positive integer');
+            this.scuba = {
+                host: process.env.SCUBA_HOST,
+                port: Number(process.env.SCUBA_PORT),
+            };
+        }
+        if (this.scuba) {
+            this.quotaEnabled = true;
+        }
+        const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
+            config.quota?.maxStatenessMS ||
+            24 * 60 * 60 * 1000;
+        assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
+        const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
+            config.quota?.enableInflights || false;
+        this.quota = {
+            maxStaleness,
+            enableInflights,
+        };
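A sketch of the environment-driven path above (values illustrative): SCUBA_HOST/SCUBA_PORT take precedence over `config.scuba` and implicitly enable quotas.

```js
// Environment variables are always strings; ports and booleans are parsed.
process.env.SCUBA_HOST = 'scuba.local';
process.env.SCUBA_PORT = '8100';             // must parse to a positive integer
process.env.QUOTA_ENABLE_INFLIGHTS = 'true'; // compared literally to 'true'
process.env.QUOTA_MAX_STALENESS_MS = '60000'; // defaults to 24h when unset
```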
         if (config.utapi) {
             this.utapi = { component: 's3' };
             if (config.utapi.host) {
@@ -1075,50 +1177,8 @@
             assert(config.redis, 'missing required property of utapi ' +
                 'configuration: redis');
             if (config.utapi.redis) {
-                if (config.utapi.redis.sentinels) {
-                    this.utapi.redis = { sentinels: [], name: null };
-                    assert(typeof config.utapi.redis.name === 'string',
-                        'bad config: redis sentinel name must be a string');
-                    this.utapi.redis.name = config.utapi.redis.name;
-                    assert(Array.isArray(config.utapi.redis.sentinels),
-                        'bad config: redis sentinels must be an array');
-                    config.utapi.redis.sentinels.forEach(item => {
-                        const { host, port } = item;
-                        assert(typeof host === 'string',
-                            'bad config: redis sentinel host must be a string');
-                        assert(typeof port === 'number',
-                            'bad config: redis sentinel port must be a number');
-                        this.utapi.redis.sentinels.push({ host, port });
-                    });
-                } else {
-                    // check for standalone configuration
-                    this.utapi.redis = {};
-                    assert(typeof config.utapi.redis.host === 'string',
-                        'bad config: redis.host must be a string');
-                    assert(typeof config.utapi.redis.port === 'number',
-                        'bad config: redis.port must be a number');
-                    this.utapi.redis.host = config.utapi.redis.host;
-                    this.utapi.redis.port = config.utapi.redis.port;
-                }
-                if (config.utapi.redis.retry !== undefined) {
-                    if (config.utapi.redis.retry.connectBackoff !== undefined) {
-                        const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
-                        assert.strictEqual(typeof min, 'number',
-                            'utapi.redis.retry.connectBackoff: min must be a number');
-                        assert.strictEqual(typeof max, 'number',
-                            'utapi.redis.retry.connectBackoff: max must be a number');
-                        assert.strictEqual(typeof jitter, 'number',
-                            'utapi.redis.retry.connectBackoff: jitter must be a number');
-                        assert.strictEqual(typeof factor, 'number',
-                            'utapi.redis.retry.connectBackoff: factor must be a number');
-                        assert.strictEqual(typeof deadline, 'number',
-                            'utapi.redis.retry.connectBackoff: deadline must be a number');
-                    }
-                    this.utapi.redis.retry = config.utapi.redis.retry;
-                } else {
+                this.utapi.redis = parseRedisConfig(config.utapi.redis);
+                if (this.utapi.redis.retry === undefined) {
                     this.utapi.redis.retry = {
                         connectBackoff: {
                             min: 10,
                             max: 500,
@@ -1129,22 +1189,6 @@
                         },
                     };
                 }
-                if (config.utapi.redis.password !== undefined) {
-                    assert(
-                        this._verifyRedisPassword(config.utapi.redis.password),
-                        'config: invalid password for utapi redis. password' +
-                        ' must be a string');
-                    this.utapi.redis.password = config.utapi.redis.password;
-                }
-                if (config.utapi.redis.sentinelPassword !== undefined) {
-                    assert(
-                        this._verifyRedisPassword(
-                            config.utapi.redis.sentinelPassword),
-                        'config: invalid password for utapi redis. password' +
-                        ' must be a string');
-                    this.utapi.redis.sentinelPassword =
-                        config.utapi.redis.sentinelPassword;
-                }
             }
             if (config.utapi.metrics) {
                 this.utapi.metrics = config.utapi.metrics;
@@ -1214,8 +1258,7 @@
         }
         if (config.utapi && config.utapi.reindex) {
-            parseUtapiReindex(config.utapi.reindex);
-            this.utapi.reindex = config.utapi.reindex;
+            this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
         }
     }
@@ -1260,6 +1303,8 @@
             }
         }
+        this.authdata = config.authdata || 'authdata.json';
         this.kms = {};
         if (config.kms) {
             assert(typeof config.kms.userName === 'string');
@@ -1479,25 +1524,6 @@
             this.outboundProxy.certs = certObj.certs;
         }
-        this.managementAgent = {};
-        this.managementAgent.port = 8010;
-        this.managementAgent.host = 'localhost';
-        if (config.managementAgent !== undefined) {
-            if (config.managementAgent.port !== undefined) {
-                assert(Number.isInteger(config.managementAgent.port)
-                    && config.managementAgent.port > 0,
-                    'bad config: managementAgent port must be a positive ' +
-                    'integer');
-                this.managementAgent.port = config.managementAgent.port;
-            }
-            if (config.managementAgent.host !== undefined) {
-                assert.strictEqual(typeof config.managementAgent.host, 'string',
-                    'bad config: management agent host must ' +
-                    'be a string');
-                this.managementAgent.host = config.managementAgent.host;
-            }
-        }
         // Ephemeral token to protect the reporting endpoint:
         // try inherited from parent first, then hardcoded in conf file,
         // then create a fresh one as last resort.
@@ -1550,6 +1576,10 @@
             requestsConfigAssert(config.requests);
             this.requests = config.requests;
         }
+        // CLDSRV-378: on 8.x branches, null version compatibility
+        // mode is enforced because null keys are not supported by the
+        // MongoDB backend.
+        this.nullVersionCompatMode = true;
         if (config.bucketNotificationDestinations) {
             this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations);
         }
@@ -1558,37 +1588,102 @@
         // Version of the configuration we're running under
         this.overlayVersion = config.overlayVersion || 0;
+        this._setTimeOptions();
+        this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
+        const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
+        if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
+            this.multiObjectDeleteConcurrency = extractedNumber;
+        }
+        this.multiObjectDeleteEnableOptimizations = true;
+        if (config.multiObjectDeleteEnableOptimizations === false) {
+            this.multiObjectDeleteEnableOptimizations = false;
+        }
+        this.testingMode = config.testingMode || false;
+        this.maxScannedLifecycleListingEntries = constants.maxScannedLifecycleListingEntries;
+        if (config.maxScannedLifecycleListingEntries !== undefined) {
+            // maxScannedLifecycleListingEntries > 2 is required as a minimum because we must
+            // scan at least three entries to determine version eligibility.
+            // Two entries representing the master key and the following one representing the non-current version.
+            assert(Number.isInteger(config.maxScannedLifecycleListingEntries) &&
+                config.maxScannedLifecycleListingEntries > 2,
+                'bad config: maxScannedLifecycleListingEntries must be greater than 2');
+            this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
+        }
+        this._configureBackends(config);
     }
+    _setTimeOptions() {
+        // NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
+        // TIME_PROGRESSION_FACTOR which decreases the weight attributed to a day in order to among other things
+        // expedite the lifecycle of objects.
+        // moves lifecycle expiration deadlines 1 day earlier, mostly for testing
+        const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
+        // moves lifecycle transition deadlines 1 day earlier, mostly for testing
+        const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
+        // decreases the weight attributed to a day in order to expedite the lifecycle of objects.
+        const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
+        const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
+        assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
+            '"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
+        // The scaledMsPerDay value is initially set to the number of milliseconds per day
+        // (24 * 60 * 60 * 1000) as the default value.
+        // However, during testing, if the timeProgressionFactor is defined and greater than 1,
+        // the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
+        // earlier in time.
+        const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
+        this.timeOptions = {
+            expireOneDayEarlier,
+            transitionOneDayEarlier,
+            timeProgressionFactor,
+            scaledMsPerDay,
+        };
+    }
+    getTimeOptions() {
+        return this.timeOptions;
+    }
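An illustrative reading of the factor, assuming scaleMsPerDay divides the 24h day weight by the given factor:

```js
// With TIME_PROGRESSION_FACTOR=24, one configured "day" elapses in one hour.
process.env.TIME_PROGRESSION_FACTOR = '24';
// getTimeOptions() would then yield roughly:
// { expireOneDayEarlier: false, transitionOneDayEarlier: false,
//   timeProgressionFactor: 24, scaledMsPerDay: 3600000 }
```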
     _getAuthData() {
-        return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
+        return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
     }
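For orientation, a minimal sketch of the shape the in-memory auth backend expects the parsed file to have (all values below are placeholders, not real credentials):

```js
const exampleAuthdata = {
    accounts: [{
        name: 'bart',
        email: 'sampleaccount1@sampling.com',
        arn: 'arn:aws:iam::123456789012:root',
        canonicalID: '64-hex-canonical-id-placeholder',
        shortid: '123456789012',
        keys: [{ access: 'accessKey1', secret: 'verySecretKey1' }],
    }],
};
```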
-    _configureBackends() {
+    _configureBackends(config) {
+        const backends = config.backends || {};
         /**
          * Configure the backends for Authentication, Data and Metadata.
          */
-        let auth = 'mem';
-        let data = 'multiple';
-        let metadata = 'file';
-        let kms = 'file';
+        let auth = backends.auth || 'mem';
+        let data = backends.data || 'multiple';
+        let metadata = backends.metadata || 'file';
+        let kms = backends.kms || 'file';
+        let quota = backends.quota || 'none';
         if (process.env.S3BACKEND) {
             const validBackends = ['mem', 'file', 'scality', 'cdmi'];
             assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
                 'bad environment variable: S3BACKEND environment variable ' +
                 'should be one of mem/file/scality/cdmi'
             );
-            auth = process.env.S3BACKEND;
+            auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
             data = process.env.S3BACKEND;
             metadata = process.env.S3BACKEND;
             kms = process.env.S3BACKEND;
         }
         if (process.env.S3VAULT) {
             auth = process.env.S3VAULT;
+            auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
         }
         if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
             // Auth only checks for 'mem' since mem === file
-            auth = 'mem';
             let authData;
             if (process.env.SCALITY_ACCESS_KEY_ID &&
                 process.env.SCALITY_SECRET_ACCESS_KEY) {
@@ -1617,10 +1712,10 @@
                 'should be one of mem/file/scality/multiple'
             );
             data = process.env.S3DATA;
-        }
-        if (data === 'scality' || data === 'multiple') {
-            data = 'multiple';
+            if (data === 'scality' || data === 'multiple') {
+                data = 'multiple';
+            }
         }
         assert(this.locationConstraints !== undefined &&
             this.restEndpoints !== undefined,
             'bad config: locationConstraints and restEndpoints must be set'
@@ -1632,18 +1727,18 @@
         if (process.env.S3KMS) {
             kms = process.env.S3KMS;
         }
+        if (process.env.S3QUOTA) {
+            quota = process.env.S3QUOTA;
+        }
         this.backends = {
             auth,
             data,
             metadata,
             kms,
+            quota,
         };
     }
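A hypothetical config.json fragment selecting backends explicitly; any key omitted falls back to the defaults shown in _configureBackends, and the value names here are illustrative:

```js
const configFragment = {
    workers: 4,
    backends: {
        auth: 'scality',   // 'file'/'cdmi' are folded into 'mem'
        data: 'multiple',
        metadata: 'file',
        kms: 'file',
        quota: 'none',     // overridable via the S3QUOTA env variable
    },
};
```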
-    _verifyRedisPassword(password) {
-        return typeof password === 'string';
-    }
     setAuthDataAccounts(accounts) {
         this.authData.accounts = accounts;
         this.emit('authdata-update');
@@ -1766,10 +1861,19 @@
             .update(instanceId)
             .digest('hex');
     }
+    isQuotaEnabled() {
+        return !!this.quotaEnabled;
+    }
+    isQuotaInflightEnabled() {
+        return this.quota.enableInflights;
+    }
 }
 module.exports = {
     parseSproxydConfig,
+    parseRedisConfig,
     locationConstraintAssert,
     ConfigObject: Config,
     config: new Config(),
@@ -1777,4 +1881,5 @@
     bucketNotifAssert,
     azureGetStorageAccountName,
     azureGetLocationCredentials,
+    azureArchiveLocationConstraintAssert,
 };

View File

@@ -7,6 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
 const bucketDeleteWebsite = require('./bucketDeleteWebsite');
 const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
 const bucketDeletePolicy = require('./bucketDeletePolicy');
+const bucketDeleteQuota = require('./bucketDeleteQuota');
 const { bucketGet } = require('./bucketGet');
 const bucketGetACL = require('./bucketGetACL');
 const bucketGetCors = require('./bucketGetCors');
@@ -17,6 +18,7 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
 const bucketGetNotification = require('./bucketGetNotification');
 const bucketGetObjectLock = require('./bucketGetObjectLock');
 const bucketGetPolicy = require('./bucketGetPolicy');
+const bucketGetQuota = require('./bucketGetQuota');
 const bucketGetEncryption = require('./bucketGetEncryption');
 const bucketHead = require('./bucketHead');
 const { bucketPut } = require('./bucketPut');
@@ -33,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
 const bucketPutEncryption = require('./bucketPutEncryption');
 const bucketPutPolicy = require('./bucketPutPolicy');
 const bucketPutObjectLock = require('./bucketPutObjectLock');
+const bucketUpdateQuota = require('./bucketUpdateQuota');
 const bucketGetReplication = require('./bucketGetReplication');
 const bucketDeleteReplication = require('./bucketDeleteReplication');
 const corsPreflight = require('./corsPreflight');
@@ -44,7 +47,7 @@ const metadataSearch = require('./metadataSearch');
 const { multiObjectDelete } = require('./multiObjectDelete');
 const multipartDelete = require('./multipartDelete');
 const objectCopy = require('./objectCopy');
-const objectDelete = require('./objectDelete');
+const { objectDelete } = require('./objectDelete');
 const objectDeleteTagging = require('./objectDeleteTagging');
 const objectGet = require('./objectGet');
 const objectGetACL = require('./objectGetACL');
@@ -64,8 +67,7 @@ const prepareRequestContexts
     = require('./apiUtils/authorization/prepareRequestContexts');
 const serviceGet = require('./serviceGet');
 const vault = require('../auth/vault');
-const websiteGet = require('./websiteGet');
-const websiteHead = require('./websiteHead');
+const website = require('./website');
 const writeContinue = require('../utilities/writeContinue');
 const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
 const parseCopySource = require('./apiUtils/object/parseCopySource');
@@ -83,6 +85,10 @@ const api = {
         // Attach the apiMethod method to the request, so it can used by monitoring in the server
         // eslint-disable-next-line no-param-reassign
         request.apiMethod = apiMethod;
+        // Array of end of API callbacks, used to perform some logic
+        // at the end of an API.
+        // eslint-disable-next-line no-param-reassign
+        request.finalizerHooks = [];
         const actionLog = monitoringMap[apiMethod];
         if (!actionLog &&
@@ -117,6 +123,7 @@
         // no need to check auth on website or cors preflight requests
         if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
             apiMethod === 'corsPreflight') {
+            request.actionImplicitDenies = false;
             return this[apiMethod](request, log, callback);
         }
@@ -139,15 +146,25 @@
         const requestContexts = prepareRequestContexts(apiMethod, request,
             sourceBucket, sourceObject, sourceVersionId);
+        // Extract all the _apiMethods and store them in an array
+        const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : [];
+        // Attach the names to the current request
+        // eslint-disable-next-line no-param-reassign
+        request.apiMethods = apiMethods;
         function checkAuthResults(authResults) {
             let returnTagCount = true;
+            const isImplicitDeny = {};
+            let isOnlyImplicitDeny = true;
             if (apiMethod === 'objectGet') {
                 // first item checks s3:GetObject(Version) action
-                if (!authResults[0].isAllowed) {
+                if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
                     log.trace('get object authorization denial from Vault');
                     return errors.AccessDenied;
                 }
+                // TODO add support for returnTagCount in the bucket policy
+                // checks
+                isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
                 // second item checks s3:GetObject(Version)Tagging action
                 if (!authResults[1].isAllowed) {
                     log.trace('get tagging authorization denial ' +
@@ -156,25 +173,41 @@
                 }
             } else {
                 for (let i = 0; i < authResults.length; i++) {
-                    if (!authResults[i].isAllowed) {
+                    isImplicitDeny[authResults[i].action] = true;
+                    if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
+                        // Any explicit deny rejects the current API call
                         log.trace('authorization denial from Vault');
                         return errors.AccessDenied;
                     }
+                    if (authResults[i].isAllowed) {
+                        // If the action is allowed, the result is not implicit
+                        // Deny.
+                        isImplicitDeny[authResults[i].action] = false;
+                        isOnlyImplicitDeny = false;
+                    }
                 }
             }
-            return returnTagCount;
+            // These two APIs cannot use ACLs or Bucket Policies, hence, any
+            // implicit deny from vault must be treated as an explicit deny.
+            if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
+                return errors.AccessDenied;
+            }
+            return { returnTagCount, isImplicitDeny };
         }
         return async.waterfall([
             next => auth.server.doAuth(
-                request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
+                request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
                     if (err) {
+                        // VaultClient returns standard errors, but the route requires
+                        // Arsenal errors
+                        const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
                         log.trace('authentication error', { error: err });
-                        return next(err);
+                        return next(arsenalError);
                     }
-                    return next(null, userInfo, authorizationResults, streamingV4Params);
+                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
                 }, 's3', requestContexts),
-            (userInfo, authorizationResults, streamingV4Params, next) => {
+            (userInfo, authorizationResults, streamingV4Params, infos, next) => {
                 const authNames = { accountName: userInfo.getAccountDisplayName() };
                 if (userInfo.isRequesterAnIAMUser()) {
                     authNames.userName = userInfo.getIAMdisplayName();
@@ -184,7 +217,7 @@
                 }
                 log.addDefaultFields(authNames);
                 if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
-                    return next(null, userInfo, authorizationResults, streamingV4Params);
+                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
                 }
                 // issue 100 Continue to the client
                 writeContinue(request, response);
@@ -215,12 +248,12 @@
                     }
                     // Convert array of post buffers into one string
                     request.post = Buffer.concat(post, postLength).toString();
-                    return next(null, userInfo, authorizationResults, streamingV4Params);
+                    return next(null, userInfo, authorizationResults, streamingV4Params, infos);
                 });
                 return undefined;
             },
             // Tag condition keys require information from CloudServer for evaluation
-            (userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
+            (userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
                 authorizationResults,
                 request,
                 requestContexts,
@@ -231,33 +264,47 @@
                     log.trace('tag authentication error', { error: err });
                     return next(err);
                 }
-                    return next(null, userInfo, authResultsWithTags, streamingV4Params);
+                    return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
                 },
             ),
-        ], (err, userInfo, authorizationResults, streamingV4Params) => {
+        ], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
             if (err) {
                 return callback(err);
             }
+            request.accountQuotas = infos?.accountQuota;
             if (authorizationResults) {
                 const checkedResults = checkAuthResults(authorizationResults);
                 if (checkedResults instanceof Error) {
                     return callback(checkedResults);
                 }
-                returnTagCount = checkedResults;
+                returnTagCount = checkedResults.returnTagCount;
+                request.actionImplicitDenies = checkedResults.isImplicitDeny;
+            } else {
+                // create an object of keys apiMethods with all values to false:
+                // for backward compatibility, all apiMethods are allowed by default
+                // thus it is explicitly allowed, so implicit deny is false
+                request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
+                    acc[curr] = false;
+                    return acc;
+                }, {});
             }
+            const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
+                (hook, done) => hook(err, done),
+                () => callback(err, ...results));
             if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
                 request._response = response;
                 return this[apiMethod](userInfo, request, streamingV4Params,
-                    log, callback, authorizationResults);
+                    log, methodCallback, authorizationResults);
             }
             if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
                 return this[apiMethod](userInfo, request, sourceBucket,
-                    sourceObject, sourceVersionId, log, callback);
+                    sourceObject, sourceVersionId, log, methodCallback);
             }
             if (apiMethod === 'objectGet') {
                 return this[apiMethod](userInfo, request, returnTagCount, log, callback);
             }
-            return this[apiMethod](userInfo, request, log, callback);
+            return this[apiMethod](userInfo, request, log, methodCallback);
         });
     },
     bucketDelete,
@@ -284,11 +331,14 @@
     bucketPutReplication,
     bucketGetReplication,
     bucketDeleteReplication,
+    bucketDeleteQuota,
     bucketPutLifecycle,
+    bucketUpdateQuota,
     bucketGetLifecycle,
     bucketDeleteLifecycle,
     bucketPutPolicy,
     bucketGetPolicy,
+    bucketGetQuota,
     bucketDeletePolicy,
     bucketPutObjectLock,
     bucketPutNotification,
@@ -320,8 +370,8 @@
     objectPutRetention,
     objectRestore,
     serviceGet,
-    websiteGet,
-    websiteHead,
+    websiteGet: website,
+    websiteHead: website,
 };
 module.exports = api;
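To make the new request-scoped fields concrete, a sketch of what an API handler sees after this dispatcher runs; the handler itself is hypothetical, not part of the diff:

```js
function exampleHandler(userInfo, request, log, callback) {
    // One entry per action evaluated by Vault; true means the deny was
    // implicit (no matching IAM statement) and may still be overridden
    // by ACLs or a bucket policy downstream.
    const implicit = request.actionImplicitDenies; // e.g. { objectPut: false }
    // Hooks run (up to 5 concurrently) after the API method finishes,
    // before the final response callback fires.
    request.finalizerHooks.push((err, done) => done());
    return callback(null, implicit);
}
```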

View File

@@ -1,11 +1,23 @@
-const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
+const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
+const { errors } = require('arsenal');
+const { parseCIDR, isValid } = require('ipaddr.js');
 const constants = require('../../../../constants');
+const { config } = require('../../../Config');
-const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
+const {
+    allAuthedUsersId,
+    bucketOwnerActions,
+    logId,
+    publicId,
+    arrayOfAllowed,
+    assumedRoleArnResourceType,
+    backbeatLifecycleSessionName,
+    actionsToConsiderAsObjectPut,
+} = constants;
 // whitelist buckets to allow public read on objects
-const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
-    process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
+const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
+    ? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
 function getServiceAccountProperties(canonicalID) {
     const canonicalIDArray = canonicalID.split('/');
@@ -26,13 +38,41 @@ function isRequesterNonAccountUser(authInfo) {
     return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
 }
-function checkBucketAcls(bucket, requestType, canonicalID) {
+/**
+ * Checks the access control for a given bucket based on the request type and user's canonical ID.
+ *
+ * @param {Bucket} bucket - The bucket to check access control for.
+ * @param {string} requestType - The list of s3 actions to check within the API call.
+ * @param {string} canonicalID - The canonical ID of the user making the request.
+ * @param {string} mainApiCall - The main API call (first item of the requestType).
+ *
+ * @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
+ */
+function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
+    // Same logic applies on the Versioned APIs, so let's simplify it.
+    let requestTypeParsed = requestType.endsWith('Version') ?
+        requestType.slice(0, 'Version'.length * -1) : requestType;
+    requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
+        'objectPut' : requestTypeParsed;
+    const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
+        'objectPut' : mainApiCall;
     if (bucket.getOwner() === canonicalID) {
         return true;
     }
+    if (parsedMainApiCall === 'objectGet') {
+        if (requestTypeParsed === 'objectGetTagging') {
+            return true;
+        }
+    }
+    if (parsedMainApiCall === 'objectPut') {
+        if (arrayOfAllowed.includes(requestTypeParsed)) {
+            return true;
+        }
+    }
     const bucketAcl = bucket.getAcl();
-    if (requestType === 'bucketGet' || requestType === 'bucketHead') {
+    if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') {
         if (bucketAcl.Canned === 'public-read'
             || bucketAcl.Canned === 'public-read-write'
             || (bucketAcl.Canned === 'authenticated-read'
@@ -50,7 +90,7 @@
             return true;
         }
     }
-    if (requestType === 'bucketGetACL') {
+    if (requestTypeParsed === 'bucketGetACL') {
         if ((bucketAcl.Canned === 'log-delivery-write'
             && canonicalID === logId)
             || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
@@ -66,7 +106,7 @@
         }
     }
-    if (requestType === 'bucketPutACL') {
+    if (requestTypeParsed === 'bucketPutACL') {
         if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
             || bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
             return true;
@@ -80,11 +120,7 @@
         }
     }
-    if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) {
-        return true;
-    }
-    if (requestType === 'objectDelete' || requestType === 'objectPut') {
+    if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') {
         if (bucketAcl.Canned === 'public-read-write'
             || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
             || bucketAcl.WRITE.indexOf(canonicalID) > -1) {
@@ -104,25 +140,39 @@
     // objectPutACL, objectGetACL, objectHead or objectGet, the bucket
     // authorization check should just return true so can move on to check
     // rights at the object level.
-    return (requestType === 'objectPutACL' || requestType === 'objectGetACL' ||
-        requestType === 'objectGet' || requestType === 'objectHead');
+    return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
+        || requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
 }
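An illustrative reading of the normalization performed above; membership of actionsToConsiderAsObjectPut is assumed here (e.g. multipart write actions), not confirmed by this diff:

```js
// 'objectGetVersion' -> 'objectGet'  // trailing 'Version' stripped
// 'objectPutPart'    -> 'objectPut'  // counted as an object write
// 'bucketGet'        -> 'bucketGet'  // unchanged
```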
-function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
+function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
+    isUserUnauthenticated, mainApiCall) {
     const bucketOwner = bucket.getOwner();
+    const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
+        'objectPut' : requestType;
+    const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
+        'objectPut' : mainApiCall;
     // acls don't distinguish between users and accounts, so both should be allowed
-    if (bucketOwnerActions.includes(requestType)
+    if (bucketOwnerActions.includes(requestTypeParsed)
         && (bucketOwner === canonicalID)) {
         return true;
     }
     if (objectMD['owner-id'] === canonicalID) {
         return true;
     }
+    // Backward compatibility
+    if (parsedMainApiCall === 'objectGet') {
+        if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
+            && requestTypeParsed === 'objectGetTagging') {
+            return true;
+        }
+    }
     if (!objectMD.acl) {
         return false;
     }
-    if (requestType === 'objectGet' || requestType === 'objectHead') {
+    if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
         if (objectMD.acl.Canned === 'public-read'
             || objectMD.acl.Canned === 'public-read-write'
             || (objectMD.acl.Canned === 'authenticated-read'
@@ -148,11 +198,11 @@
     // User is already authorized on the bucket for FULL_CONTROL or WRITE or
     // bucket has canned ACL public-read-write
-    if (requestType === 'objectPut' || requestType === 'objectDelete') {
+    if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
         return true;
     }
-    if (requestType === 'objectPutACL') {
+    if (requestTypeParsed === 'objectPutACL') {
         if ((objectMD.acl.Canned === 'bucket-owner-full-control'
             && bucketOwner === canonicalID)
             || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@@ -168,7 +218,7 @@
         }
     }
-    if (requestType === 'objectGetACL') {
+    if (requestTypeParsed === 'objectGetACL') {
         if ((objectMD.acl.Canned === 'bucket-owner-full-control'
             && bucketOwner === canonicalID)
             || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@@ -187,9 +237,9 @@
     // allow public reads on buckets that are whitelisted for anonymous reads
     // TODO: remove this after bucket policies are implemented
     const bucketAcl = bucket.getAcl();
-    const allowPublicReads = publicReadBuckets.includes(bucket.getName()) &&
-        bucketAcl.Canned === 'public-read' &&
-        (requestType === 'objectGet' || requestType === 'objectHead');
+    const allowPublicReads = publicReadBuckets.includes(bucket.getName())
+        && bucketAcl.Canned === 'public-read'
+        && (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
     if (allowPublicReads) {
         return true;
     }
@@ -216,6 +266,20 @@ function _checkBucketPolicyResources(request, resource, log) {
     return evaluators.isResourceApplicable(requestContext, resource, log);
 }
+function _checkBucketPolicyConditions(request, conditions, log) {
+    const ip = request ? requestUtils.getClientIp(request, config) : undefined;
+    if (!conditions) {
+        return true;
+    }
+    // build request context from the request!
+    const requestContext = new RequestContext(request.headers, request.query,
+        request.bucketName, request.objectKey, ip,
+        request.connection.encrypted, request.resourceType, 's3', null, null,
+        null, null, null, null, null, null, null, null, null,
+        request.objectLockRetentionDays);
+    return evaluators.meetConditions(requestContext, conditions, log);
+}
 function _getAccountId(arn) {
     // account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
     return arn.substr(13, 12);
@@ -260,11 +324,11 @@ function _checkPrincipals(canonicalID, arn, principal) {
     return false;
 }
-function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request) {
+function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
     let permission = 'defaultDeny';
     // if requester is user within bucket owner account, actions should be
     // allowed unless explicitly denied (assumes allowed by IAM policy)
-    if (bucketOwner === canonicalID) {
+    if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
         permission = 'allow';
     }
     let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
@@ -273,12 +337,13 @@
         const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
         const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
         const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
+        const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
-        if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Deny') {
+        if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
             // explicit deny trumps any allows, so return immediately
             return 'explicitDeny';
         }
-        if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Allow') {
+        if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
             permission = 'allow';
         }
         copiedStatement = copiedStatement.splice(1);
@@ -286,7 +351,37 @@
     return permission;
 }
-function isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request) {
+function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log,
+    request, aclPermission, results, actionImplicitDenies) {
+    const bucketPolicy = bucket.getBucketPolicy();
+    let processedResult = results[requestType];
+    if (!bucketPolicy) {
+        processedResult = actionImplicitDenies[requestType] === false && aclPermission;
+    } else {
+        const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
+            bucketOwner, log, request, actionImplicitDenies);
+        if (bucketPolicyPermission === 'explicitDeny') {
+            processedResult = false;
+        } else if (bucketPolicyPermission === 'allow') {
+            processedResult = true;
+        } else {
+            processedResult = actionImplicitDenies[requestType] === false && aclPermission;
+        }
+    }
+    return processedResult;
+}
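A sketch of the precedence implemented above, for a single action (inputs illustrative): an explicit deny in the bucket policy always wins, an explicit allow wins next, and otherwise the result falls back to (no implicit IAM deny) && ACL permission.

```js
const outcome = processBucketPolicy(
    'objectGet', bucket, canonicalID, arn, bucket.getOwner(), log, request,
    /* aclPermission */ true,
    /* results */ {},
    /* actionImplicitDenies */ { objectGet: false },
); // -> true unless the bucket policy explicitly denies objectGet
```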
+function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
+    actionImplicitDeniesInput = {}, isWebsite = false) {
+    const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
+    const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
+    const mainApiCall = requestTypes[0];
+    const results = {};
+    return requestTypes.every(_requestType => {
+        // By default, all missing actions are defined as allowed from IAM, to be
+        // backward compatible
+        actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
         // Check to see if user is authorized to perform a
         // particular action on bucket based on ACLs.
         // TODO: Add IAM checks
@@ -297,69 +392,100 @@
             arn = authInfo.getArn();
         }
         // if the bucket owner is an account, users should not have default access
-        if (((bucket.getOwner() === canonicalID) && requesterIsNotUser)
-            || isServiceAccount(canonicalID)) {
-            return true;
+        if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
+            results[_requestType] = actionImplicitDenies[_requestType] === false;
+            return results[_requestType];
         }
-        const aclPermission = checkBucketAcls(bucket, requestType, canonicalID);
-        const bucketPolicy = bucket.getBucketPolicy();
-        if (!bucketPolicy) {
-            return aclPermission;
-        }
-        const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
-            canonicalID, arn, bucket.getOwner(), log, request);
-        if (bucketPolicyPermission === 'explicitDeny') {
-            return false;
-        }
-        return (aclPermission || (bucketPolicyPermission === 'allow'));
+        const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
+        // In case of error bucket access is checked with bucketGet
+        // For website, bucket policy only uses objectGet and ignores bucketGet
+        // https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
+        // bucketGet should be used to check acl but switched to objectGet for bucket policy
+        if (isWebsite && _requestType === 'bucketGet') {
+            // eslint-disable-next-line no-param-reassign
+            _requestType = 'objectGet';
+            actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
+        }
+        return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
+            request, aclPermission, results, actionImplicitDenies);
+    });
 }
-function isObjAuthorized(bucket, objectMD, requestType, canonicalID, authInfo, log, request) {
+function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {},
+    log, request) {
+    const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
+    const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
+    const results = {};
+    return requestTypes.every(_requestType => {
+        // By default, all missing actions are defined as allowed from IAM, to be
+        // backward compatible
+        actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
+        let arn = null;
+        if (authInfo) {
+            arn = authInfo.getArn();
+        }
+        return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
+            request, true, results, actionImplicitDenies);
+    });
+}
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
const mainApiCall = requestTypes[0];
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
const parsedMethodName = _requestType.endsWith('Version')
? _requestType.slice(0, -7) : _requestType;
const bucketOwner = bucket.getOwner();
if (!objectMD) {
// check bucket has read access
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
let permission = 'bucketGet';
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
permission = 'objectPut';
}
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
actionImplicitDenies, isWebsite);
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
&& results[_requestType] === false) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
}
return results[_requestType];
}
let requesterIsNotUser = true;
let arn = null;
let isUserUnauthenticated = false;
if (authInfo) {
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn();
isUserUnauthenticated = arn === undefined;
}
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
// account is authorized if:
// - requesttype is included in bucketOwnerActions and
// - account is the bucket owner
// - requester is account, not user
if (bucketOwnerActions.includes(parsedMethodName)
&& (bucketOwner === canonicalID)
&& requesterIsNotUser) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName,
canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall);
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner,
log, request, aclPermission, results, actionImplicitDenies);
});
}
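The 'Version' suffix handling above can be seen in isolation; a short sketch (hypothetical values) of how a versioned action is mapped back to its base action before ACL checks:

const requestType = 'objectGetVersion';
const parsedMethodName = requestType.endsWith('Version')
    ? requestType.slice(0, -7) : requestType; // 'Version' is 7 characters
console.log(parsedMethodName); // 'objectGet'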
function _checkResource(resource, bucketArn) {
@@ -388,6 +514,117 @@ function validatePolicyResource(bucketName, policy)
});
}
function checkIp(value) {
const errString = 'Invalid IP address in Conditions';
const values = Array.isArray(value) ? value : [value];
for (let i = 0; i < values.length; i++) {
// these preliminary checks are validating the provided
// ip address against ipaddr.js, the library we use when
// evaluating IP condition keys. It ensures compatibility,
// but additional checks are required to enforce the right
// notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
// we would accept different ip formats, which is not
// standard in an AWS use case.
try {
try {
parseCIDR(values[i]);
} catch (err) {
isValid(values[i]);
}
} catch (err) {
return errString;
}
// Apply the existing IP validation logic to each element
const validateIpRegex = ip => {
if (constants.ipv4Regex.test(ip)) {
return ip.split('.').every(part => parseInt(part, 10) <= 255);
}
if (constants.ipv6Regex.test(ip)) {
return ip.split(':').every(part => part.length <= 4);
}
return false;
};
if (validateIpRegex(values[i]) !== true) {
return errString;
}
}
// If the function hasn't returned by now, all elements are valid
return null;
}
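A usage sketch for checkIp, assuming the function above is in scope and that constants.ipv4Regex enforces the dotted-quad/CIDR notation described in the comments (expected results, not verified output):

console.log(checkIp('192.168.100.0/24')); // expected: null (valid CIDR)
console.log(checkIp(['10.0.0.1', 'not-an-ip'])); // expected: 'Invalid IP address in Conditions'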
// This function checks all bucket policy conditions if the values provided
// are valid for the condition type. If not it returns a relevant Malformed policy error string
function validatePolicyConditions(policy) {
const validConditions = [
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
{ conditionKey: 's3:object-lock-remaining-retention-days' },
];
// keys where value type does not seem to be checked by AWS:
// - s3:object-lock-remaining-retention-days
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
return null;
}
// there can be multiple statements in the policy, each with a Condition enclosure
for (let i = 0; i < policy.Statement.length; i++) {
const s = policy.Statement[i];
if (s.Condition) {
const conditionOperators = Object.keys(s.Condition);
// there can be multiple condition operations in the Condition enclosure
// eslint-disable-next-line no-restricted-syntax
for (const conditionOperator of conditionOperators) {
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
const conditionValue = s.Condition[conditionOperator][conditionKey];
const validCondition = validConditions.find(validCondition =>
validCondition.conditionKey === conditionKey
);
// AWS does not return an error if the condition key starts with 'aws:'
// so we reproduce this behaviour
if (!validCondition && !conditionKey.startsWith('aws:')) {
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
}
if (validCondition && validCondition.conditionValueTypeChecker) {
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
if (conditionValueTypeError) {
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
}
}
}
}
}
return null;
}
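A sketch of a policy fragment that should pass validatePolicyConditions, given the validConditions table above (structure assumed from the checks, not a complete bucket policy):

const policy = {
    Statement: [{
        Condition: {
            IpAddress: { 'aws:SourceIp': '10.0.0.0/8' },
        },
    }],
};
console.log(validatePolicyConditions(policy)); // expected: null (no error)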
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts'
&& resourceType === assumedRoleArnResourceType
&& sessionName === backbeatLifecycleSessionName);
}
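Usage sketch, reusing the ARN from the JSDoc example above:

console.log(isLifecycleSession(
    'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle')); // true
console.log(isLifecycleSession(
    'arn:aws:iam::257038443293:user/alice')); // false (not an sts assumed-role session)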
module.exports = {
isBucketAuthorized,
isObjAuthorized,
@@ -398,4 +635,7 @@ module.exports = {
checkBucketAcls,
checkObjectAcls,
validatePolicyResource,
validatePolicyConditions,
isLifecycleSession,
evaluateBucketPolicyWithIAM,
};

View File

@@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3');
}
if (apiMethod === 'bucketPut') {
return null;
}
@@ -65,7 +65,17 @@
const requestContexts = [];
if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';

View File

@@ -24,7 +24,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
});
}
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) {
async.mapLimit(mpus, 1, (mpu, next) => {
const splitterChar = mpu.key.includes(oldSplitter) ?
oldSplitter : splitter;
@@ -40,7 +40,7 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
byteLength: partSizeSum,
});
next(err);
}, request);
}, cb);
}
/**
@@ -49,11 +49,13 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
* @param {object} bucketMD - bucket attributes/metadata
* @param {string} bucketName - bucket in which objectMetadata is stored
* @param {string} canonicalID - account canonicalID of requester
* @param {object} request - request object given by router
* including normalized headers
* @param {object} log - Werelogs logger
* @param {function} cb - callback from async.waterfall in bucketDelete
* @return {undefined}
*/
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) {
log.trace('deleting bucket from metadata');
assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof canonicalID, 'string');
@@ -100,7 +102,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
}
if (objectsListRes.Contents.length) {
return _deleteOngoingMPUs(authInfo, bucketName,
bucketMD, objectsListRes.Contents, request, log, err => {
if (err) {
return next(err);
}

View File

@@ -30,6 +30,9 @@ function bucketShield(bucket, requestType) {
// Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' &&
requestType !== 'initiateMultipartUpload' &&
requestType !== 'objectPutPart' &&
requestType !== 'completeMultipartUpload' &&
requestType !== 'bucketPutACL' &&
requestType !== 'bucketDelete')) {
return true;

View File

@@ -3,7 +3,7 @@ const async = require('async');
const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const locationConstraintCheck = require('../object/locationConstraintCheck');
const { standardMetadataValidateBucketAndObj } =
require('../../../metadata/metadataUtils');
const services = require('../../../services');
@@ -14,7 +14,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
bucketName,
objectKey,
uploadId,
preciseRequestType: request.apiMethods || 'multipartDelete',
request,
};
// For validating the request at the destinationBucket level
@@ -22,10 +22,11 @@
// but the requestType is the more general 'objectDelete'
const metadataValParams = Object.assign({}, metadataValMPUparams);
metadataValParams.requestType = 'objectPut';
const authzIdentityResult = request ? request.actionImplicitDenies : false;
async.waterfall([
function checkDestBucketVal(next) {
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
(err, destinationBucket) => {
if (err) {
return next(err, destinationBucket);
@@ -56,9 +57,14 @@
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
next) {
const location = mpuOverviewObj.controllingLocationConstraint;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.abortMPU(objectKey, uploadId, location, bucketName,
request, destBucket, locationConstraintCheck, log,
(err, skipDataDelete) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityAuthzResults;
if (err) {
return next(err, destBucket);
}

View File

@@ -2,11 +2,13 @@
* Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020
*/
const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Get response header "x-amz-restore"
* Be called by objectHead.js
@@ -32,7 +34,6 @@ function getAmzRestoreResHeader(objMD) {
return undefined;
}
/**
* Check if restore can be done.
*
@@ -41,6 +42,23 @@
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/
function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage,
@@ -52,18 +70,7 @@ function _validateStartRestore(objectMD, log) {
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
log.debug('The object is currently being restored.',
@@ -120,22 +127,36 @@ function validatePutVersionId(objMD, versionId, log) {
}
/**
* Check if the object is already restored, and update the expiration date accordingly:
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored
*/
function _updateObjectExpirationDate(objectMD, log) {
// Check if restoreCompletedAt field exists
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
// checked earlier in the process, so checking again here would create weird states
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
log.debug('The restore status of the object.', {
isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored'
});
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored;
}
@@ -195,12 +216,32 @@ function startRestore(objectMD, restoreParam, log, cb) {
if (updateResultError) {
return cb(updateResultError);
}
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
return cb(null, isObjectAlreadyRestored);
}
/**
* checks if object data is available or if it's in cold storage
* @param {ObjectMD} objMD Object metadata
* @returns {ArsenalError|null} error if object data is not available
*/
function verifyColdObjectAvailable(objMD) {
// return error when object is cold
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
const err = errors.InvalidObjectState
.customizeDescription('The operation is not valid for the object\'s storage class');
return err;
}
return null;
}
module.exports = {
startRestore,
getAmzRestoreResHeader,
validatePutVersionId,
verifyColdObjectAvailable,
};

View File

@@ -5,7 +5,6 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const services = require('../../../services');
const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
@@ -21,7 +20,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure or GCP.';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, log, requestMethod, callback) {
services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, result) => {
if (err) {
@@ -31,7 +30,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
const newDataStoreName = Array.isArray(dataGetInfo) ?
dataGetInfo[0].dataStoreName : null;
return data.batchDelete(dataToDelete, requestMethod,
newDataStoreName, log, err => callback(err, result));
}
return callback(null, result);
});
@@ -51,7 +50,9 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* @param {(object|null)} streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version
@@ -59,7 +60,7 @@
*/
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
overheadField, log, originOp, callback) {
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
@@ -115,6 +116,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
isDeleteMarker,
replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo),
overheadField,
log,
};
@@ -141,7 +143,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
metadataStoreParams.originOp = originOp;
const defaultObjectLockConfiguration
= bucketMD.getObjectLockConfiguration();
if (defaultObjectLockConfiguration) {
@@ -156,7 +158,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
// eslint-disable-next-line no-param-reassign
request.headers[constants.objectLocationConstraintHeader] =
objMD[constants.objectLocationConstraintHeader];
metadataStoreParams.originOp = originOp;
}
const backendInfoObj =
@@ -195,10 +197,9 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
const dontSkipBackend = externalBackends;
/* eslint-enable camelcase */
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
const mdOnlySize = request.headers['x-amz-meta-size'];
return async.waterfall([
function storeData(next) {
if (size === 0) {
@@ -287,11 +288,13 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
metadataStoreParams.versionId = options.versionId;
metadataStoreParams.versioning = options.versioning;
metadataStoreParams.isNull = options.isNull;
metadataStoreParams.deleteNullKey = options.deleteNullKey;
if (options.extraMD) {
Object.assign(metadataStoreParams, options.extraMD);
}
return _storeInMDandDeleteData(bucketName, infoArr,
cipherBundle, metadataStoreParams,
options.dataToDelete, log, requestMethod, next);
},
], callback);
}

View File

@@ -0,0 +1,18 @@
/**
* _bucketRequiresOplogUpdate - whether deleting an object from a bucket requires an oplog update
* @param {BucketInfo} bucket - bucket object
* @return {boolean} whether objects require oplog updates on deletion, or not
*/
function _bucketRequiresOplogUpdate(bucket) {
// Default behavior is to require an oplog update
if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
return true;
}
// If the bucket has lifecycle configuration or notification configuration
// set, we also require an oplog update
return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
}
module.exports = {
_bucketRequiresOplogUpdate,
};
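A hedged usage sketch with a stubbed BucketInfo-like object (the require path is an assumption for illustration):

const { _bucketRequiresOplogUpdate } = require('./bucketRequiresOplogUpdate'); // hypothetical path
const bucket = {
    getLifecycleConfiguration: () => null,
    getNotificationConfiguration: () => null,
};
// falsy: a plain bucket with neither configuration can skip the oplog update
console.log(_bucketRequiresOplogUpdate(bucket));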

View File

@@ -4,23 +4,25 @@
LifecycleDateTime,
LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
const {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier,
expireOneDayEarlier,
timeProgressionFactor,
});
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
}
function formatExpirationHeader(date, id) {
@@ -37,8 +39,10 @@ const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id';
function _generateExpHeadersObjects(rules, params, datetime) {
const tags = {
TagSet: params.tags
? Object.keys(params.tags)
.map(key => ({ Key: key, Value: params.tags[key] }))
: [],
};
const objectInfo = { Key: params.key };
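A sketch of the scaling idea behind scaledMsPerDay (assumed semantics, not the actual Config code): with a time progression factor of N, one lifecycle "day" elapses in 1/N of a real day, which lets expiration be exercised quickly in test environments.

const msPerDay = 24 * 60 * 60 * 1000;
const timeProgressionFactor = 24; // assumption: 1 real hour counts as 1 lifecycle day
const assumedScaledMsPerDay = msPerDay / timeProgressionFactor;
console.log(new Date(Date.now() + 7 * assumedScaledMsPerDay)); // "7 days" from now under scaling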

View File

@@ -0,0 +1,190 @@
const { versioning } = require('arsenal');
const versionIdUtils = versioning.VersionID;
const { lifecycleListing } = require('../../../../constants');
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = lifecycleListing;
function _makeTags(tags) {
const res = [];
Object.entries(tags).forEach(([key, value]) =>
res.push(
{
Key: key,
Value: value,
}
));
return res;
}
function processCurrents(bucketName, listParams, isBucketVersioned, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
IsLatest: true, // for compatibility with AWS ListObjectVersions.
DataStoreName: v.dataStoreName,
ListType: CURRENT_TYPE,
};
// NOTE: The current versions listed for lifecycle should include the version id
// if the bucket is versioned.
if (isBucketVersioned) {
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
content.VersionId = versionId;
}
data.Contents.push(content);
});
return data;
}
function _encodeVersionId(vid) {
let versionId = vid;
if (versionId && versionId !== 'null') {
versionId = versionIdUtils.encode(versionId);
}
return versionId;
}
function processNonCurrents(bucketName, listParams, list) {
const nextVersionIdMarker = _encodeVersionId(list.NextVersionIdMarker);
const versionIdMarker = _encodeVersionId(listParams.versionIdMarker);
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
KeyMarker: listParams.keyMarker,
VersionIdMarker: versionIdMarker,
BeforeDate: listParams.beforeDate,
NextKeyMarker: list.NextKeyMarker,
NextVersionIdMarker: nextVersionIdMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
staleDate: v.staleDate, // lowerCamelCase to be compatible with existing lifecycle.
VersionId: versionId,
DataStoreName: v.dataStoreName,
ListType: NON_CURRENT_TYPE,
};
data.Contents.push(content);
});
return data;
}
function processOrphans(bucketName, listParams, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
data.Contents.push({
Key: item.key,
LastModified: v.LastModified,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
VersionId: versionId,
IsLatest: true, // for compatibility with AWS ListObjectVersions.
ListType: ORPHAN_DM_TYPE,
});
});
return data;
}
function getLocationConstraintErrorMessage(locationName) {
return 'value of the location you are attempting to set ' +
`- ${locationName} - is not listed in the locationConstraint config`;
}
/**
* validateMaxScannedEntries - Validates and returns the maximum scanned entries value.
*
* @param {object} params - Query parameters
* @param {object} config - CloudServer configuration
* @param {number} min - Minimum number of entries to be scanned
* @returns {Object} - An object indicating the validation result:
* - isValid (boolean): Whether the validation is successful.
* - maxScannedLifecycleListingEntries (number): The validated maximum scanned entries value if isValid is true.
*/
function validateMaxScannedEntries(params, config, min) {
let maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
if (params['max-scanned-lifecycle-listing-entries']) {
const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10);
if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min ||
maxEntriesParams > maxScannedLifecycleListingEntries) {
return { isValid: false };
}
maxScannedLifecycleListingEntries = maxEntriesParams;
}
return { isValid: true, maxScannedLifecycleListingEntries };
}
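Usage sketch for validateMaxScannedEntries with hypothetical parameters (the config object shape is assumed from the code above):

const res = validateMaxScannedEntries(
    { 'max-scanned-lifecycle-listing-entries': '5000' },
    { maxScannedLifecycleListingEntries: 10000 },
    1000);
console.log(res); // { isValid: true, maxScannedLifecycleListingEntries: 5000 }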
module.exports = {
processCurrents,
processNonCurrents,
processOrphans,
getLocationConstraintErrorMessage,
validateMaxScannedEntries,
};

View File

@@ -3,7 +3,9 @@ const moment = require('moment');
const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period
@@ -19,8 +21,9 @@ function calculateRetainUntilDate(retention) {
const date = moment();
// Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate
= date.add(retainUntilDaysInMs, 'ms');
return retainUntilDate.toISOString();
}
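To illustrate the change above: retention is now added in scaled milliseconds rather than whole days, so the same code path honors the test-time scaling. A standalone sketch with an unscaled clock (values hypothetical):

const moment = require('moment');
const scaledMsPerDay = 24 * 60 * 60 * 1000; // assumption: no time scaling configured
const retainUntilDate = moment().add(2 * scaledMsPerDay, 'ms'); // days = 2
console.log(retainUntilDate.toISOString());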
/**
@@ -202,7 +205,13 @@
* @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
*/
canModifyObject(hasGovernanceBypass) {
// can modify object if object is not locked
// cannot modify object in any cases if legal hold is enabled
// if no legal hold, can only modify object if bypassing governance when locked
if (!this.isLocked()) {
return true;
}
return !this.legalHold && this.isGovernanceMode() && !!hasGovernanceBypass;
}
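The new canModifyObject logic can be exercised with a minimal stub mimicking the ObjectLockInfo surface (a sketch, not the real class):

const lockInfo = {
    legalHold: false,
    isLocked: () => true,
    isGovernanceMode: () => true,
};
const hasGovernanceBypass = true;
const canModify = !lockInfo.isLocked()
    || (!lockInfo.legalHold && lockInfo.isGovernanceMode() && !!hasGovernanceBypass);
console.log(canModify); // true: governance lock without legal hold, bypass granted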
/**
@@ -296,7 +305,9 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
if (err) {
return cb(err);
}
const explicitDenyExists = authorizationResults.some(
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
if (explicitDenyExists) {
log.trace('authorization check failed for user',
{
'method': 'checkUserPolicyGovernanceBypass',
@@ -304,7 +315,25 @@
});
return cb(errors.AccessDenied);
}
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
authInfo.getCanonicalID(),
authInfo,
actionImplicitDenies,
log,
request);
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
});
}
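The reduce above flattens Vault's per-action results into the actionImplicitDenies map; a standalone sketch with hypothetical results:

const authorizationResults = [
    { action: 'objectDelete', isAllowed: true, isImplicit: false },
    { action: 'bypassGovernanceRetention', isAllowed: false, isImplicit: true },
];
const actionImplicitDenies = authorizationResults.reduce((acc, curr) => {
    acc[curr.action] = curr.isImplicit;
    return acc;
}, {});
console.log(actionImplicitDenies);
// { objectDelete: false, bypassGovernanceRetention: true }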

View File

@@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/**
* Check if tier is supported
@@ -58,13 +58,22 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
bucketName,
objectKey,
versionId: decodedVidResult,
requestType: request.apiMethods || 'restoreObject',
/**
* Restoring an object might not cause any impact on
* the storage, if the object is already restored: in
* this case, the duration is extended. We disable the
* quota evaluation and trigger it manually.
*/
checkQuota: false,
request,
};
return async.waterfall([
// get metadata of bucket and object
function validateBucketAndObject(next) {
return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
log, (err, bucketMD, objectMD) => {
if (err) {
log.trace('request authorization failed', { method: METHOD, error: err });
return next(err);
@@ -115,6 +124,16 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
return next(err, bucketMD, objectMD);
});
},
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,

View File

@@ -0,0 +1,32 @@
const { errors } = require('arsenal');
const { unsupportedSignatureChecksums, supportedSignatureChecksums } = require('../../../../constants');
function validateChecksumHeaders(headers) {
// If the x-amz-trailer header is present the request is using one of the
// trailing checksum algorithms, which are not supported.
if (headers['x-amz-trailer'] !== undefined) {
return errors.BadRequest.customizeDescription('trailing checksum is not supported');
}
const signatureChecksum = headers['x-amz-content-sha256'];
if (signatureChecksum === undefined) {
return null;
}
if (supportedSignatureChecksums.has(signatureChecksum)) {
return null;
}
// If the value is not one of the possible checksum algorithms
// the only other valid value is the actual sha256 checksum of the payload.
// Do a simple sanity check of the length to guard against future algos.
// If the value is an unknown algo, then it will fail checksum validation.
if (!unsupportedSignatureChecksums.has(signatureChecksum) && signatureChecksum.length === 64) {
return null;
}
return errors.BadRequest.customizeDescription('unsupported checksum algorithm');
}
module.exports = validateChecksumHeaders;
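Usage sketch, assuming the module above (header values are hypothetical):

const crypto = require('crypto');
const sha256 = crypto.createHash('sha256').update('body').digest('hex'); // 64 hex chars
console.log(validateChecksumHeaders({ 'x-amz-content-sha256': sha256 })); // expected: null
const err = validateChecksumHeaders({ 'x-amz-trailer': 'x-amz-checksum-crc32c' });
console.log(err !== null); // true: trailing checksums are rejected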

View File

@@ -4,7 +4,7 @@ const async = require('async');
const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config');
const { scaledMsPerDay } = config.getTimeOptions();
const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata
@@ -58,7 +58,7 @@
*/
function getVersionIdResHeader(verCfg, objectMD) {
if (verCfg) {
if (objectMD.isNull || !objectMD.versionId) {
return 'null';
}
return versionIdUtils.encode(objectMD.versionId);
@@ -79,17 +79,34 @@ function checkQueryVersionId(query) {
return undefined;
}
function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb) {
// In compatibility mode, create null versioned keys instead of null keys
let versionId;
let nullVersionMD;
if (config.nullVersionCompatMode) {
versionId = nullVersionId;
nullVersionMD = Object.assign({}, objMD, {
versionId: nullVersionId,
isNull: true,
});
} else {
versionId = 'null';
nullVersionMD = Object.assign({}, objMD, {
versionId: nullVersionId,
isNull: true,
isNull2: true,
});
}
metadata.putObjectMD(bucketName, objKey, nullVersionMD, { versionId }, log, err => {
if (err) {
log.debug('error from metadata storing null version as new version',
{ error: err });
}
cb(err);
});
}
/** check existence and get location of null version data for deletion
* @param {string} bucketName - name of bucket
* @param {string} objKey - name of object key
* @param {object} options - metadata options for getting object MD
@@ -100,49 +117,55 @@ function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
* @param {function} cb - callback
* @return {undefined} - and call callback with (err, dataToDelete)
*/
function _prepareNullVersionDeletion(bucketName, objKey, options, mst, log, cb) {
const nullOptions = {};
if (!options.deleteData) {
return process.nextTick(cb, null, nullOptions);
}
if (options.versionId === mst.versionId) {
// no need to get another key as the master is the target
nullOptions.dataToDelete = mst.objLocation;
return process.nextTick(cb, null, nullOptions);
}
if (options.versionId === 'null') {
// deletion of the null key will be done by the main metadata
// PUT via this option
nullOptions.deleteNullKey = true;
}
return metadata.getObjectMD(bucketName, objKey, options, log,
(err, versionMD) => {
if (err) {
// the null key may not exist, hence it's a normal
// situation to have a NoSuchKey error, in which case
// there is nothing to delete
if (err.is.NoSuchKey) {
log.debug('null version does not exist', {
method: '_prepareNullVersionDeletion',
});
} else {
log.warn('could not get null version metadata', {
error: err,
method: '_prepareNullVersionDeletion',
});
}
return cb(err);
}
if (versionMD.location) {
const dataToDelete = Array.isArray(versionMD.location) ?
versionMD.location : [versionMD.location];
nullOptions.dataToDelete = dataToDelete;
}
return cb(null, nullOptions);
});
}
function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
return metadata.deleteObjectMD(bucketName, objKey, options, log, err => {
if (err) {
log.warn('metadata error deleting null versioned key',
{ bucketName, objKey, error: err, method: '_deleteNullVersionMD' });
}
return cb(err);
});
}
@@ -153,73 +176,103 @@ function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
* @param {object} mst - state of master version, as returned by
* getMasterState()
* @param {string} vstat - bucket versioning status: 'Enabled' or 'Suspended'
* @param {boolean} nullVersionCompatMode - if true, behaves in null
* version compatibility mode and return appropriate values: this mode
* does not attempt to create null keys but create null versioned keys
* instead
*
* @return {object} result object with the following attributes:
* - {object} options: versioning-related options to pass to the
services.metadataStoreObject() call
* - {object} [options.extraMD]: extra attributes to set in object metadata
* - {string} [nullVersionId]: null version key to create, if needed
* - {object} [delOptions]: options for metadata to delete the null
version key, if needed
*/
function processVersioningState(mst, vstat, nullVersionCompatMode) {
const versioningSuspended = (vstat === 'Suspended');
const masterIsNull = mst.exists && (mst.isNull || !mst.versionId);
if (versioningSuspended) {
// versioning is suspended: overwrite the existing null version
const options = { versionId: '', isNull: true };
if (masterIsNull) {
// if the null version exists, clean it up prior to put
if (mst.objLocation) {
options.dataToDelete = mst.objLocation;
}
// backward-compat: a null version key may exist even with
// a null master (due to S3C-7526), if so, delete it (its
// data will be deleted as part of the master cleanup, so
// no "deleteData" param is needed)
//
// "isNull2" attribute is set in master metadata when
// null keys are used, which is used as an optimization to
// avoid having to check the versioned key since there can
// be no more versioned key to clean up
if (mst.isNull && mst.versionId && !mst.isNull2) {
const delOptions = { versionId: mst.versionId };
return { options, delOptions };
}
return { options };
}
if (mst.nullVersionId) {
// backward-compat: delete the null versioned key and data
const delOptions = { versionId: mst.nullVersionId, deleteData: true };
if (mst.nullUploadId) {
delOptions.replayId = mst.nullUploadId;
}
return { options, delOptions };
}
// clean up the eventual null key's location data prior to put
// NOTE: due to metadata v1 internal format, we cannot guess
// from the master key whether there is an associated null
// key, because the master key may be removed whenever the
// latest version becomes a delete marker. Hence we need to
// pessimistically try to get the null key metadata and delete
// it if it exists.
const delOptions = { versionId: 'null', deleteData: true };
return { options, delOptions };
}
// versioning is enabled: create a new version
const options = { versioning: true };
if (masterIsNull) {
// if master is a null version or a non-versioned key,
// copy it to a new null key
const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId;
if (nullVersionCompatMode) {
options.extraMD = {
nullVersionId,
};
if (mst.uploadId) {
options.extraMD.nullUploadId = mst.uploadId;
}
return { options, nullVersionId };
}
if (mst.isNull && !mst.isNull2) {
// if master null version was put with an older
// Cloudserver (or in compat mode), there is a
// possibility that it also has a null versioned key
// associated, so we need to delete it as we write the
// null key
const delOptions = {
versionId: nullVersionId,
};
return { options, nullVersionId, delOptions };
}
return { options, nullVersionId };
}
// backward-compat: keep a reference to the existing null
// versioned key
if (mst.nullVersionId) {
options.extraMD = {
nullVersionId: mst.nullVersionId,
};
if (mst.nullUploadId) {
options.extraMD.nullUploadId = mst.nullUploadId;
}
}
return { options };
}
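One state worked through the new logic above (hypothetical master state): a versioning-suspended bucket whose master is already a null key written with null-key support (isNull2), so only the master needs cleanup.

const mst = {
    exists: true,
    isNull: true,
    isNull2: true,
    versionId: 'v0',
    objLocation: [{ key: 'data-key-1' }],
};
// processVersioningState(mst, 'Suspended', false) is expected to return:
// { options: { versionId: '', isNull: true, dataToDelete: [{ key: 'data-key-1' }] } }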
@@ -246,6 +299,7 @@ function getMasterState(objMD) {
versionId: objMD.versionId,
uploadId: objMD.uploadId,
isNull: objMD.isNull,
isNull2: objMD.isNull2,
nullVersionId: objMD.nullVersionId,
nullUploadId: objMD.nullUploadId,
};
@ -269,9 +323,6 @@ function getMasterState(objMD) {
 * ('' overwrites the master version)
 * options.versioning - (true/undefined) metadata instruction to create new ver
 * options.isNull - (true/undefined) whether new version is null or not
 */
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
    log, callback) {
@ -283,42 +334,102 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
        return process.nextTick(callback, null, options);
    }
    // bucket is versioning configured
    const { options, nullVersionId, delOptions } =
        processVersioningState(mst, vCfg.Status, config.nullVersionCompatMode);
    return async.series([
        function storeNullVersionMD(next) {
            if (!nullVersionId) {
                return process.nextTick(next);
            }
            return _storeNullVersionMD(bucketName, objectKey, nullVersionId, objMD, log, next);
        },
        function prepareNullVersionDeletion(next) {
            if (!delOptions) {
                return process.nextTick(next);
            }
            return _prepareNullVersionDeletion(
                bucketName, objectKey, delOptions, mst, log,
                (err, nullOptions) => {
                    if (err) {
                        return next(err);
                    }
                    Object.assign(options, nullOptions);
                    return next();
                });
        },
        function deleteNullVersionMD(next) {
            if (delOptions &&
                delOptions.versionId &&
                delOptions.versionId !== 'null') {
                // backward-compat: delete old null versioned key
                return _deleteNullVersionMD(
                    bucketName, objectKey, { versionId: delOptions.versionId }, log, next);
            }
            return process.nextTick(next);
        },
    ], err => {
        // it's possible there was a prior request that deleted the
        // null version, so proceed with putting a new version
        if (err && err.is.NoSuchKey) {
            return callback(null, options);
        }
        return callback(err, options);
    });
}
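A sketch of how a caller consumes the preprocessing result (hypothetical; storeObjectWith stands in for the actual metadata store step, which is not shown in this diff):

versioningPreprocessing(bucketName, bucketMD, objectKey, objMD, log,
    (err, options) => {
        if (err) {
            return callback(err);
        }
        // options carries versionId/versioning/isNull flags, plus any
        // dataToDelete gathered while preparing the null key deletion
        return storeObjectWith(options, callback); // placeholder
    });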
/** Return options to pass to Metadata layer for version-specific
* operations with the given requested version ID
*
* @param {object} objectMD - object metadata
* @param {boolean} nullVersionCompatMode - if true, behaves in null
* version compatibility mode
* @return {object} options object with params:
* {string} [options.versionId] - specific versionId to update
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
* Metadata backend if we're updating or deleting a new-style null
* version (stored in master or null key), or not a null version.
*/
function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
// Use the internal versionId if it is a "real" null version (not
// non-versioned)
//
// If the target object is non-versioned: do not specify a
// "versionId" attribute nor "isNull"
//
// If the target version is a null version, i.e. has the "isNull"
// attribute:
//
// - send the "isNull=true" param to Metadata if the version is
// already a null key put by a non-compat mode Cloudserver, to
// let Metadata know that the null key is to be updated or
// deleted. This is the case if the "isNull2" metadata attribute
// exists
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that it is a legacy null version
//
// If the target version is not a null version and is versioned:
//
// - send the "isNull=false" param to Metadata in non-compat
// mode (mandatory for v1 format)
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that an existing null version may not be stored in a
// null key
    //
if (objectMD.versionId === undefined) {
return {};
}
const options = { versionId: objectMD.versionId };
if (objectMD.isNull) {
if (objectMD.isNull2) {
options.isNull = true;
}
} else if (!nullVersionCompatMode) {
options.isNull = false;
}
return options;
}
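Illustrative inputs and outputs for the documented cases (hypothetical metadata objects, compat mode off):

getVersionSpecificMetadataOptions({}, false);
// -> {} (non-versioned object: no versionId, no isNull)
getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true, isNull2: true }, false);
// -> { versionId: 'v1', isNull: true } (new-style null key)
getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true }, false);
// -> { versionId: 'v1' } (legacy null version: no isNull hint)
getVersionSpecificMetadataOptions({ versionId: 'v2' }, false);
// -> { versionId: 'v2', isNull: false } (regular version, non-compat mode)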
/** preprocessingVersioningDelete - return versioning information for S3 to
@ -327,33 +438,69 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
 * @param {object} bucketMD - bucket metadata
 * @param {object} objectMD - obj metadata
 * @param {string} [reqVersionId] - specific version ID sent as part of request
 * @param {boolean} nullVersionCompatMode - if true, behaves in null version compatibility mode
 * @return {object} options object with params:
 * {boolean} [options.deleteData=true|undefined] - whether to delete data (if undefined
 * means creating a delete marker instead)
 * {string} [options.versionId] - specific versionId to delete
 * {boolean} [options.isNull=true|false|undefined] - if set, tells the
 * Metadata backend if we're deleting a new-style null version (stored
 * in master or null key), or not a null version.
 */
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId, nullVersionCompatMode) {
    let options = {};
    if (bucketMD.getVersioningConfiguration() && reqVersionId) {
        options = getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode);
    }
    if (!bucketMD.getVersioningConfiguration() || reqVersionId) {
        // delete data if bucket is non-versioned or the request
        // deletes a specific version
        options.deleteData = true;
    }
    return options;
}
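Hypothetical examples of the resulting options (versionedBucketMD is a placeholder for a bucket with versioning enabled, compat mode off):

// Deleting a specific version: version-specific options plus deleteData
preprocessingVersioningDelete('bkt', versionedBucketMD, { versionId: 'v2' }, 'v2', false);
// -> { versionId: 'v2', isNull: false, deleteData: true }
// No reqVersionId on a versioned bucket: empty options, so the API
// writes a delete marker instead of deleting data
preprocessingVersioningDelete('bkt', versionedBucketMD, { versionId: 'v2' }, undefined, false);
// -> {}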
/**
 * Keep metadata when the object is restored from cold storage,
 * but remove the specific entries we don't want to keep
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {undefined}
*/
function restoreMetadata(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
// We need to keep user metadata and tags
Object.keys(objMD).forEach(key => {
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
metadataStoreParams.metaHeaders[key] = objMD[key];
}
});
if (objMD['x-amz-website-redirect-location']) {
if (!metadataStoreParams.headers) {
metadataStoreParams.headers = {};
}
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
}
if (objMD.replicationInfo) {
metadataStoreParams.replicationInfo = objMD.replicationInfo;
}
if (objMD.legalHold) {
metadataStoreParams.legalHold = objMD.legalHold;
}
if (objMD.acl) {
metadataStoreParams.acl = objMD.acl;
}
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.taggingCopy = objMD.tags;
}
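A small sketch of what restoreMetadata carries over (hypothetical values):

const objMD = {
    'x-amz-meta-color': 'blue',
    'x-amz-meta-scal-s3-restore-attempt': '2', // deliberately skipped
    'creation-time': '2024-01-01T00:00:00.000Z',
    'last-modified': '2024-02-01T00:00:00.000Z',
    tags: { team: 'storage' },
};
const params = { metaHeaders: {} };
restoreMetadata(objMD, params);
// params.metaHeaders -> { 'x-amz-meta-color': 'blue' }
// params.creationTime, params.lastModifiedDate and params.taggingCopy are
// copied from objMD; the restore-attempt counter is dropped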
/** overwritingVersioning - return versioning information for S3 to handle
 * storing version metadata with a specific version id.
 * @param {object} objMD - obj metadata
@ -365,10 +512,8 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
 * version id of the null version
 */
function overwritingVersioning(objMD, metadataStoreParams) {
    metadataStoreParams.updateMicroVersionId = true;

    // set correct originOp
    metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
@ -381,7 +526,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
        restoreRequestedAt: objMD.archive?.restoreRequestedAt,
        restoreRequestedDays: objMD.archive?.restoreRequestedDays,
        restoreCompletedAt: new Date(now),
        restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
    };
/* eslint-enable no-param-reassign */ /* eslint-enable no-param-reassign */
@ -390,8 +535,14 @@ function overwritingVersioning(objMD, metadataStoreParams) {
    const options = {
        versionId,
        isNull: objMD.isNull,
    };
    if (objMD.nullVersionId) {
        options.extraMD = {
            nullVersionId: objMD.nullVersionId,
        };
    }
    restoreMetadata(objMD, metadataStoreParams);
    return options;
}
@ -403,6 +554,7 @@ module.exports = {
    processVersioningState,
    getMasterState,
    versioningPreprocessing,
    getVersionSpecificMetadataOptions,
    preprocessingVersioningDelete,
    overwritingVersioning,
    decodeVID,

View File

@ -101,8 +101,33 @@ function validateWebsiteHeader(header) {
        header.startsWith('http://') || header.startsWith('https://'));
}
/**
* appendWebsiteIndexDocument - append index to objectKey if necessary
* @param {object} request - normalized request object
* @param {string} indexDocumentSuffix - index document from website config
* @param {boolean} force - flag to force append index
* @return {undefined}
*/
function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
const reqObjectKey = request.objectKey ? request.objectKey : '';
/* eslint-disable no-param-reassign */
// find index document if "directory" sent in request
if (reqObjectKey.endsWith('/')) {
request.objectKey += indexDocumentSuffix;
// find index document if no key provided
} else if (reqObjectKey === '') {
request.objectKey = indexDocumentSuffix;
// force for redirect 302 on folder without trailing / that has an index
} else if (force) {
request.objectKey += `/${indexDocumentSuffix}`;
}
/* eslint-enable no-param-reassign */
}
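Expected key rewriting, assuming indexDocumentSuffix = 'index.html' (hypothetical request objects):

const r1 = { objectKey: 'docs/' };
appendWebsiteIndexDocument(r1, 'index.html');       // r1.objectKey -> 'docs/index.html'
const r2 = { objectKey: '' };
appendWebsiteIndexDocument(r2, 'index.html');       // r2.objectKey -> 'index.html'
const r3 = { objectKey: 'docs' };
appendWebsiteIndexDocument(r3, 'index.html', true); // r3.objectKey -> 'docs/index.html'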
module.exports = {
    findRoutingRule,
    extractRedirectInfo,
    validateWebsiteHeader,
    appendWebsiteIndexDocument,
};

View File

@ -0,0 +1,314 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || (bucket.isVersioningEnabled() && versionId)) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
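Worked figures for a non-versioned bucket (hypothetical bucket/objMD stubs, where bucket.isVersioningEnabled() returns false):

const objMD100 = { 'content-length': '100' };
// Replacing a 100-byte object with a 40-byte body counts only the delta:
processBytesToWrite('objectPut', bucket, '', 40, objMD100);    // -> -60
// Deleting it counts a reclaim of the full size:
processBytesToWrite('objectDelete', bucket, '', 0, objMD100);  // -> -100
// Restoring it counts the full size as incoming:
processBytesToWrite('objectRestore', bucket, '', 0, objMD100); // -> 100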
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
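For instance (hypothetical values), a metric snapshot older than QuotaService.maxStaleness is treated as unusable and the request is allowed through:

const metric = { date: '2024-01-01T00:00:00Z', bytesTotal: 1024 };
if (isMetricStale(metric, 'bucket', 'my-bucket', 'objectPut', 0, log)) {
    // quota enforcement is skipped for this request
}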
/**
* Evaluates quotas for a bucket and an account and update inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
/**
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
 * @param {boolean} isStorageReserved - flag to check if the current quota, minus
 * the incoming bytes, is under the limit
 * @param {Logger} log - logger
 * @param {function} callback - callback function
 * @returns {undefined} - the result is returned through the callback
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
// is checked second. This logic handles these APIs calls by
// ensuring the bytes are positives (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes are compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// the sum of the current utilization metrics only are compared with the
// quota. They include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};
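A sketch of how an API handler is expected to call validateQuotas (hypothetical caller; proceedWithPut is a placeholder). Per the comments above, with inflights enabled the 1 MiB is registered with the quota service and compared as part of its totals; with inflights disabled it is only added locally to the returned utilization before comparison:

validateQuotas(request, bucket, account, ['objectPut'], 'objectPut',
    1024 * 1024, false, log, err => {
        if (err) {
            // errors.QuotaExceeded when a bucket or account quota is breached
            return callback(err);
        }
        return proceedWithPut(); // placeholder
    });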

View File

@ -0,0 +1,117 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, isBucketVersioned, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processCurrents(bucketName, listParams, isBucketVersioned, list);
pushMetric('listLifecycleCurrents', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleCurrents');
return callback(null, res);
}
/**
* listLifecycleCurrents - Return list of current versions/masters in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
 * @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleCurrents(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleCurrents' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
const minEntriesToBeScanned = 1;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleCurrents',
request,
};
const listParams = {
listingType: 'DelimiterCurrent',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
marker: params.marker,
excludedDataStoreName,
maxScannedLifecycleListingEntries,
};
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleCurrents');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, isBucketVersioned, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleCurrents');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, isBucketVersioned, log, callback);
});
});
}
module.exports = {
listLifecycleCurrents,
};
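A hypothetical request shape accepted by this handler (query parameter names as parsed above):

const request = {
    bucketName: 'my-bucket',
    query: {
        'max-keys': '500',
        prefix: 'logs/',
        'before-date': '2024-01-01T00:00:00.000Z',
        'excluded-data-store-name': 'cold-location',
    },
};
// listLifecycleCurrents(authInfo, locationConstraints, request, log, cb)
// lists up to 500 current versions under 'logs/' last modified before the
// given date, skipping entries whose data lives at 'cold-location'.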

View File

@ -0,0 +1,127 @@
const { errors, versioning } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const versionIdUtils = versioning.VersionID;
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processNonCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processNonCurrents(bucketName, listParams, list);
pushMetric('listLifecycleNonCurrents', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleNonCurrents');
return callback(null, res);
}
/**
* listLifecycleNonCurrents - Return list of non-current versions in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
 * @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleNonCurrents' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is the required minimum because we must scan at least three entries to determine version eligibility:
// two entries represent the master key and the current version, and the following one the non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleNonCurrents',
request,
};
const listParams = {
listingType: 'DelimiterNonCurrent',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
keyMarker: params['key-marker'],
excludedDataStoreName,
maxScannedLifecycleListingEntries,
};
listParams.versionIdMarker = params['version-id-marker'] ?
versionIdUtils.decode(params['version-id-marker']) : undefined;
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!isBucketVersioned) {
log.debug('bucket is not versioned');
return callback(errors.InvalidRequest.customizeDescription(
'bucket is not versioned'), null);
}
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback);
});
});
}
module.exports = {
listLifecycleNonCurrents,
};

View File

@ -0,0 +1,112 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processOrphans(bucketName, listParams, list);
pushMetric('listLifecycleOrphanDeleteMarkers', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleOrphanDeleteMarkers');
return callback(null, res);
}
/**
* listLifecycleOrphanDeleteMarkers - Return list of expired object delete marker in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
 * @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleOrphanDeleteMarkers' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is the required minimum because we must scan at least three entries to determine version eligibility:
// two entries represent the master key and the current version, and the following one the non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
return callback(errors.InvalidArgument);
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleOrphanDeleteMarkers',
request,
};
const listParams = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
marker: params.marker,
maxScannedLifecycleListingEntries,
};
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!isBucketVersioned) {
log.debug('bucket is not versioned or suspended');
return callback(errors.InvalidRequest.customizeDescription(
'bucket is not versioned'), null);
}
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback);
});
});
}
module.exports = {
listLifecycleOrphanDeleteMarkers,
};

View File

@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -34,7 +34,7 @@ function bucketDelete(authInfo, request, log, cb) {
        request,
    };
    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
        (err, bucketMD) => {
            const corsHeaders = collectCorsHeaders(request.headers.origin,
                request.method, bucketMD);
@ -48,7 +48,7 @@ function bucketDelete(authInfo, request, log, cb) {
            log.trace('passed checks',
                { method: 'metadataValidateBucket' });
            return deleteBucket(authInfo, bucketMD, bucketName,
                authInfo.getCanonicalID(), request, log, err => {
                    if (err) {
                        monitoring.promMetrics(
                            'DELETE', bucketName, err.code, 'deleteBucket');
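The same mechanical migration repeats across the bucket API handlers below; schematically (old call commented, new call as used in these diffs):

// metadataValidateBucket({ authInfo, bucketName, requestType, request }, log, cb);
// becomes:
// standardMetadataValidateBucket(
//     { authInfo, bucketName, requestType: request.apiMethods || '<action>', request },
//     request.actionImplicitDenies, log, cb);
// i.e. the full list of authorized API methods and the implicit-deny results
// from policy evaluation are now threaded into bucket validation.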

View File

@ -38,7 +38,8 @@ function bucketDeleteCors(authInfo, request, log, callback) {
    }
    log.trace('found bucket in metadata');
    if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
        authInfo, log, request, request.actionImplicitDenies)) {
        log.debug('access denied for user on bucket', {
            requestType,
            method: 'bucketDeleteCors',

View File

@ -1,7 +1,7 @@
const async = require('async');
const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
@ -21,12 +21,12 @@ function bucketDeleteEncryption(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketDeleteEncryption',
        request,
    };
    return async.waterfall([
        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
        (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
        (bucket, next) => {
            const sseConfig = bucket.getServerSideEncryption();

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketDeleteLifecycle',
        request,
    };
    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
@ -16,10 +16,10 @@ function bucketDeletePolicy(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketDeletePolicy',
        request,
    };
    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

View File

@ -0,0 +1,58 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
 * Bucket Delete Quota - Delete the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketDeleteQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketDeleteQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
request,
};
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket)),
(bucket, next) => {
bucket.setQuota(0);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketDeleteQuota'
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}
module.exports = bucketDeleteQuota;

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketDeleteReplication',
        request,
    };
    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

View File

@ -1,6 +1,6 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const metadata = require('../metadata/wrapper');
@ -20,16 +20,20 @@ function bucketDeleteTagging(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketDeleteTagging',
        request,
    };
    let bucket = null;
    return waterfall([
        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, b) => {
                if (err) {
                    return next(err);
                }
                bucket = b;
                bucket.setTags([]);
                return next();
            }),
        next => metadata.updateBucket(bucket.getName(), bucket, log, next),
    ], err => {

View File

@ -30,7 +30,8 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
    }
    log.trace('found bucket in metadata');
    if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
        authInfo, log, request, request.actionImplicitDenies)) {
        log.debug('access denied for user on bucket', {
            requestType,
            method: 'bucketDeleteWebsite',

View File

@ -2,7 +2,7 @@ const querystring = require('querystring');
const { errors, versioning, s3middleware } = require('arsenal');
const constants = require('../../constants');
const services = require('../services');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities');
@ -322,7 +322,7 @@ function bucketGet(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGet',
        request,
    };
    const listParams = {
@ -345,7 +345,7 @@ function bucketGet(authInfo, request, log, callback) {
        listParams.marker = params.marker;
    }
    standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {

View File

@ -1,5 +1,5 @@
const aclUtils = require('../utilities/aclUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
@ -44,7 +44,7 @@ function bucketGetACL(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGetACL',
        request,
    };
    const grantInfo = {
@ -55,7 +55,7 @@ function bucketGetACL(authInfo, request, log, callback) {
        },
    };
    standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {

View File

@ -39,7 +39,8 @@ function bucketGetCors(authInfo, request, log, callback) {
    const corsHeaders = collectCorsHeaders(request.headers.origin,
        request.method, bucket);
    if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
        authInfo, log, request, request.actionImplicitDenies)) {
        log.debug('access denied for user on bucket', {
            requestType,
            method: 'bucketGetCors',

View File

@ -4,7 +4,7 @@ const async = require('async');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml;
/**
@ -22,12 +22,12 @@ function bucketGetEncryption(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGetEncryption',
        request,
    };
    return async.waterfall([
        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
        (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
        (bucket, next) => {
            // If sseInfo is present but the `mandatory` flag is not set

View File

@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const LifecycleConfiguration =
    require('arsenal').models.LifecycleConfiguration;
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -21,10 +21,10 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGetLifecycle',
        request,
    };
    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

View File

@ -41,7 +41,8 @@ function bucketGetLocation(authInfo, request, log, callback) {
    const corsHeaders = collectCorsHeaders(request.headers.origin,
        request.method, bucket);
    if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
        authInfo, log, request, request.actionImplicitDenies)) {
        log.debug('access denied for account on bucket', {
            requestType,
            method: 'bucketGetLocation',

View File

@ -1,4 +1,4 @@
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { NotificationConfiguration } = require('arsenal').models;
@ -37,11 +37,11 @@ function bucketGetNotification(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGetNotification',
        request,
    };
    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

View File

@ -1,5 +1,5 @@
const { errors } = require('arsenal');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const ObjectLockConfiguration =
@ -33,10 +33,10 @@ function bucketGetObjectLock(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGetObjectLock',
        request,
    };
    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

View File

@@ -1,6 +1,6 @@
const { errors } = require('arsenal');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');

/**
@@ -17,11 +17,11 @@ function bucketGetPolicy(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketGetPolicy',
+        requestType: request.apiMethods || 'bucketGetPolicy',
        request,
    };
-    return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

lib/api/bucketGetQuota.js (new file, 58 lines added)
View File

@@ -0,0 +1,58 @@
+const { errors } = require('arsenal');
+const { pushMetric } = require('../utapi/utilities');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+
+/**
+ * bucketGetQuota - Get the bucket quota
+ * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
+ * @param {object} request - http request object
+ * @param {object} log - Werelogs logger
+ * @param {function} callback - callback to server
+ * @return {undefined}
+ */
+function bucketGetQuota(authInfo, request, log, callback) {
+    log.debug('processing request', { method: 'bucketGetQuota' });
+    const { bucketName, headers, method } = request;
+    const metadataValParams = {
+        authInfo,
+        bucketName,
+        requestType: request.apiMethods || 'bucketGetQuota',
+        request,
+    };
+    const xml = [];
+    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
+        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
+        if (err) {
+            log.debug('error processing request', {
+                error: err,
+                method: 'bucketGetQuota',
+            });
+            return callback(err, null, corsHeaders);
+        }
+        xml.push(
+            '<?xml version="1.0" encoding="UTF-8"?>',
+            '<GetBucketQuota>',
+            '<Name>', bucket.getName(), '</Name>',
+        );
+        const bucketQuota = bucket.getQuota();
+        if (!bucketQuota) {
+            log.debug('bucket has no quota', {
+                method: 'bucketGetQuota',
+            });
+            return callback(errors.NoSuchQuota, null,
+                corsHeaders);
+        }
+        xml.push('<Quota>', bucketQuota, '</Quota>',
+            '</GetBucketQuota>');
+        pushMetric('getBucketQuota', log, {
+            authInfo,
+            bucket: bucketName,
+        });
+        return callback(null, xml.join(''), corsHeaders);
+    });
+}
+
+module.exports = bucketGetQuota;
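For reference, a successful GET through this handler serializes (per the xml.push calls above) to a single XML document; bucket name and quota value here are hypothetical:

    <?xml version="1.0" encoding="UTF-8"?>
    <GetBucketQuota><Name>example-bucket</Name><Quota>10737418240</Quota></GetBucketQuota>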

View File

@@ -1,6 +1,6 @@
const { errors } = require('arsenal');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const { getReplicationConfigurationXML } =
    require('./apiUtils/bucket/getReplicationConfiguration');
@@ -21,10 +21,10 @@ function bucketGetReplication(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketGetReplication',
+        requestType: request.apiMethods || 'bucketGetReplication',
        request,
    };
-    return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {

View File

@@ -1,4 +1,4 @@
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { pushMetric } = require('../utapi/utilities');
@@ -67,7 +67,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketGetTagging',
+        requestType: request.apiMethods || 'bucketGetTagging',
        request,
    };
    let bucket = null;
@@ -75,7 +75,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
    let tags = null;
    return waterfall([
-        next => metadataValidateBucket(metadataValParams, log,
+        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, b) => {
                bucket = b;
                return next(err);

View File

@@ -1,4 +1,4 @@
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@@ -54,11 +54,11 @@ function bucketGetVersioning(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketGetVersioning',
+        requestType: request.apiMethods || 'bucketGetVersioning',
        request,
    };
-    metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+    standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {

View File

@@ -39,7 +39,8 @@ function bucketGetWebsite(authInfo, request, log, callback) {
    const corsHeaders = collectCorsHeaders(request.headers.origin,
        request.method, bucket);
-    if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
+    if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
+        authInfo, log, request, request.actionImplicitDenies)) {
        log.debug('access denied for user on bucket', {
            requestType,
            method: 'bucketGetWebsite',

View File

@@ -1,5 +1,5 @@
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@@ -19,10 +19,10 @@ function bucketHead(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketHead',
+        requestType: request.apiMethods || 'bucketHead',
        request,
    };
-    metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+    standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {

View File

@@ -45,9 +45,8 @@ function checkLocationConstraint(request, locationConstraint, log) {
    } else if (parsedHost && restEndpoints[parsedHost]) {
        locationConstraintChecked = restEndpoints[parsedHost];
    } else {
-        log.trace('no location constraint provided on bucket put;' +
-            'setting us-east-1');
-        locationConstraintChecked = 'us-east-1';
+        locationConstraintChecked = Object.keys(locationConstraints)[0];
+        log.trace('no location constraint provided on bucket put; setting ' + locationConstraintChecked);
    }

    if (!locationConstraints[locationConstraintChecked]) {
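The default location now derives from configuration instead of a hard-coded region. A sketch of the consequence, with a hypothetical locationConstraints map (JavaScript preserves insertion order for string keys, so "first configured" is well defined):

    // Hypothetical config: the first configured location becomes the default.
    const locationConstraints = {
        'vitastor-hdd': { type: 'vitastor' },
        'us-east-1': { type: 'file' },
    };
    // Before: always 'us-east-1'. After: the first configured key.
    const fallback = Object.keys(locationConstraints)[0]; // => 'vitastor-hdd'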

View File

@@ -6,7 +6,7 @@ const aclUtils = require('../utilities/aclUtils');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@@ -44,7 +44,7 @@ const monitoring = require('../utilities/monitoringHandler');
function bucketPutACL(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketPutACL' });
-    const bucketName = request.bucketName;
+    const { bucketName } = request;
    const canonicalID = authInfo.getCanonicalID();
    const newCannedACL = request.headers['x-amz-acl'];
    const possibleCannedACL = [
@@ -54,19 +54,6 @@ function bucketPutACL(authInfo, request, log, callback) {
        'authenticated-read',
        'log-delivery-write',
    ];
-    if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) {
-        log.trace('invalid canned acl argument', {
-            acl: newCannedACL,
-            method: 'bucketPutACL',
-        });
-        monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
-        return callback(errors.InvalidArgument);
-    }
-    if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
-        log.trace('invalid acl header');
-        monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
-        return callback(errors.InvalidArgument);
-    }
    const possibleGroups = [constants.allAuthedUsersId,
        constants.publicId,
        constants.logId,
@@ -74,7 +61,7 @@ function bucketPutACL(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutACL',
+        requestType: request.apiMethods || 'bucketPutACL',
        request,
    };
    const possibleGrants = ['FULL_CONTROL', 'WRITE',
@@ -105,7 +92,7 @@ function bucketPutACL(authInfo, request, log, callback) {
    return async.waterfall([
        function waterfall1(next) {
-            metadataValidateBucket(metadataValParams, log,
+            standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
                (err, bucket) => {
                    if (err) {
                        log.trace('request authorization failed', {
@@ -114,6 +101,18 @@ function bucketPutACL(authInfo, request, log, callback) {
                        });
                        return next(err, bucket);
                    }
+                    // if the API call is allowed, ensure that the parameters are valid
+                    if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) {
+                        log.trace('invalid canned acl argument', {
+                            acl: newCannedACL,
+                            method: 'bucketPutACL',
+                        });
+                        return next(errors.InvalidArgument);
+                    }
+                    if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
+                        log.trace('invalid acl header');
+                        return next(errors.InvalidArgument);
+                    }
                    return next(null, bucket);
                });
        },

View File

@@ -23,7 +23,7 @@ const requestType = 'bucketPutCors';
 */
function bucketPutCors(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketPutCors' });
-    const bucketName = request.bucketName;
+    const { bucketName } = request;
    const canonicalID = authInfo.getCanonicalID();

    if (!request.post) {
@@ -70,7 +70,8 @@ function bucketPutCors(authInfo, request, log, callback) {
            });
        },
        function validateBucketAuthorization(bucket, rules, corsHeaders, next) {
-            if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
+            if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
+                authInfo, log, request, request.actionImplicitDenies)) {
                log.debug('access denied for account on bucket', {
                    requestType,
                });

View File

@@ -3,7 +3,7 @@ const async = require('async');
const { parseEncryptionXml } = require('./apiUtils/bucket/bucketEncryption');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const kms = require('../kms/wrapper');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -18,17 +18,17 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 */
function bucketPutEncryption(authInfo, request, log, callback) {
-    const bucketName = request.bucketName;
+    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutEncryption',
+        requestType: request.apiMethods || 'bucketPutEncryption',
        request,
    };
    return async.waterfall([
-        next => metadataValidateBucket(metadataValParams, log, next),
+        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
        (bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
        (bucket, next) => {
            log.trace('parsing encryption config', { method: 'bucketPutEncryption' });

View File

@@ -7,7 +7,7 @@ const config = require('../Config').config;
const parseXML = require('../utilities/parseXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@@ -23,11 +23,11 @@ const monitoring = require('../utilities/monitoringHandler');
function bucketPutLifecycle(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketPutLifecycle' });
-    const bucketName = request.bucketName;
+    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutLifecycle',
+        requestType: request.apiMethods || 'bucketPutLifecycle',
        request,
    };
    return waterfall([
@@ -45,7 +45,7 @@ function bucketPutLifecycle(authInfo, request, log, callback) {
                return next(null, configObj);
            });
        },
-        (lcConfig, next) => metadataValidateBucket(metadataValParams, log,
+        (lcConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket) => {
                if (err) {
                    return next(err, bucket);

View File

@@ -4,7 +4,7 @@ const parseXML = require('../utilities/parseXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const getNotificationConfiguration = require('./apiUtils/bucket/getNotificationConfiguration');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');

/**
@@ -19,11 +19,11 @@ const { pushMetric } = require('../utapi/utilities');
function bucketPutNotification(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketPutNotification' });
-    const bucketName = request.bucketName;
+    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutNotification',
+        requestType: request.apiMethods || 'bucketPutNotification',
        request,
    };
@@ -34,7 +34,7 @@ function bucketPutNotification(authInfo, request, log, callback) {
            const notifConfig = notificationConfig.error ? undefined : notificationConfig;
            process.nextTick(() => next(notificationConfig.error, notifConfig));
        },
-        (notifConfig, next) => metadataValidateBucket(metadataValParams, log,
+        (notifConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket) => next(err, bucket, notifConfig)),
        (bucket, notifConfig, next) => {
            bucket.setNotificationConfiguration(notifConfig);

View File

@@ -1,13 +1,13 @@
const { waterfall } = require('async');
const arsenal = require('arsenal');
-const errors = arsenal.errors;
-const ObjectLockConfiguration = arsenal.models.ObjectLockConfiguration;
+const { errors } = arsenal;
+const { models: { ObjectLockConfiguration } } = arsenal;
const parseXML = require('../utilities/parseXML');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');

/**
@@ -26,7 +26,7 @@ function bucketPutObjectLock(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutObjectLock',
+        requestType: request.apiMethods || 'bucketPutObjectLock',
        request,
    };
    return waterfall([
@@ -41,7 +41,7 @@ function bucketPutObjectLock(authInfo, request, log, callback) {
                return next(configObj.error || null, configObj);
            });
        },
-        (objectLockConfig, next) => metadataValidateBucket(metadataValParams,
+        (objectLockConfig, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies,
            log, (err, bucket) => {
                if (err) {
                    return next(err, bucket);

View File

@@ -1,10 +1,9 @@
const async = require('async');
const { errors, models } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
-const { validatePolicyResource } =
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
+const { validatePolicyResource, validatePolicyConditions } =
    require('./apiUtils/authorization/permissionChecks');
const { BucketPolicy } = models;
@@ -17,8 +16,7 @@ const { BucketPolicy } = models;
function _checkNotImplementedPolicy(policyString) {
    // bucket names and key names cannot include "", so including those
    // isolates not implemented keys
-    return policyString.includes('"Condition"')
-        || policyString.includes('"Service"')
+    return policyString.includes('"Service"')
        || policyString.includes('"Federated"');
}
@@ -37,7 +35,7 @@ function bucketPutPolicy(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutPolicy',
+        requestType: request.apiMethods || 'bucketPutPolicy',
        request,
    };
@@ -67,10 +65,10 @@ function bucketPutPolicy(authInfo, request, log, callback) {
                    return next(errors.MalformedPolicy.customizeDescription(
                        'Policy has invalid resource'));
                }
-                return next(null, bucketPolicy);
+                return next(validatePolicyConditions(bucketPolicy), bucketPolicy);
            });
        },
-        (bucketPolicy, next) => metadataValidateBucket(metadataValParams, log,
+        (bucketPolicy, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket) => {
                if (err) {
                    return next(err, bucket);
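Because "Condition" no longer trips the not-implemented pre-check and is instead routed through validatePolicyConditions, a policy like the following (hypothetical bucket and CIDR) can now reach condition validation rather than being rejected outright:

    // Hypothetical policy exercising the newly accepted "Condition" key.
    const policy = {
        Version: '2012-10-17',
        Statement: [{
            Sid: 'AllowGetFromOfficeRange',
            Effect: 'Allow',
            Principal: '*',
            Action: 's3:GetObject',
            Resource: 'arn:aws:s3:::example-bucket/*',
            Condition: { IpAddress: { 'aws:SourceIp': '203.0.113.0/24' } },
        }],
    };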

View File

@@ -2,7 +2,7 @@ const { waterfall } = require('async');
const { errors } = require('arsenal');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const { getReplicationConfiguration } =
    require('./apiUtils/bucket/getReplicationConfiguration');
@@ -30,7 +30,7 @@ function bucketPutReplication(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutReplication',
+        requestType: request.apiMethods || 'bucketPutReplication',
        request,
    };
    return waterfall([
@@ -39,7 +39,7 @@ function bucketPutReplication(authInfo, request, log, callback) {
        // Check bucket user privileges and ensure versioning is 'Enabled'.
        (config, next) =>
            // TODO: Validate that destination bucket exists and has versioning.
-            metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+            standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
                if (err) {
                    return next(err);
                }

View File

@@ -3,7 +3,7 @@ const { s3middleware } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
@@ -38,11 +38,12 @@ function bucketPutTagging(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutTagging',
+        requestType: request.apiMethods || 'bucketPutTagging',
+        request,
    };
    let bucket = null;
    return waterfall([
-        next => metadataValidateBucket(metadataValParams, log,
+        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, b) => {
                bucket = b;
                return next(err);

View File

@@ -4,7 +4,7 @@ const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const versioningNotImplBackends =
    require('../../constants').versioningNotImplBackends;
@@ -119,12 +119,12 @@ function bucketPutVersioning(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'bucketPutVersioning',
+        requestType: request.apiMethods || 'bucketPutVersioning',
        request,
    };
    return waterfall([
        next => _parseXML(request, log, next),
-        next => metadataValidateBucket(metadataValParams, log,
+        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket) => next(err, bucket)), // ignore extra null object,
        (bucket, next) => parseString(request.post, (err, result) => {
            // just for linting; there should not be any parsing error here

View File

@@ -22,7 +22,7 @@ const requestType = 'bucketPutWebsite';
 */
function bucketPutWebsite(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketPutWebsite' });
-    const bucketName = request.bucketName;
+    const { bucketName } = request;
    const canonicalID = authInfo.getCanonicalID();

    if (!request.post) {
@@ -49,7 +49,8 @@ function bucketPutWebsite(authInfo, request, log, callback) {
            });
        },
        function validateBucketAuthorization(bucket, config, next) {
-            if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
+            if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
+                authInfo, log, request, request.actionImplicitDenies)) {
                log.debug('access denied for user on bucket', {
                    requestType,
                    method: 'bucketPutWebsite',

View File

@@ -0,0 +1,85 @@
+const { waterfall } = require('async');
+const { errors } = require('arsenal');
+
+const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
+const metadata = require('../metadata/wrapper');
+const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
+const { parseString } = require('xml2js');
+
+function validateBucketQuotaProperty(requestBody, next) {
+    const quota = requestBody.quota;
+    const quotaValue = parseInt(quota, 10);
+    if (Number.isNaN(quotaValue)) {
+        return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
+    }
+    if (quotaValue <= 0) {
+        return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
+    }
+    return next(null, quotaValue);
+}
+
+function parseRequestBody(requestBody, next) {
+    try {
+        const jsonData = JSON.parse(requestBody);
+        if (typeof jsonData !== 'object') {
+            throw new Error('Invalid JSON');
+        }
+        return next(null, jsonData);
+    } catch (jsonError) {
+        return parseString(requestBody, (xmlError, xmlData) => {
+            if (xmlError) {
+                return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
+            }
+            return next(null, xmlData);
+        });
+    }
+}
+
+function bucketUpdateQuota(authInfo, request, log, callback) {
+    log.debug('processing request', { method: 'bucketUpdateQuota' });
+
+    const { bucketName } = request;
+    const metadataValParams = {
+        authInfo,
+        bucketName,
+        requestType: request.apiMethods || 'bucketUpdateQuota',
+        request,
+    };
+    let bucket = null;
+    return waterfall([
+        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
+            (err, b) => {
+                bucket = b;
+                return next(err, bucket);
+            }),
+        (bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
+        (bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
+            next(err, bucket, quotaValue)),
+        (bucket, quotaValue, next) => {
+            bucket.setQuota(quotaValue);
+            return metadata.updateBucket(bucket.getName(), bucket, log, next);
+        },
+    ], (err, bucket) => {
+        const corsHeaders = collectCorsHeaders(request.headers.origin,
+            request.method, bucket);
+        if (err) {
+            log.debug('error processing request', {
+                error: err,
+                method: 'bucketUpdateQuota',
+            });
+            monitoring.promMetrics('PUT', bucketName, err.code,
+                'updateBucketQuota');
+            return callback(err, err.code, corsHeaders);
+        }
+        monitoring.promMetrics(
+            'PUT', bucketName, '200', 'updateBucketQuota');
+        pushMetric('updateBucketQuota', log, {
+            authInfo,
+            bucket: bucketName,
+        });
+        return callback(null, corsHeaders);
+    });
+}
+
+module.exports = bucketUpdateQuota;
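Since parseRequestBody accepts either JSON or (via xml2js) XML, a caller could exercise this handler with a request stub like the following; all values are hypothetical, and the exact XML shape accepted depends on what xml2js parses into an object with a quota property:

    // Hypothetical request stub for bucketUpdateQuota, JSON body form.
    const request = {
        bucketName: 'example-bucket',
        headers: {},
        method: 'PUT',
        post: JSON.stringify({ quota: 10737418240 }), // bytes; must parse > 0
    };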

View File

@@ -12,7 +12,7 @@ const constants = require('../../constants');
const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning }
    = require('./apiUtils/object/versioning');
const services = require('../services');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const locationConstraintCheck
    = require('./apiUtils/object/locationConstraintCheck');
const { skipMpuPartProcessing } = storage.data.external.backendUtils;
@@ -21,8 +21,6 @@ const { validateAndFilterMpuParts, generateMpuPartStorageInfo } =
const locationKeysHaveChanged
    = require('./apiUtils/object/locationKeysHaveChanged');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
-const logger = require('../utilities/logger');
const { validatePutVersionId } = require('./apiUtils/object/coldStorage');

const versionIdUtils = versioning.VersionID;
@@ -82,7 +80,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
        uploadId,
        // Note: permissions for completing a multipart upload are the
        // same as putting a part.
-        requestType: 'putPart or complete',
+        requestType: request.apiMethods || 'putPart or complete',
        log,
        request,
    };
@@ -133,10 +131,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
            bucketName,
            // Required permissions for this action
            // at the destinationBucket level are same as objectPut
-            requestType: 'objectPut',
+            requestType: request.apiMethods || 'completeMultipartUpload',
            versionId,
+            request,
        };
-        metadataValidateBucketAndObj(metadataValParams, log, next);
+        standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next);
    },
    function validateMultipart(destBucket, objMD, next) {
        if (objMD) {
@@ -214,9 +213,14 @@ function completeMultipartUpload(authInfo, request, log, callback) {
        const mdInfo = { storedParts, mpuOverviewKey, splitter };
        const mpuInfo =
            { objectKey, uploadId, jsonList, bucketName, destBucket };
+        const originalIdentityImpDenies = request.actionImplicitDenies;
+        // eslint-disable-next-line no-param-reassign
+        delete request.actionImplicitDenies;
        return data.completeMPU(request, mpuInfo, mdInfo, location,
            null, null, null, locationConstraintCheck, log,
            (err, completeObjData) => {
+                // eslint-disable-next-line no-param-reassign
+                request.actionImplicitDenies = originalIdentityImpDenies;
                if (err) {
                    return next(err, destBucket);
                }
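The same save/strip/restore sequence around request.actionImplicitDenies recurs in listParts further down; the apparent intent is that requests forwarded to external data backends never carry the IAM evaluation state. A generic sketch of the pattern, with a hypothetical helper name:

    // Hypothetical helper capturing the strip-and-restore pattern above.
    function withoutImplicitDenies(request, backendCall, done) {
        const saved = request.actionImplicitDenies;
        // eslint-disable-next-line no-param-reassign
        delete request.actionImplicitDenies; // hide IAM state from the backend
        backendCall(request, (...args) => {
            // eslint-disable-next-line no-param-reassign
            request.actionImplicitDenies = saved; // restore for later checks
            done(...args);
        });
    }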
@@ -325,6 +329,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
        replicationInfo: getReplicationInfo(objectKey, destBucket,
            false, calculatedSize, REPLICATION_ACTION),
        originOp: 's3:ObjectCreated:CompleteMultipartUpload',
+        overheadField: constants.overheadField,
        log,
    };
    // If key already exists
@@ -394,8 +399,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
        metaStoreParams.versionId = options.versionId;
        metaStoreParams.versioning = options.versioning;
        metaStoreParams.isNull = options.isNull;
-        metaStoreParams.nullVersionId = options.nullVersionId;
-        metaStoreParams.nullUploadId = options.nullUploadId;
+        metaStoreParams.deleteNullKey = options.deleteNullKey;
+        if (options.extraMD) {
+            Object.assign(metaStoreParams, options.extraMD);
+        }
        /* eslint-enable no-param-reassign */

        // For external backends (where completeObjData is not
@@ -467,12 +474,9 @@ function completeMultipartUpload(authInfo, request, log, callback) {
            const newDataStoreName =
                Array.isArray(dataLocations) && dataLocations[0] ?
                    dataLocations[0].dataStoreName : null;
-            const delLog =
-                logger.newRequestLoggerFromSerializedUids(log
-                    .getSerializedUids());
            return data.batchDelete(dataToDelete,
                request.method,
-                newDataStoreName, delLog, err => {
+                newDataStoreName, log, err => {
                    if (err) {
                        return next(err);
                    }
@@ -495,10 +499,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
    function batchDeleteExtraParts(extraPartLocations, destinationBucket,
        aggregateETag, generatedVersionId, next) {
        if (extraPartLocations && extraPartLocations.length > 0) {
-            const delLog = logger.newRequestLoggerFromSerializedUids(
-                log.getSerializedUids());
            return data.batchDelete(extraPartLocations, request.method,
-                null, delLog, err => {
+                null, log, err => {
                    if (err) {
                        return next(err);
                    }

View File

@@ -6,10 +6,11 @@ const convertToXml = s3middleware.convertToXml;
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { hasNonPrintables } = require('../utilities/stringChecks');
+const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants');
const services = require('../services');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const locationConstraintCheck
    = require('./apiUtils/object/locationConstraintCheck');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
@@ -65,7 +66,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
    const websiteRedirectHeader =
        request.headers['x-amz-website-redirect-location'];
    if (request.headers['x-amz-storage-class'] &&
-        !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
+        !config.locationConstraints[request.headers['x-amz-storage-class']]) {
        log.trace('invalid storage-class header');
        monitoring.promMetrics('PUT', bucketName,
            errors.InvalidStorageClass.code, 'initiateMultipartUpload');
@@ -81,7 +82,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
    if (metaHeaders instanceof Error) {
        log.debug('user metadata validation failed', {
            error: metaHeaders,
-            method: 'createAndStoreObject',
+            method: 'initiateMultipartUpload',
        });
        return process.nextTick(() => callback(metaHeaders));
    }
@@ -105,7 +106,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
        authInfo,
        bucketName,
        // Required permissions for this action are same as objectPut
-        requestType: 'objectPut',
+        requestType: request.apiMethods || 'initiateMultipartUpload',
        request,
    };
    const accountCanonicalID = authInfo.getCanonicalID();
@@ -274,7 +275,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
    }

    async.waterfall([
-        next => metadataValidateBucketAndObj(metadataValParams, log,
+        next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
            (error, destinationBucket) => {
                const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket);
                if (error) {
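After this hunk, x-amz-storage-class is valid exactly when it names a configured location constraint rather than one of the static AWS storage classes, so backends double as storage classes. A toy check with a hypothetical config:

    // Hypothetical: validity of x-amz-storage-class after this change.
    const config = { locationConstraints: { 'vitastor-ssd': {}, 'us-east-1': {} } };
    const isValidStorageClass = h => Boolean(config.locationConstraints[h]);
    isValidStorageClass('vitastor-ssd'); // true
    isValidStorageClass('GLACIER');      // false -> InvalidStorageClass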

View File

@@ -6,7 +6,7 @@ const convertToXml = s3middleware.convertToXml;
const constants = require('../../constants');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const services = require('../services');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@@ -96,8 +96,8 @@ function listMultipartUploads(authInfo, request, log, callback) {
        // to list the multipart uploads so we have provided here that
        // the authorization to list multipart uploads is the same
        // as listing objects in a bucket.
-        requestType: 'bucketGet',
-        preciseRequestType: 'listMultipartUploads',
+        requestType: request.apiMethods || 'bucketGet',
+        preciseRequestType: request.apiMethods || 'listMultipartUploads',
        request,
    };
@@ -105,7 +105,7 @@ function listMultipartUploads(authInfo, request, log, callback) {
        function waterfall1(next) {
            // Check final destination bucket for authorization rather
            // than multipart upload bucket
-            metadataValidateBucket(metadataValParams, log,
+            standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
                (err, bucket) => next(err, bucket));
        },
        function getMPUBucket(bucket, next) {

View File

@@ -8,7 +8,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const locationConstraintCheck =
    require('./apiUtils/object/locationConstraintCheck');
const services = require('../services');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@@ -97,7 +97,7 @@ function listParts(authInfo, request, log, callback) {
        bucketName,
        objectKey,
        uploadId,
-        preciseRequestType: 'listParts',
+        preciseRequestType: request.apiMethods || 'listParts',
        request,
    };
    // For validating the request at the destinationBucket level
@@ -114,7 +114,7 @@ function listParts(authInfo, request, log, callback) {
    async.waterfall([
        function checkDestBucketVal(next) {
-            metadataValidateBucketAndObj(metadataValParams, log,
+            standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
                (err, destinationBucket) => {
                    if (err) {
                        return next(err, destinationBucket, null);
@@ -152,8 +152,13 @@ function listParts(authInfo, request, log, callback) {
            mpuOverviewObj,
            destBucket,
        };
+        const originalIdentityImpDenies = request.actionImplicitDenies;
+        // eslint-disable-next-line no-param-reassign
+        delete request.actionImplicitDenies;
        return data.listParts(mpuInfo, request, locationConstraintCheck,
            log, (err, backendPartList) => {
+                // eslint-disable-next-line no-param-reassign
+                request.actionImplicitDenies = originalIdentityImpDenies;
                if (err) {
                    return next(err, destBucket);
                }

View File

@@ -1,7 +1,7 @@
const { errors, versioning } = require('arsenal');
const constants = require('../../constants');
const services = require('../services');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
@@ -71,7 +71,7 @@ function metadataSearch(authInfo, request, log, callback) {
    const metadataValParams = {
        authInfo,
        bucketName,
-        requestType: 'metadataSearch',
+        requestType: request.apiMethods || 'metadataSearch',
        request,
    };
    const listParams = {
@@ -103,7 +103,7 @@ function metadataSearch(authInfo, request, log, callback) {
        listParams.marker = params.marker;
    }
-    metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+    standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {

View File

@@ -11,21 +11,27 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const services = require('../services');
const vault = require('../auth/vault');
-const { isBucketAuthorized } =
+const { isBucketAuthorized, evaluateBucketPolicyWithIAM } =
    require('./apiUtils/authorization/permissionChecks');
const { preprocessingVersioningDelete }
    = require('./apiUtils/object/versioning');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const monitoring = require('../utilities/monitoringHandler');
-const { metadataGetObject } = require('../metadata/metadataUtils');
+const metadataUtils = require('../metadata/metadataUtils');
const { config } = require('../Config');
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
    = require('./apiUtils/object/objectLockHelpers');
const requestUtils = policies.requestUtils;
+const { validObjectKeys } = require('../routes/routeVeeam');
+const { deleteVeeamCapabilities } = require('../routes/veeam/delete');
+const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
+const { overheadField } = require('../../constants');

const versionIdUtils = versioning.VersionID;
+const { data } = require('../data/wrapper');
+const logger = require('../utilities/logger');
+const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');

/*
Format of xml request:
@@ -167,6 +173,63 @@ function _parseXml(xmlToParse, next) {
    });
}

+/**
+ * decodeObjectVersion - decode object version to be deleted
+ * @param {object} entry - entry from data model
+ * @return {Array} - [error] on failure, [null, decodedVersionId] on success
+ **/
+function decodeObjectVersion(entry) {
+    let decodedVersionId;
+    if (entry.versionId) {
+        decodedVersionId = entry.versionId === 'null' ?
+            'null' : versionIdUtils.decode(entry.versionId);
+    }
+    if (decodedVersionId instanceof Error) {
+        return [errors.NoSuchVersion];
+    }
+    return [null, decodedVersionId];
+}
+
+/**
+ * Initialization function for the MultiObjectDelete API that will, based on the
+ * current metadata backend, assess if metadata READ batching is supported. If
+ * yes, the initialization step will call the metadataGetObjects function from
+ * the MetadataWrapper.
+ * @param {string} bucketName - bucket name
+ * @param {string []} inPlay - list of object keys still in play
+ * @param {object} log - logger object
+ * @param {function} callback - callback to call with error or list of objects
+ * @return {undefined}
+ */
+function initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback) {
+    if (config.multiObjectDeleteEnableOptimizations === false) {
+        return callback(null, {});
+    }
+    // If the backend supports batching, we want to optimize the API latency by
+    // first getting all the objects metadata, stored in memory, for later use
+    // in the API. This approach does not change the API architecture, but
+    // transplants an additional piece of code that can greatly improve the API
+    // latency when the database supports batching.
+    const objectKeys = Object.values(inPlay).map(entry => {
+        const [err, versionId] = decodeObjectVersion(entry, bucketName);
+        if (err) {
+            return null;
+        }
+        return {
+            versionId,
+            inPlay: entry,
+        };
+    });
+    return metadataUtils.metadataGetObjects(bucketName, objectKeys, log, (err, cache) => {
+        // This optional step is read-only, so any error can be safely ignored
+        if (err) {
+            return callback(null, {});
+        }
+        return callback(null, cache);
+    });
+}
+
/**
 * gets object metadata and deletes object
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
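A sketch of how this batching cache is consumed downstream; the shapes are inferred from the calls in this diff (metadataGetObject takes the cache and serves entries from it when present, avoiding one metadata round-trip per key), so treat the details as assumptions:

    // Hypothetical consumption of the pre-fetched cache.
    initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log,
        (err, cache) => {
            // cache is {} when batching is disabled or the pre-fetch failed
            inPlay.forEach(entry => {
                const [decodeErr, versionId] = decodeObjectVersion(entry);
                if (decodeErr) {
                    return; // reported per entry by the API proper
                }
                metadataUtils.metadataGetObject(bucketName, entry.key, versionId,
                    cache, log, (getErr, objMD) => {
                        // objMD served from cache when present, else fetched
                    });
            });
        });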
@@ -192,35 +255,19 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
    let numOfObjectsRemoved = 0;
    const skipError = new Error('skip');
    const objectLockedError = new Error('object locked');
+    let deleteFromStorage = [];

-    // doing 5 requests at a time. note that the data wrapper
-    // will do 5 parallel requests to data backend to delete parts
-    return async.forEachLimit(inPlay, 5, (entry, moveOn) => {
+    return async.waterfall([
+        callback => initializeMultiObjectDeleteWithBatchingSupport(bucketName, inPlay, log, callback),
+        (cache, callback) => async.forEachLimit(inPlay, config.multiObjectDeleteConcurrency, (entry, moveOn) => {
            async.waterfall([
-                callback => {
-                    let decodedVersionId;
-                    if (entry.versionId) {
-                        decodedVersionId = entry.versionId === 'null' ?
-                            'null' : versionIdUtils.decode(entry.versionId);
-                    }
-                    if (decodedVersionId instanceof Error) {
-                        monitoring.promMetrics('DELETE', bucketName, 404,
-                            'multiObjectDelete');
-                        return callback(errors.NoSuchVersion);
-                    }
-                    return callback(null, decodedVersionId);
-                },
+                callback => callback(...decodeObjectVersion(entry, bucketName)),
                // for obj deletes, no need to check acl's at object level
                // (authority is at the bucket level for obj deletes)
-                (versionId, callback) => metadataGetObject(bucketName, entry.key,
-                    versionId, log, (err, objMD) => {
-                        // if general error from metadata return error
-                        if (err && !err.is.NoSuchKey) {
-                            monitoring.promMetrics('DELETE', bucketName, err.code,
-                                'multiObjectDelete');
-                            return callback(err);
-                        }
-                        if (err?.is.NoSuchKey) {
+                (versionId, callback) => metadataUtils.metadataGetObject(bucketName, entry.key,
+                    versionId, cache, log, (err, objMD) => callback(err, objMD, versionId)),
+                (objMD, versionId, callback) => {
+                    if (!objMD) {
                        const verCfg = bucket.getVersioningConfiguration();
                        // To adhere to AWS behavior, create a delete marker
                        // if trying to delete an object that does not exist
@ -243,7 +290,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
objMD.location[0].deleteVersion = true; objMD.location[0].deleteVersion = true;
} }
return callback(null, objMD, versionId); return callback(null, objMD, versionId);
}), },
(objMD, versionId, callback) => { (objMD, versionId, callback) => {
// AWS only returns an object lock error if a version id // AWS only returns an object lock error if a version id
// is specified, else continue to create a delete marker // is specified, else continue to create a delete marker
@ -254,7 +301,8 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) { if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) {
return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => { return checkUserGovernanceBypass(request, authInfo, bucket, entry.key, log, error => {
if (error && error.is.AccessDenied) { if (error && error.is.AccessDenied) {
log.debug('user does not have BypassGovernanceRetention and object is locked', { error }); log.debug('user does not have BypassGovernanceRetention and object is locked',
{ error });
return callback(objectLockedError); return callback(objectLockedError);
} }
if (error) { if (error) {
@ -285,24 +333,41 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
return callback(null, objMD, versionId); return callback(null, objMD, versionId);
}, },
(objMD, versionId, callback) => validateQuotas(
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
(objMD, versionId, callback) => { (objMD, versionId, callback) => {
const options = preprocessingVersioningDelete(bucketName, bucket, objMD, versionId); const options = preprocessingVersioningDelete(
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
const deleteInfo = {}; const deleteInfo = {};
if (options && options.deleteData) { if (options && options.deleteData) {
options.overheadField = overheadField;
deleteInfo.deleted = true; deleteInfo.deleted = true;
if (!_bucketRequiresOplogUpdate(bucket)) {
options.doesNotNeedOpogUpdate = true;
}
if (objMD.uploadId) { if (objMD.uploadId) {
// eslint-disable-next-line // eslint-disable-next-line
options.replayId = objMD.uploadId; options.replayId = objMD.uploadId;
} }
return services.deleteObject(bucketName, objMD, return services.deleteObject(bucketName, objMD,
entry.key, options, log, err => entry.key, options, config.multiObjectDeleteEnableOptimizations, log,
callback(err, objMD, deleteInfo)); 's3:ObjectRemoved:Delete', (err, toDelete) => {
if (err) {
return callback(err);
}
if (toDelete) {
deleteFromStorage = deleteFromStorage.concat(toDelete);
}
return callback(null, objMD, deleteInfo);
});
} }
deleteInfo.newDeleteMarker = true; deleteInfo.newDeleteMarker = true;
// This call will create a delete-marker // This call will create a delete-marker
return createAndStoreObject(bucketName, bucket, entry.key, return createAndStoreObject(bucketName, bucket, entry.key,
objMD, authInfo, canonicalID, null, request, objMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, log, (err, result) => deleteInfo.newDeleteMarker, null, overheadField, log,
's3:ObjectRemoved:DeleteMarkerCreated', (err, result) =>
callback(err, objMD, deleteInfo, result.versionId)); callback(err, objMD, deleteInfo, result.versionId));
}, },
], (err, objMD, deleteInfo, versionId) => { ], (err, objMD, deleteInfo, versionId) => {
@ -339,16 +404,51 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
isDeleteMarker = true; isDeleteMarker = true;
deleteMarkerVersionId = entry.versionId; deleteMarkerVersionId = entry.versionId;
} }
successfullyDeleted.push({ entry, isDeleteMarker, successfullyDeleted.push({
deleteMarkerVersionId }); entry, isDeleteMarker,
deleteMarkerVersionId,
});
return moveOn(); return moveOn();
}); });
}, },
// end of forEach func // end of forEach func
err => { err => {
log.trace('finished deleting objects', { numOfObjectsRemoved }); // Batch delete all objects
return next(err, quietSetting, errorResults, numOfObjectsRemoved, const onDone = () => callback(err, quietSetting, errorResults, numOfObjectsRemoved,
successfullyDeleted, totalContentLengthDeleted, bucket); successfullyDeleted, totalContentLengthDeleted, bucket);
if (err && deleteFromStorage.length === 0) {
log.trace('no objects to delete from data backend');
return onDone();
}
// If error but we have objects in the list, delete them to ensure
// consistent state.
log.trace('deleting objects from data backend');
// Split the array into chunks
const chunks = [];
while (deleteFromStorage.length > 0) {
chunks.push(deleteFromStorage.splice(0, config.multiObjectDeleteConcurrency));
}
return async.each(chunks, (chunk, done) => data.batchDelete(chunk, null, null,
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids()), done),
err => {
if (err) {
log.error('error deleting objects from data backend', { error: err });
return onDone(err);
}
return onDone();
});
}),
], (err, ...results) => {
// if general error from metadata return error
if (err) {
monitoring.promMetrics('DELETE', bucketName, err.code,
'multiObjectDelete');
return next(err);
}
return next(null, ...results);
    });
}
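
The chunked batch delete in the final callback above is easier to see in isolation. A minimal sketch, assuming a generic batchDelete(chunk, done) backend call and a positive concurrency value (the real one comes from config.multiObjectDeleteConcurrency):

const async = require('async');

function batchDeleteInChunks(locations, concurrency, batchDelete, cb) {
    const chunks = [];
    while (locations.length > 0) {
        // splice drains `locations` in place, `concurrency` entries at a time
        chunks.push(locations.splice(0, concurrency));
    }
    // one backend batch call per chunk, all chunks in parallel
    return async.each(chunks, (chunk, done) => batchDelete(chunk, done), cb);
}
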
@@ -382,6 +482,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
        return callback(errors.BadDigest);
    }
    const inPlayInternal = [];
    const bucketName = request.bucketName;
    const canonicalID = authInfo.getCanonicalID();
@@ -395,15 +496,47 @@ function multiObjectDelete(authInfo, request, log, callback) {
                return next(null, quietSetting, objects);
            });
        },
-        function checkPolicies(quietSetting, objects, next) {
        function checkBucketMetadata(quietSetting, objects, next) {
const errorResults = [];
return metadata.getBucket(bucketName, log, (err, bucketMD) => {
if (err) {
log.trace('error retrieving bucket metadata',
{ error: err });
return next(err);
}
// check whether bucket has transient or deleted flag
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
// affects the objects.
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results
objects.forEach(entry => {
errorResults.push({
entry,
error: errors.AccessDenied,
});
});
// by sending an empty array as the objects array
// async.forEachLimit below will not actually
// make any calls to metadata or data but will continue on
// to the next step to build xml
return next(null, quietSetting, errorResults, [], bucketMD);
}
return next(null, quietSetting, errorResults, objects, bucketMD);
});
},
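
When the bucket-level check above fails, a single denial fans out to every requested key without any per-object metadata call. A minimal sketch of that mapping (denyAllEntries is a hypothetical helper name):

const { errors } = require('arsenal');

// one AccessDenied result per requested entry; passing [] as the remaining
// objects list then turns the later per-object loop into a no-op
function denyAllEntries(objects) {
    return objects.map(entry => ({ entry, error: errors.AccessDenied }));
}
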
function checkPolicies(quietSetting, errorResults, objects, bucketMD, next) {
            // track keys that are still on track to be deleted
            const inPlay = [];
-            const errorResults = [];
            // if request from account, no need to check policies
            // all objects are inPlay so send array of object keys
            // as inPlay argument
            if (!isRequesterNonAccountUser(authInfo)) {
-                return next(null, quietSetting, errorResults, objects);
                return next(null, quietSetting, errorResults, objects, bucketMD);
            }
            // TODO: once arsenal's extractParams is separated from doAuth
@@ -447,7 +580,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
                            error: errors.AccessDenied });
                    });
                    // send empty array for inPlay
-                    return next(null, quietSetting, errorResults, []);
                    return next(null, quietSetting, errorResults, [], bucketMD);
                }
                if (err) {
                    log.trace('error checking policies', {
@@ -465,6 +598,13 @@ function multiObjectDelete(authInfo, request, log, callback) {
                    });
                    return next(errors.InternalError);
                }
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
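
The reduce above only reshapes data; illustrative input and output values (the action names are examples) make the transformation explicit:

const authorizationResults = [
    { action: 'objectDelete', isAllowed: true, isImplicit: false },
    { action: 'objectDeleteVersion', isAllowed: false, isImplicit: true },
];
const actionImplicitDenies = authorizationResults.reduce((acc, curr) => {
    acc[curr.action] = curr.isImplicit;
    return acc;
}, {});
// actionImplicitDenies: { objectDelete: false, objectDeleteVersion: true }
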
                for (let i = 0; i < authorizationResults.length; i++) {
                    const result = authorizationResults[i];
                    // result is { isAllowed: true,
@@ -480,8 +620,31 @@ function multiObjectDelete(authInfo, request, log, callback) {
                        key: result.arn.slice(slashIndex + 1),
                        versionId: result.versionId,
                    };
-                    if (result.isAllowed) {
                    // Deny immediately if there is an explicit deny
if (!result.isImplicit && !result.isAllowed) {
errorResults.push({
entry,
error: errors.AccessDenied,
});
continue;
}
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
canonicalID,
authInfo,
actionImplicitDenies,
log,
request);
if (areAllActionsAllowed) {
if (validObjectKeys.includes(entry.key)) {
inPlayInternal.push(entry.key);
} else {
                            inPlay.push(entry);
                        }
                    } else {
                        errorResults.push({
                            entry,
@@ -489,50 +652,14 @@ function multiObjectDelete(authInfo, request, log, callback) {
                        });
                    }
                }
-                return next(null, quietSetting, errorResults, inPlay);
-            });
-        },
-        function checkBucketMetadata(quietSetting, errorResults, inPlay, next) {
-            // if no objects in play, no need to check ACLs / get metadata,
-            // just move on if there is no Origin header
-            if (inPlay.length === 0 && !request.headers.origin) {
-                return next(null, quietSetting, errorResults, inPlay,
-                    undefined);
-            }
-            return metadata.getBucket(bucketName, log, (err, bucketMD) => {
-                if (err) {
-                    log.trace('error retrieving bucket metadata',
-                        { error: err });
-                    return next(err);
-                }
-                // check whether bucket has transient or deleted flag
-                if (bucketShield(bucketMD, 'objectDelete')) {
-                    return next(errors.NoSuchBucket);
-                }
-                // if no objects in play, no need to check ACLs
-                if (inPlay.length === 0) {
-                    return next(null, quietSetting, errorResults, inPlay,
-                        bucketMD);
-                }
-                if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
-                    log.trace("access denied due to bucket acl's");
-                    // if access denied at the bucket level, no access for
-                    // any of the objects so all results will be error results
-                    inPlay.forEach(entry => {
-                        errorResults.push({
-                            entry,
-                            error: errors.AccessDenied,
-                        });
-                    });
-                    // by sending an empty array as the inPlay array
-                    // async.forEachLimit below will not actually
-                    // make any calls to metadata or data but will continue on
-                    // to the next step to build xml
-                    return next(null, quietSetting, errorResults, [], bucketMD);
-                }
                return next(null, quietSetting, errorResults, inPlay, bucketMD);
            });
        },
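
The loop above separates explicit denies, which fail immediately, from implicit denies, which are deferred to the combined bucket-policy and IAM evaluation. A sketch of that decision as a standalone helper (classifyAuthzResult is a hypothetical name):

function classifyAuthzResult(result) {
    // explicit deny: the evaluator said deny outright, not merely by default
    if (!result.isImplicit && !result.isAllowed) {
        return 'explicit-deny'; // pushed straight to errorResults
    }
    // allowed or implicitly denied: both still go through
    // evaluateBucketPolicyWithIAM before the final verdict
    return 'evaluate-bucket-policy';
}
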
function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) {
return async.each(inPlayInternal,
(localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next),
err => next(err, quietSetting, errorResults, inPlay, bucketMD));
},
        function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
            bucket, next) {
            return getObjMetadataAndDelete(authInfo, canonicalID, request,
@@ -575,4 +702,6 @@ function multiObjectDelete(authInfo, request, log, callback) {
module.exports = {
    getObjMetadataAndDelete,
    multiObjectDelete,
    decodeObjectVersion,
    initializeMultiObjectDeleteWithBatchingSupport,
};


@@ -12,11 +12,10 @@ const { checkQueryVersionId, versioningPreprocessing }
    = require('./apiUtils/object/versioning');
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
const { data } = require('../data/wrapper');
-const logger = require('../utilities/logger');
const services = require('../services');
const { pushMetric } = require('../utapi/utilities');
const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
    .validateWebsiteHeader;
const { config } = require('../Config');
@@ -24,6 +23,7 @@ const monitoring = require('../utilities/monitoringHandler');
const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const versionIdUtils = versioning.VersionID;
const locationHeader = constants.objectLocationConstraintHeader;
@@ -218,7 +218,16 @@ function objectCopy(authInfo, request, sourceBucket,
        bucketName: sourceBucket,
        objectKey: sourceObject,
        versionId: sourceVersionId,
        getDeleteMarker: true,
        requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
        request,
    };
    const valPutParams = {
@@ -226,6 +235,7 @@
        bucketName: destBucketName,
        objectKey: destObjectKey,
        requestType: 'objectPut',
        checkQuota: false,
        request,
    };
    const dataStoreContext = {
@@ -239,7 +249,7 @@
    const responseHeaders = {};
    if (request.headers['x-amz-storage-class'] &&
-        !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
        !config.locationConstraints[request.headers['x-amz-storage-class']]) {
        log.trace('invalid storage-class header');
        monitoring.promMetrics('PUT', destBucketName,
            errors.InvalidStorageClass.code, 'copyObject');
@@ -259,7 +269,7 @@
    }
    return async.waterfall([
        function checkDestAuth(next) {
-            return metadataValidateBucketAndObj(valPutParams, log,
            return standardMetadataValidateBucketAndObj(valPutParams, request.actionImplicitDenies, log,
                (err, destBucketMD, destObjMD) => {
                    if (err) {
                        log.debug('error validating put part of request',
@@ -277,7 +287,10 @@
                });
        },
        function checkSourceAuthorization(destBucketMD, destObjMD, next) {
-            return metadataValidateBucketAndObj(valGetParams, log,
            return standardMetadataValidateBucketAndObj({
...valGetParams,
destObjMD,
}, request.actionImplicitDenies, log,
                (err, sourceBucketMD, sourceObjMD) => {
                    if (err) {
                        log.debug('error validating get part of request',
@@ -290,6 +303,11 @@
                        log.debug('no source object', { sourceObject });
                        return next(err, null, destBucketMD);
                    }
// check if object data is in a cold storage
const coldErr = verifyColdObjectAvailable(sourceObjMD);
if (coldErr) {
return next(coldErr, null);
}
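
verifyColdObjectAvailable itself is not part of this diff, but the inline logic it replaces in objectGet (visible further down in this compare view) suggests the following shape; a sketch, not the actual helper:

const { errors } = require('arsenal');

// an object is unavailable while it sits in a cold backend: archived and
// either never requested for restore, or with a restore not yet completed
function verifyColdObjectAvailableSketch(objMD) {
    if (objMD.archive &&
        (!objMD.archive.restoreRequestedAt ||
            (objMD.archive.restoreRequestedAt &&
                !objMD.archive.restoreCompletedAt))) {
        return errors.InvalidObjectState;
    }
    return null;
}
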
                    if (sourceObjMD.isDeleteMarker) {
                        log.debug('delete marker on source object',
                            { sourceObject });
@@ -324,6 +342,10 @@
                        dataStoreContext.metaHeaders =
                            storeMetadataParams.metaHeaders;
                    }
// eslint-disable-next-line no-param-reassign
storeMetadataParams.overheadField = constants.overheadField;
                    let dataLocator;
                    // If 0 byte object just set dataLocator to empty array
                    if (!sourceObjMD.location) {
@@ -439,10 +461,15 @@
                return next(null, storeMetadataParams, dataLocator, destObjMD,
                    serverSideEncryption, destBucketMD);
            }
const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
            return data.copyObject(request, sourceLocationConstraintName,
                storeMetadataParams, dataLocator, dataStoreContext,
                backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
                (err, results) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityImpDenies;
                    if (err) {
                        return next(err, destBucketMD);
                    }
@@ -466,10 +493,9 @@
                storeMetadataParams.versioning = options.versioning;
                // eslint-disable-next-line
                storeMetadataParams.isNull = options.isNull;
-                // eslint-disable-next-line
-                storeMetadataParams.nullVersionId = options.nullVersionId;
-                // eslint-disable-next-line
-                storeMetadataParams.nullUploadId = options.nullUploadId;
                if (options.extraMD) {
                    Object.assign(storeMetadataParams, options.extraMD);
                }
                const dataToDelete = options.dataToDelete;
                return next(null, storeMetadataParams, destDataGetInfoArr,
                    destObjMD, serverSideEncryption, destBucketMD,
@@ -518,10 +544,8 @@
            // the same as the destination
            if (!sourceIsDestination && dataToDelete) {
                const newDataStoreName = storeMetadataParams.dataStoreName;
-                const delLog = logger.newRequestLoggerFromSerializedUids(
-                    log.getSerializedUids());
                return data.batchDelete(dataToDelete, request.method,
-                    newDataStoreName, delLog, err => {
                    newDataStoreName, log, err => {
                        if (err) {
                            // if error, log the error and move on as it is not
                            // relevant to the client as the client's


@@ -8,26 +8,30 @@ const { pushMetric } = require('../utapi/utilities');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { decodeVersionId, preprocessingVersioningDelete }
    = require('./apiUtils/object/versioning');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
    = require('./apiUtils/object/objectLockHelpers');
const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissionChecks');
const { config } = require('../Config');
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
const versionIdUtils = versioning.VersionID;
const objectLockedError = new Error('object locked');
const { overheadField } = require('../../constants');
/**
- * objectDelete - DELETE an object from a bucket
 * objectDeleteInternal - DELETE an object from a bucket
 * @param {AuthInfo} authInfo - requester's infos
 * @param {object} request - request object given by router,
 * includes normalized headers
 * @param {Logger} log - werelogs request instance
* @param {boolean} isExpiration - true if the call comes from LifecycleExpiration
 * @param {function} cb - final cb to call with the result and response headers
 * @return {undefined}
 */
-function objectDelete(authInfo, request, log, cb) {
-    log.debug('processing request', { method: 'objectDelete' });
function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
    log.debug('processing request', { method: 'objectDeleteInternal' });
    if (authInfo.isRequesterPublicUser()) {
        log.debug('operation not available for public user');
        monitoring.promMetrics(
@@ -52,14 +56,14 @@ function objectDelete(authInfo, request, log, cb) {
        bucketName,
        objectKey,
        versionId: reqVersionId,
-        requestType: 'objectDelete',
        requestType: request.apiMethods || 'objectDelete',
        request,
    };
    const canonicalID = authInfo.getCanonicalID();
    return async.waterfall([
        function validateBucketAndObj(next) {
-            return metadataValidateBucketAndObj(valParams, log,
            return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log,
                (err, bucketMD, objMD) => {
                    if (err) {
                        return next(err, bucketMD);
@@ -151,11 +155,10 @@ function objectDelete(authInfo, request, log, cb) {
        },
        function deleteOperation(bucketMD, objectMD, next) {
            const delOptions = preprocessingVersioningDelete(
-                bucketName, bucketMD, objectMD, reqVersionId);
                bucketName, bucketMD, objectMD, reqVersionId, config.nullVersionCompatMode);
            const deleteInfo = {
                removeDeleteMarker: false,
                newDeleteMarker: false,
-                isNull: delOptions.isNull,
            };
            if (delOptions && delOptions.deleteData && bucketMD.isNFS() &&
                bucketMD.getReplicationConfiguration()) {
@@ -164,7 +167,10 @@ function objectDelete(authInfo, request, log, cb) {
                // source does not have versioning.
                return createAndStoreObject(bucketName, bucketMD, objectKey,
                    objectMD, authInfo, canonicalID, null, request, true, null,
-                    log, err => {
                    log, isExpiration ?
's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated',
err => {
                        if (err) {
                            return next(err);
                        }
@@ -174,12 +180,15 @@ function objectDelete(authInfo, request, log, cb) {
                deleteInfo.removeDeleteMarker = true;
            }
            return services.deleteObject(bucketName, objectMD,
-                objectKey, delOptions, log, (err, delResult) =>
-                    next(err, bucketMD, objectMD, delResult,
-                        deleteInfo));
                objectKey, delOptions, false, log, isExpiration ?
                    's3:LifecycleExpiration:Delete' :
                    's3:ObjectRemoved:Delete',
                (err, delResult) =>
                    next(err, bucketMD, objectMD, delResult, deleteInfo));
            });
        }
        if (delOptions && delOptions.deleteData) {
delOptions.overheadField = overheadField;
            if (objectMD.isDeleteMarker) {
                // record that we deleted a delete marker to set
                // response headers accordingly
@@ -191,15 +200,25 @@ function objectDelete(authInfo, request, log, cb) {
                delOptions.replayId = objectMD.uploadId;
            }
if (!_bucketRequiresOplogUpdate(bucketMD)) {
delOptions.doesNotNeedOpogUpdate = true;
}
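
_bucketRequiresOplogUpdate is imported from deleteObject.js and its body is not part of this diff; the surrounding code only implies that buckets with no downstream consumers may skip the oplog entry. A speculative sketch, where the notification and replication criteria are assumptions rather than the actual implementation:

// assumption: oplog entries matter only if something consumes them,
// e.g. bucket notifications or replication
function bucketRequiresOplogUpdateSketch(bucketMD) {
    return Boolean(bucketMD.getNotificationConfiguration()
        || bucketMD.getReplicationConfiguration());
}
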
            return services.deleteObject(bucketName, objectMD, objectKey,
-                delOptions, log, (err, delResult) => next(err, bucketMD,
                delOptions, false, log, isExpiration ?
's3:LifecycleExpiration:Delete' :
's3:ObjectRemoved:Delete',
(err, delResult) => next(err, bucketMD,
                    objectMD, delResult, deleteInfo));
        }
        // putting a new delete marker
        deleteInfo.newDeleteMarker = true;
        return createAndStoreObject(bucketName, bucketMD,
            objectKey, objectMD, authInfo, canonicalID, null, request,
-            deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) => {
            deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ?
's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated',
(err, newDelMarkerRes) => {
                next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
            });
        },
@@ -288,4 +307,21 @@ function objectDelete(authInfo, request, log, cb) {
    });
}
-module.exports = objectDelete;
/**
* This function is used to delete an object from a bucket. The bucket must
* already exist and the user must have permission to delete the object.
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {werelogs.Logger} log - Logger object
* @param {function} cb - callback to server
* @return {undefined}
*/
function objectDelete(authInfo, request, log, cb) {
log.debug('processing request', { method: 'objectDelete' });
return objectDeleteInternal(authInfo, request, log, false, cb);
}
module.exports = {
objectDelete,
objectDeleteInternal,
};
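
A hypothetical lifecycle-side counterpart to the objectDelete wrapper above, shown only to illustrate the isExpiration flag; it is not a function exported by this file. Passing true switches the emitted events to the 's3:LifecycleExpiration:*' family:

function objectDeleteFromLifecycle(authInfo, request, log, cb) {
    return objectDeleteInternal(authInfo, request, log, true, cb);
}
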


@@ -1,16 +1,17 @@
const async = require('async');
const { errors } = require('arsenal');
-const { decodeVersionId, getVersionIdResHeader }
const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions }
    = require('./apiUtils/object/versioning');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
const { data } = require('../data/wrapper');
const { config } = require('../Config');
const REPLICATION_ACTION = 'DELETE_TAGGING';
/**
@@ -41,13 +42,14 @@ function objectDeleteTagging(authInfo, request, log, callback) {
        authInfo,
        bucketName,
        objectKey,
-        requestType: 'objectDeleteTagging',
        versionId: reqVersionId,
getDeleteMarker: true,
requestType: request.apiMethods || 'objectDeleteTagging',
        request,
    };
    return async.waterfall([
-        next => metadataValidateBucketAndObj(metadataValParams, log,
        next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
@@ -64,6 +66,8 @@ function objectDeleteTagging(authInfo, request, log, callback) {
                if (objectMD.isDeleteMarker) {
                    log.trace('version is a delete marker',
                        { method: 'objectDeleteTagging' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
                    return next(errors.MethodNotAllowed, bucket);
                }
                return next(null, bucket, objectMD);
@@ -71,8 +75,7 @@ function objectDeleteTagging(authInfo, request, log, callback) {
        (bucket, objectMD, next) => {
            // eslint-disable-next-line no-param-reassign
            objectMD.tags = {};
-            const params = objectMD.versionId ? { versionId:
-                objectMD.versionId } : {};
            const params = getVersionSpecificMetadataOptions(objectMD, config.nullVersionCompatMode);
            const replicationInfo = getReplicationInfo(objectKey, bucket, true,
                0, REPLICATION_ACTION, objectMD);
            if (replicationInfo) {
@@ -88,7 +91,7 @@ function objectDeleteTagging(authInfo, request, log, callback) {
        },
        (bucket, objectMD, next) =>
            // if external backends handles tagging
-            data.objectTagging('Delete', objectKey, bucket, objectMD,
            data.objectTagging('Delete', objectKey, bucket.getName(), objectMD,
                log, err => next(err, bucket, objectMD)),
    ], (err, bucket, objectMD) => {
        const additionalResHeaders = collectCorsHeaders(request.headers.origin,
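
The inline expression removed in this hunk shows the core of what getVersionSpecificMetadataOptions computes; a sketch of that core (the real helper also takes config.nullVersionCompatMode, whose handling is not visible in this diff):

// pass a versionId option only when the object is an actual version
function getVersionSpecificMetadataOptionsSketch(objectMD) {
    return objectMD.versionId ? { versionId: objectMD.versionId } : {};
}
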


@@ -15,12 +15,13 @@ const getReplicationBackendDataLocator =
    require('./apiUtils/object/getReplicationBackendDataLocator');
const checkReadLocation = require('./apiUtils/object/checkReadLocation');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { config } = require('../Config');
const { locationConstraints } = config;
const monitoring = require('../utilities/monitoringHandler');
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const validateHeaders = s3middleware.validateConditionalHeaders;
@@ -64,11 +65,12 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
        bucketName,
        objectKey,
        versionId,
-        requestType: 'objectGet',
        getDeleteMarker: true,
requestType: request.apiMethods || 'objectGet',
        request,
    };
-    return metadataValidateBucketAndObj(mdValParams, log,
    return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log,
        (err, bucket, objMD) => {
            const corsHeaders = collectCorsHeaders(request.headers.origin,
                request.method, bucket);
@@ -88,16 +90,12 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
                return callback(err, null, corsHeaders);
            }
            const verCfg = bucket.getVersioningConfiguration();
-            if (objMD.archive &&
-                // Object is in cold backend
-                (!objMD.archive.restoreRequestedAt ||
-                // Object is being restored
-                (objMD.archive.restoreRequestedAt &&
-                !objMD.archive.restoreCompletedAt))) {
-                const error = errors.InvalidObjectState;
            // check if object data is in a cold storage
            const coldErr = verifyColdObjectAvailable(objMD);
            if (coldErr) {
                monitoring.promMetrics(
-                    'GET', bucketName, error.code, 'getObject');
-                return callback(error, null, corsHeaders);
                    'GET', bucketName, coldErr.code, 'getObject');
                return callback(coldErr, null, corsHeaders);
            }
            if (objMD.isDeleteMarker) {
                const responseMetaHeaders = Object.assign({},


@@ -7,7 +7,7 @@ const { pushMetric } = require('../utapi/utilities');
const { decodeVersionId, getVersionIdResHeader }
    = require('./apiUtils/object/versioning');
const vault = require('../auth/vault');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
// Sample XML response:
@@ -54,12 +54,14 @@ function objectGetACL(authInfo, request, log, callback) {
    }
    const versionId = decodedVidResult;
// FIXME pass 'getDeleteMarker: true' option to set
// 'x-amz-delete-marker' header (see S3C-7592)
    const metadataValParams = {
        authInfo,
        bucketName,
        objectKey,
        versionId,
-        requestType: 'objectGetACL',
        requestType: request.apiMethods || 'objectGetACL',
        request,
    };
    const grantInfo = {
@@ -72,7 +74,7 @@ function objectGetACL(authInfo, request, log, callback) {
    return async.waterfall([
        function validateBucketAndObj(next) {
-            return metadataValidateBucketAndObj(metadataValParams, log,
            return standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
                (err, bucket, objectMD) => {
                    if (err) {
                        log.trace('request authorization failed',
@@ -90,10 +92,14 @@ function objectGetACL(authInfo, request, log, callback) {
                    if (versionId) {
                        log.trace('requested version is delete marker',
                            { method: 'objectGetACL' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
                        return next(errors.MethodNotAllowed);
                    }
                    log.trace('most recent version is delete marker',
                        { method: 'objectGetACL' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
                    return next(errors.NoSuchKey);
                }
                return next(null, bucket, objectMD);


@@ -4,7 +4,7 @@ const { errors, s3middleware } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader }
    = require('./apiUtils/object/versioning');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -33,17 +33,19 @@ function objectGetLegalHold(authInfo, request, log, callback) {
    }
    const versionId = decodedVidResult;
// FIXME pass 'getDeleteMarker: true' option to set
// 'x-amz-delete-marker' header (see S3C-7592)
    const metadataValParams = {
        authInfo,
        bucketName,
        objectKey,
-        requestType: 'objectGetLegalHold',
        versionId,
requestType: request.apiMethods || 'objectGetLegalHold',
        request,
    };
    return async.waterfall([
-        next => metadataValidateBucketAndObj(metadataValParams, log,
        next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
@@ -61,10 +63,14 @@ function objectGetLegalHold(authInfo, request, log, callback) {
                if (versionId) {
                    log.trace('requested version is delete marker',
                        { method: 'objectGetLegalHold' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
                    return next(errors.MethodNotAllowed);
                }
                log.trace('most recent version is delete marker',
                    { method: 'objectGetLegalHold' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
                    return next(errors.NoSuchKey);
                }
                if (!bucket.isObjectLockEnabled()) {


@@ -4,7 +4,7 @@ const { errors, s3middleware } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader }
    = require('./apiUtils/object/versioning');
-const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -33,17 +33,19 @@ function objectGetRetention(authInfo, request, log, callback) {
    }
    const reqVersionId = decodedVidResult;
// FIXME pass 'getDeleteMarker: true' option to set
// 'x-amz-delete-marker' header (see S3C-7592)
    const metadataValParams = {
        authInfo,
        bucketName,
        objectKey,
-        requestType: 'objectGetRetention',
        versionId: reqVersionId,
requestType: request.apiMethods || 'objectGetRetention',
        request,
    };
    return async.waterfall([
-        next => metadataValidateBucketAndObj(metadataValParams, log,
        next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
@@ -61,10 +63,14 @@ function objectGetRetention(authInfo, request, log, callback) {
                if (reqVersionId) {
                    log.trace('requested version is delete marker',
                        { method: 'objectGetRetention' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
                    return next(errors.MethodNotAllowed);
                }
                log.trace('most recent version is delete marker',
                    { method: 'objectGetRetention' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
                    return next(errors.NoSuchKey);
                }
                if (!bucket.isObjectLockEnabled()) {

Some files were not shown because too many files have changed in this diff.