Compare commits


1205 Commits

Author SHA1 Message Date
Vitaliy Filippov b5711e9cbf Use fs.readFileSync to read config file instead of require 2024-08-13 11:19:38 +03:00
Vitaliy Filippov 36dc6298d2 Use webpack to pack 2024-08-13 02:20:08 +03:00
Vitaliy Filippov bc2d637578 Add installation instructions for Vitastor backend 2024-08-12 01:36:42 +03:00
Vitaliy Filippov b543695048 Add example Vitastor backend configs 2024-08-11 17:24:05 +03:00
Vitaliy Filippov 90024d044d Configure "legacy" werelogs because otherwise MultipleBackendGateway was skipping messages 2024-08-04 01:22:48 +03:00
Vitaliy Filippov 451ab33f68 Use config.workers instead of config.clusters 2024-08-03 14:10:39 +03:00
Vitaliy Filippov c86107e912 Add authdata config file reference to config.json 2024-08-03 01:36:01 +03:00
Vitaliy Filippov 0a5962f256 Require scality kms only if kms backend is scality 2024-08-03 01:29:04 +03:00
Vitaliy Filippov 0e292791c6 Setup backends in config.json 2024-08-02 01:45:38 +03:00
Vitaliy Filippov fc07729bd0 Use ^versions 2024-08-02 01:44:13 +03:00
Vitaliy Filippov 4527dd6795 Do not store actual configs in git 2024-08-01 15:52:02 +03:00
Vitaliy Filippov 05fb581023 Use x-amz-storage-class instead of x-amz-meta-scal-location-constraint
FIXME: Ideally, both locations and storage classes should be supported
2024-07-28 02:00:38 +03:00
Vitaliy Filippov 956739a04e Use internal vaultclient for utapi server 2024-07-23 16:32:48 +03:00
Vitaliy Filippov 7ad0888a66 Change git dependency URLs 2024-07-21 17:36:47 +03:00
Vitaliy Filippov bf01ba4ed1 Change git dependency URLs 2024-07-21 15:26:06 +03:00
Vitaliy Filippov ab019e7e50 Make vaultclient dependency optional 2024-07-21 14:19:54 +03:00
Vitaliy Filippov 3797695e74 Make bucketclient dependency optional 2024-07-18 11:17:05 +03:00
Vitaliy Filippov c8084196c4 Remove remote management 2024-07-16 20:34:11 +03:00
bert-e b72e918ff9 Merge branch 'w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.8/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 22887f47d8 Merge branch 'w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 0cd10a73f3 Merge branch 'w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
bert-e e139406612 Merge branch 'bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
Maha Benzekri d91853a38b
processBucketPolicy fixup for objectDelete
Since https://github.com/scality/cloudserver/pull/5580,
we now send a requestContext with no specific resource instead
of "null", which results in a policy evaluation error.
As we get an implicit deny for the requestType "objectDelete",
the processed result becomes false, thus sending an empty
array of objects to vault and resulting in a deny even when the policy
allows the action on specific objects.

Linked Issue: https://scality.atlassian.net/browse/CLDSRV-555
2024-07-15 14:20:08 +02:00
Mickael Bourgois a7e798f909
CLDSRV-544: bump version 8.8.27 2024-07-03 19:08:02 +02:00
Mickael Bourgois 3a1ba29869
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-544-stderr' into w/8.8/improvement/CLDSRV-544-stderr 2024-07-03 19:07:41 +02:00
Mickael Bourgois dbb9b6d787
CLDSRV-544: bump version 8.7.48 2024-07-03 18:52:35 +02:00
Mickael Bourgois fce76f0934
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-544-stderr' into w/8.7/improvement/CLDSRV-544-stderr 2024-07-03 18:52:20 +02:00
Mickael Bourgois 0e39aaac09
CLDSRV: bump version 8.6.27 2024-07-03 18:48:28 +02:00
Mickael Bourgois 0b14c93fac
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-544-stderr' into w/8.6/improvement/CLDSRV-544-stderr 2024-07-03 18:48:12 +02:00
Mickael Bourgois ab2960bbf4
CLDSRV-544: bump version 2024-07-01 12:28:23 +02:00
Mickael Bourgois 7305b112e2
Merge remote-tracking branch 'origin/improvement/CLDSRV-544-stderr' into w/7.70/improvement/CLDSRV-544-stderr 2024-07-01 12:28:07 +02:00
Mickael Bourgois cd9e2e757b
CLDSRV-544: bump version 2024-06-30 21:15:52 +02:00
Mickael Bourgois ca0904f584
CLDSRV-544 Add timestamp on stderr utapi v1 2024-06-30 21:15:52 +02:00
Mickael Bourgois 0dd3dd35e6
CLDSRV-544: Add timestamp on stderr
The previous version would not exit the master of the cluster;
now it exits as it should.
2024-06-30 21:15:52 +02:00
bert-e bf7e4b7e23 Merge branch 'w/8.7/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:30 +00:00
bert-e 92f4794727 Merge branch 'w/8.6/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:29 +00:00
Jonathan Gramain c6ef85e3a1 Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-fixup-version' into w/8.6/bugfix/CLDSRV-547-fixup-version 2024-06-27 14:05:27 -07:00
Jonathan Gramain c0fe0cfbcf CLDSRV-547 [fixup] bump version to 7.70.49
Fixup the version, as 7.70.48 was already tagged
2024-06-27 11:42:37 -07:00
bert-e 9c936f2b83 Merge branch 'w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
bert-e d26bac2ebc Merge branch 'w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
Jonathan Gramain cfb9db5178 Merge branch 'w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:53:41 -07:00
Jonathan Gramain 2ce004751a Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:32:45 -07:00
Jonathan Gramain 539219e046 CLDSRV-547 bump cloudserver version 2024-06-27 10:27:45 -07:00
Jonathan Gramain be49e55db5 bf: CLDSRV-547 update redis config for utapi reindex
Update the redis configuration of utapi reindex to include a list of
sentinels, rather than a single sentinel (previously set to
"localhost" in Federation).

I took this opportunity to clean up tech debt related to parsing the
redis configuration, using "joi" for validation instead and making it
common across the three different places where the redis config is
parsed. Not doing so would have required yet another copy-paste of
dumb and error-prone validation code. Added unit tests for the new validation.
2024-06-27 10:25:10 -07:00
bert-e e6b240421b Merge branch 'w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.8/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
bert-e 81739e3ecf Merge branch 'w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
Jonathan Gramain c475503248 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-25 18:40:18 -07:00
bert-e 7acbd5d2fb Merge branch 'bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:39:02 +00:00
Jonathan Gramain 8d726322e5 CLDSRV-549 restore 'git.commit-sha' and 'git.repository' labels
Add back the 'git.commit-sha' and 'git.repository' labels to pushed
images, which were no longer attached after the change of registry.
2024-06-25 18:26:54 -07:00
williamlardier 4f7aa54886 CLDSRV-541: bump project version 2024-06-13 13:58:54 +02:00
williamlardier 0117a5b0b4 CLDSRV-541: add unit test for deleteobjects authz 2024-06-13 13:58:54 +02:00
williamlardier f679831ba2 CLDSRV-541: update unit tests 2024-06-13 13:56:18 +02:00
williamlardier bb162ca7d3 CLDSRV-541: send request context in deleteobjects to get quota information 2024-06-13 11:58:33 +02:00
williamlardier 0c6dfc7b6e CLDSRV-537: bump project version 2024-05-31 13:47:26 +02:00
williamlardier d608d849df CLDSRV-537: bump checkout version for alerts 2024-05-31 13:47:26 +02:00
williamlardier 2cb63f58d4 CLDSRV-537: bump action-prom-render-test version 2024-05-31 13:44:05 +02:00
williamlardier 51585712f4 CLDSRV-537: do not raise quota error if no quota is defined
This ensures that fresh installs, or buckets that get emptied, do not
trigger the alert by mistake.
2024-05-31 13:44:05 +02:00
bert-e 61eb24e46f Merge branch 'w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a34b162782 Merge branch 'w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.8/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a9e50fe046 Merge branch 'w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
bert-e 4150a8432e Merge branch 'bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
Taylor McKinnon 7e70ff9cbc Disable git clone protection to work around git bug affecting git-lfs 2024-05-22 10:05:17 -07:00
bert-e 09dc45289c Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:31 +00:00
bert-e 47c628e0e1 Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:30 +00:00
Nicolas Humbert a1f4d3fe8a CLDSRV-529 use shorthand utapi dependency format 2024-05-17 15:10:40 +02:00
williamlardier 926242b077 CLDSRV-553: bump project version 2024-05-17 12:35:59 +02:00
williamlardier aa2aac5db3 CLDSRV-553: functional restore test to simulate cold backend calls 2024-05-17 12:35:59 +02:00
williamlardier f2e2d82e51 CLDSRV-553: unit test the onlyCheckQuota flag 2024-05-17 12:35:59 +02:00
williamlardier 88ad86b0c6 CLDSRV-553: adapt calls to quota evaluation
When the API is being called by a cold backend, the
x-scal-s3-version-id header is set. In this case, the quotas must
be evaluated with a 0 inflight.
2024-05-17 12:35:59 +02:00
bert-e 8f25892247 Merge branch 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:32 +00:00
bert-e 9ac207187b Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:31 +00:00
Anurag Mittal 624a04805f
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-529/bump_utapi' into w/8.6/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:40:00 +02:00
Anurag Mittal ba99933765
Merge remote-tracking branch 'origin/bugfix/CLDSRV-529/bump_utapi' into w/7.70/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:36:36 +02:00
williamlardier 38d1ac1d2c CLDSRV-553: conditionally force evaluating quotas with 0 inflight
A corner case was found where any PUT from the cold backend would
fail if the quota is already exceeded: the storage was reserved
for the restore, but the restore itself requires some more bytes
as inflights when evaluating quotas. By passing a flag to the quota
evaluation function, we ensure that, in these cases, we can
evaluate the quotas with 0 inflight.
2024-05-17 08:06:35 +02:00
Taylor McKinnon 4f34a34a11 bf(CLDSRV-529): Bump version 2024-05-16 12:19:45 -07:00
Taylor McKinnon 53f2a159fa bf(CLDSRV-529): Bump utapi 2024-05-16 12:18:24 -07:00
Maha Benzekri 63f6a75a86
CLDSRV-530: bump project version 2024-05-10 18:36:01 +02:00
Maha Benzekri 41acc7968e
CLDSRV-530: from accountwithQuota to accountWithQuotaCount 2024-05-10 18:32:07 +02:00
williamlardier c98c5207fc CLDSRV-520: bump project version 2024-05-10 09:51:02 +02:00
williamlardier 615ee393a4 CLDSRV-520: fix federation image with tsc 2024-05-10 09:51:02 +02:00
williamlardier 97dfc699aa CLDSRV-520: bump cloudserver version 2024-05-10 08:12:35 +02:00
williamlardier 76786282d1 CLDSRV-520: deduplicate quota logic 2024-05-10 08:12:35 +02:00
williamlardier a19d6524be CLDSRV-520: generic quota retrieval latency dashboard 2024-05-10 08:12:35 +02:00
williamlardier bbf6dfba22 CLDSRV-520: monitor quota cleanup
The finalization of the quota logic will always be executed. Tests
are added to ensure the inflights are only cleaned when they are
enabled and an error occurred in the API.
In any case, this ensures we monitor quotas in a single place,
for each executed action, and correctly compute the total
duration of the quota impact on the API.
2024-05-10 08:11:27 +02:00
williamlardier f0663fd507 CLDSRV-520: add dashboards 2024-05-10 08:11:27 +02:00
williamlardier d4decbbd6c CLDSRV-520: add alerts 2024-05-10 08:11:27 +02:00
williamlardier 288b2b7b87 CLDSRV-520: observe number of buckets and accounts with quota 2024-05-10 08:11:27 +02:00
williamlardier ccf9b62e59 CLDSRV-520: observe metrics during quota evaluations 2024-05-10 08:11:27 +02:00
williamlardier 9fc2d552ae CLDSRV-520: add metrics for quota 2024-05-07 17:56:24 +02:00
williamlardier d7cc4cf7d5 CLDSRV-515: adapt dockerfile for scubaclient 2024-05-07 16:24:25 +02:00
williamlardier 334d33ef44 CLDSRV-515: unit testing 2024-05-07 16:24:25 +02:00
williamlardier 989b0214d9 CLDSRV-515: functional testing 2024-05-07 16:21:13 +02:00
williamlardier 04d0730f97 CLDSRV-515: clear inflights in case of quota exceeded
- If the quotas are evaluated successfully and inflights are
  enabled, the quota service will store the information and
  persist it until the next update of the utilization metrics.
  In this case, any API that fails after authorization would
  still mean that the bytes are counted, even if nothing was
  written. To overcome that, we call a function from the quota
  evaluation logic to erase anything that we wrote during the
  authorization.
2024-05-07 16:21:13 +02:00
williamlardier fbc642c022 CLDSRV-515: evaluate quotas
Quotas are evaluated:
- As part of the authorization process, after both the bucket and
  the object are authorized. The checks are skipped if the API does
  not need any quota evaluation, or if the inflight bytes are 0
  (i.e., no data added, so no need to check the quota).
- The Copy APIs will evaluate the quotas when the source object is
  checked. In this particular case, the action is objectGet, so a
  flag is passed to force the quota evaluation logic. A subsequent
  check is done in the logic.
- The restoreObject API has a special case where the extension of
  the restoration duration would still cause the evaluation of the
  quotas, causing a potential increase in the inflights stored. We
  detect this case and remove any added inflight.
2024-05-07 16:21:13 +02:00
williamlardier 104435f0b6 CLDSRV-515: implement the quota logic as a helper file 2024-05-07 16:21:13 +02:00
williamlardier a362ac202e CLDSRV-515: bootstrap scuba on startup 2024-05-07 16:21:13 +02:00
williamlardier 1277e58150 CLDSRV-515: create a wrapper for scubaclient and quota service 2024-05-07 16:21:13 +02:00
williamlardier 7727ccf5f0 CLDSRV-515: add configuration for quotas
- Quota service is generic. We only support scuba backend now,
  but we can add others later, if needed, as long as they share
  the same implementation as the scuba client.
- Scuba configuration is passed for the scubaclient tool.
- Ability to disable the inflights is provided. This changes the
  behavior of the quota checks, so that the inflights won't be
  part of the request to the utilization metrics services. This
  reduces the complexity of the quota evaluation logic in case
  of error, as no cleanup will be needed in this case. This,
  however, requires a backend that can provide up to date metrics
  (i.e., <2s).
2024-05-05 15:31:34 +02:00
williamlardier 71860fc90c CLDSRV-515: do not recreate variable at every authz 2024-05-05 15:31:04 +02:00
williamlardier e504b52de7 CLDSRV-515: bump arsenal and vaultclient, introduce scubaclient 2024-05-02 15:09:23 +02:00
Maha Benzekri b369a47c4d CLDSRV-516: add tests 2024-05-02 14:44:31 +02:00
Maha Benzekri b4fa81e832 CLDSRV-516: implement BucketDeleteQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 1e03d53879 CLDSRV-516: implement BucketGetQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 63e502d419 CLDSRV-516: implement UpdateBucketQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri d2a31dc20a CLDSRV-516: specify the signature version of old auth tests
This is unrelated to the quotas, but blocks the CI.
2024-05-02 14:44:28 +02:00
Maha Benzekri f24411875f CLDSRV-516: introduce quota APIs in router 2024-05-02 14:28:56 +02:00
Maha Benzekri 4fd7faa6a3 CLDSRV-516: bump arsenal version 2024-05-02 14:27:44 +02:00
Francois Ferrand 118aaba702
Use sproxyd from ghcr
Issue: CLDSRV-524
2024-04-18 20:38:37 +02:00
Francois Ferrand e4442fdc52
Merge branch 'w/8.7/improvement/CLDSRV-524' into w/8.8/improvement/CLDSRV-524 2024-04-16 18:36:03 +02:00
Francois Ferrand 7fa199741f
Merge branch 'w/8.6/improvement/CLDSRV-524' into w/8.7/improvement/CLDSRV-524 2024-04-16 18:35:32 +02:00
Francois Ferrand f7f95af78f
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 18:34:49 +02:00
Francois Ferrand 2dc053a784
Merge branch 'w/7.70/improvement/CLDSRV-524' into w/8.6/improvement/CLDSRV-524 2024-04-16 17:57:54 +02:00
Francois Ferrand cc9bb9047e
Merge branch 'improvement/CLDSRV-524' into w/7.70/improvement/CLDSRV-524 2024-04-16 16:58:57 +02:00
Francois Ferrand b824fc0828
Use official docker build steps
The docker-build step from `scality/workflows/` fails to log in to
ghcr, as it picks up the old registry creds.

Issue: CLDSRV-524
2024-04-16 16:54:51 +02:00
Francois Ferrand a2e6d91cf2
Build pykmip image
Issue: CLDSRV-524
2024-04-16 16:54:41 +02:00
Francois Ferrand c1060853dd
Upgrade actions
- artifacts@v4
- cache@v4
- checkout@v4
- codeql@v3
- dependency-review@v4
- login@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5

Issue: CLDSRV-524
2024-04-16 16:54:23 +02:00
Francois Ferrand 227d6edd09
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 16:54:02 +02:00
bert-e b4754c68ea Merge branches 'w/8.8/bugfix/CLDSRV-518/duplication' and 'q/5548/8.7/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.8 2024-03-25 12:56:17 +00:00
bert-e 11aea5d93b Merge branches 'w/8.7/bugfix/CLDSRV-518/duplication' and 'q/5548/8.6/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.7 2024-03-25 12:56:17 +00:00
bert-e 0c50a5952f Merge branches 'w/8.6/bugfix/CLDSRV-518/duplication' and 'q/5548/7.70/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.6 2024-03-25 12:56:16 +00:00
bert-e 4a32e05855 Merge branches 'w/7.70/bugfix/CLDSRV-518/duplication' and 'q/5548/7.10/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/7.70 2024-03-25 12:56:16 +00:00
bert-e 402ed21b14 Merge branch 'bugfix/CLDSRV-518/duplication' into q/7.10 2024-03-25 12:56:16 +00:00
Nicolas Humbert a22719ed47 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-518/duplication' into w/8.8/bugfix/CLDSRV-518/duplication 2024-03-20 08:48:00 +01:00
Nicolas Humbert 41975d539d Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-518/duplication' into w/8.7/bugfix/CLDSRV-518/duplication 2024-03-19 18:12:42 +01:00
Nicolas Humbert c6724eb811 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-518/duplication' into w/8.6/bugfix/CLDSRV-518/duplication 2024-03-19 05:54:35 +01:00
Nicolas Humbert d027006938 Merge remote-tracking branch 'origin/bugfix/CLDSRV-518/duplication' into w/7.70/bugfix/CLDSRV-518/duplication 2024-03-14 20:50:08 +01:00
Nicolas Humbert 92cfd47572 CLDSRV-518 Duplication of version ID in metadata 2024-03-14 16:33:25 +01:00
bert-e 8796bf0f44 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
bert-e 735fcd04ef Merge branch 'w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
Jonathan Gramain c5522685b2 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 14:04:21 -07:00
Jonathan Gramain 48df7df271 Merge remote-tracking branch 'origin/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 14:02:52 -07:00
Jonathan Gramain e028eb227f CLDSRV-513 bump cloudserver 2024-03-13 14:00:55 -07:00
Nicolas Humbert caf3146662 CLDSRV-518 fix Ruby dependency: excon
(cherry picked from commit cc1607eaaecb97ab5c48da15f1b1449fe7a4680f)
2024-03-13 13:58:41 -07:00
bert-e 1dee707eb8 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 17:36:39 +00:00
Jonathan Gramain 2c8d69c20a Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 10:18:39 -07:00
Jonathan Gramain 0b2b6ceeb5 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 09:46:43 -07:00
Jonathan Gramain f4b3f39dc6 Merge remote-tracking branch 'origin/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 09:39:49 -07:00
Jonathan Gramain 84260340d0 CLDSRV-513 bump arsenal dependency 2024-03-13 09:34:30 -07:00
Jonathan Gramain e531abc346 bf: CLDSRV-513 fix request logger for batchDelete
Arsenal's `DataWrapper.batchDelete()` now already creates a request
logger on which it calls `end()` to get the elapsed time. Since there
is no need to create one before the call, remove the corresponding
code.

Note that the main fix is the arsenal version bump which, by creating
a request logger, naturally fixes the forgotten case in
`checkHashMatchMD5`.
2024-03-13 09:31:10 -07:00
Jonathan Gramain 20f6e3089b CLDSRV-513 bump werelogs dependency 2024-03-13 09:31:10 -07:00
bert-e 9dc34f2155 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:14 +00:00
bert-e 08a4c3ade3 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:13 +00:00
Nicolas Humbert d5c731856b Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:51:36 +01:00
Nicolas Humbert 584c94692b Merge remote-tracking branch 'origin/bugfix/CLDSRV-501/putmetadata' into w/7.70/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:16:03 +01:00
Nicolas Humbert a0e5257c75 CLDSRV-501 bump arsenal 2024-03-07 10:09:28 +01:00
bert-e 5435c14116 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:40 +00:00
bert-e 38c44ea874 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:39 +00:00
Nicolas Humbert 4200346dd2 CLDSRV-501 skip tests related to Backbeat routes for replication 2024-03-01 17:16:36 +01:00
bert-e 5472d0da59 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
bert-e cdc0bb1128 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
Nicolas Humbert 795f8bcf1c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:44:42 +01:00
Nicolas Humbert 9371d8d734 Merge remote-tracking branch 'origin/bugfix/CLDSRV-501/putmetadata' into w/7.70/bugfix/CLDSRV-501/putmetadata 2024-02-29 08:56:30 +01:00
Nicolas Humbert 3f31c7f3a1 CLDSRV-501 PutMetadata should write metadata on top of a null version 2024-02-27 14:29:35 +01:00
KillianG 39cba3ee6c
Merge remote-tracking branch 'origin/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust' into w/8.8/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust 2024-02-27 11:31:55 +01:00
KillianG a00952712f
Bump 8.7.47
Issue: CLDSRV-512
2024-02-27 10:41:34 +01:00
KillianG a246e18e17
Update test for startRestore
Issue: CLDSRV-512
2024-02-27 10:26:19 +01:00
KillianG 3bb3a4d161
Use scaledMsPerDay when restore-adjust
Use scaledMsPerDay when restoring an object that has already been restored, to be able to make the time go faster for testing purposes

Issue: CLDSRV-512
2024-02-27 10:26:11 +01:00
bert-e c6ba7f981e Merge branches 'w/8.8/bugfix/CLDSRV-498/null' and 'q/5526/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.8 2024-02-21 13:57:14 +00:00
bert-e 69c82da878 Merge branches 'w/8.6/bugfix/CLDSRV-498/null' and 'q/5526/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.6 2024-02-21 13:57:13 +00:00
bert-e 762ae5a0ff Merge branches 'w/8.7/bugfix/CLDSRV-498/null' and 'q/5526/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.7 2024-02-21 13:57:13 +00:00
bert-e 89dfc794a6 Merge branch 'w/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/q/7.70 2024-02-21 13:57:12 +00:00
bert-e 3205d117f5 Merge branches 'w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.8 2024-02-20 13:05:07 +00:00
bert-e 4eafae44d8 Merge branches 'w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/7.70/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.6 2024-02-20 13:05:06 +00:00
bert-e 4cab3c84f3 Merge branches 'w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.7 2024-02-20 13:05:06 +00:00
bert-e e3301a2db9 Merge branch 'bugfix/CLDSRV-508-fix-bucket-tagging' into q/7.70 2024-02-20 13:05:05 +00:00
williamlardier 0dcc93cdbe Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:49:56 +01:00
williamlardier 2f2f91d6e8 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:48:05 +01:00
williamlardier a28b141dfb Merge remote-tracking branch 'origin/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:43:22 +01:00
williamlardier 46fe061895 CLDSRV-508: bump project version 2024-02-20 12:44:23 +01:00
williamlardier 34202eaa62 CLDSRV-508: add tests for bucket tagging APIs 2024-02-20 12:44:07 +01:00
williamlardier 4d343fe468 CLDSRV-508: standardize XML with object tagging API 2024-02-20 12:42:34 +01:00
williamlardier 229e641f88 CLDSRV-508: add missing parameters in bucket tagging APIs 2024-02-20 12:42:18 +01:00
bert-e 1433973e5c Merge branch 'w/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e 201170b1ed Merge branch 'w/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e f13985094e Merge branch 'w/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.6/bugfix/CLDSRV-498/null 2024-02-20 11:24:07 +00:00
Nicolas Humbert 395033acd2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-498/null' into w/7.70/bugfix/CLDSRV-498/null 2024-02-20 12:13:38 +01:00
Nicolas Humbert 632ef26826 CLDSRV-498 Handling isNull master version with no versionId
In certain cases, a master version may not have a versionId and be set as null (isNull:true). For instance, this occurs when a customer:

1. Creates a bucket.

2. Puts an object into it.

3. Puts bucket versioning.

4. Puts metadata (BackbeatClient.putMetadata), which results in the master version being set to null (isNull:true) with no versionId.

Currently, if an object is put after these steps, CloudServer fails to generate a null version appropriately. This is because CloudServer doesn't handle situations where the master version is set to isNull:true with no versionId.

The correct approach when an object is put is to:

1. Create the new version key.

2. Create a new null version key, assigning it a “default non-version version id”.

3. Set this “default non-version version id” in the `nullVersionId` field of the master key.
2024-02-20 12:04:53 +01:00
bert-e 242b2ec85a Merge branches 'w/8.8/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.7/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.8 2024-02-19 15:00:59 +00:00
bert-e 3186a97113 Merge branches 'w/8.7/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.6/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.7 2024-02-19 15:00:59 +00:00
bert-e 3861b8d317 Merge branch 'q/5534/7.10/bugfix/CLDSRV-505-ip-handling-fix' into tmp/normal/q/7.70 2024-02-19 15:00:58 +00:00
bert-e bb278f7d7e Merge branches 'w/8.6/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/7.70/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.6 2024-02-19 15:00:58 +00:00
bert-e 3b9309490d Merge branch 'bugfix/CLDSRV-505-ip-handling-fix' into q/7.10 2024-02-19 15:00:57 +00:00
Will Toozs 0118dfabbb
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-505-ip-handling-fix' into w/8.8/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:40:58 +01:00
Will Toozs ff40dfaadf
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-505-ip-handling-fix' into w/8.7/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:25:18 +01:00
Will Toozs 9a31236da0
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-505-ip-handling-fix' into w/8.6/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:22:08 +01:00
Will Toozs 61ebacfbf3
Merge remote-tracking branch 'origin/bugfix/CLDSRV-505-ip-handling-fix' into w/7.70/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 14:26:43 +01:00
Will Toozs aa646ced28
CLDSRV-505: bump CS version 2024-02-19 12:00:41 +01:00
Will Toozs f2ca37b5fb
CLDSRV-505: update ip check tests for arrays 2024-02-19 12:00:41 +01:00
Will Toozs 9d74cedde8
CLDSRV-505: update ip check for arrays 2024-02-19 12:00:41 +01:00
bert-e 9c99a6980f Merge branches 'w/8.8/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.8 2024-02-19 10:16:26 +00:00
bert-e d4e255781b Merge branches 'w/8.7/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.7 2024-02-19 10:16:26 +00:00
bert-e f5763d012e Merge branches 'w/8.6/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/7.70/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.6 2024-02-19 10:16:24 +00:00
bert-e 8fb740cf09 Merge branch 'bugfix/CLDSRV-507-bp-fixes' into q/7.10 2024-02-19 10:16:23 +00:00
bert-e 55c8d89de2 Merge branches 'w/7.70/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/7.10/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/7.70 2024-02-19 10:16:23 +00:00
bert-e 1afaaec0ac Merge branch 'w/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.8/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:24 +00:00
bert-e e20e458971 Merge branch 'w/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.7/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:23 +00:00
williamlardier 56e52de056 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-507-bp-fixes' into w/8.6/bugfix/CLDSRV-507-bp-fixes 2024-02-19 10:01:09 +01:00
williamlardier d9fc4aae50 Merge remote-tracking branch 'origin/bugfix/CLDSRV-507-bp-fixes' into w/7.70/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:54:06 +01:00
williamlardier 08de09a2ab CLDSRV-507: bump arsenal version 2024-02-19 09:48:13 +01:00
bert-e bef9220032 Merge branches 'w/8.8/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.8 2024-02-15 18:43:31 +00:00
bert-e de20f1efdc Merge branches 'w/8.7/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.6/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.7 2024-02-15 18:43:31 +00:00
bert-e 4817f11f36 Merge branches 'w/8.6/bugfix/CLDSRV-497/putmetadata' and 'q/5525/7.70/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.6 2024-02-15 18:43:30 +00:00
bert-e a6b283f5a2 Merge branch 'bugfix/CLDSRV-497/putmetadata' into q/7.10 2024-02-15 18:43:29 +00:00
bert-e 3f810a7596 Merge branches 'w/7.70/bugfix/CLDSRV-497/putmetadata' and 'q/5525/7.10/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/7.70 2024-02-15 18:43:29 +00:00
bert-e b89d19c9f8 Merge branch 'w/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:58:27 +00:00
Nicolas Humbert 4dc9788629 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-497/putmetadata' into w/8.7/bugfix/CLDSRV-497/putmetadata 2024-02-15 18:43:28 +01:00
Nicolas Humbert 65a891d6f8 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-497/putmetadata' into w/8.6/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:51:48 +01:00
bert-e 2ecca4feef Merge branch 'bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/7.70/bugfix/CLDSRV-497/putmetadata 2024-02-15 16:34:04 +00:00
Nicolas Humbert c52a3a6e44 CLDSRV-497 Fix BackbeatClient.putMetadata with versionID
Issue: When Cloudserver BackbeatClient.putMetadata() option fields are sent to Metadata through the query string, they are converted to strings. As a result, Metadata interprets the value undefined in the versionId field as an empty string ('').

Background: Previously, the 'crrExistingObject' script used this bug/behavior as a workaround to generate an internal version ID to replicate null version (= objects created before versioning was enabled). However, this approach has led to inconsistencies, occasionally resulting in the creation of multiple null internal versions.

Resolution: To address this issue, the 'crrExistingObject' workaround will be deprecated. Instead, Backbeat will be enhanced to support the replication of null versions directly, thereby ensuring more reliable and consistent behavior in handling versioning.
2024-02-15 17:31:23 +01:00
williamlardier d82965ff78 CLDSRV-507: normalize request types 2024-02-15 09:26:45 +01:00
williamlardier f488a65f15 CLDSRV-507: support no object metadata for MPU APIs resource authz
The MPU APIs deal with object resources. When bucket policies and
ACLs were only evaluated after an Allow from IAM, there was no need
to handle this case. Now, however, these APIs evaluate the bucket
policies and ACLs, and because there is no object metadata associated,
we end up allowing requests without any permission by relying on the
existing code, where the permission is changed and becomes
"bucketGet". We must treat MPU APIs as distinct APIs and check the
right permission. For that, we rely on the updated bucket policy
action map in arsenal with these APIs, and ensure that we properly
map it to the existing logic, where we only checked the "objectPut"
permission, to handle these 3 specific APIs:

- initiate MPU
- upload part
- complete MPU
2024-02-14 15:16:48 +01:00
williamlardier 40a575a717 CLDSRV-507: use correct action for put part APIs 2024-02-14 15:16:48 +01:00
williamlardier fea82f15ea CLDSRV-507: use correct action for MPU 2024-02-14 15:16:48 +01:00
bert-e 06dc042154 Merge branches 'w/8.8/improvement/CLDSRV-502' and 'q/5528/8.7/improvement/CLDSRV-502' into tmp/octopus/q/8.8 2024-02-08 13:49:18 +00:00
bert-e aa4643644a Merge branches 'w/8.7/improvement/CLDSRV-502' and 'q/5528/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.7 2024-02-08 13:49:18 +00:00
bert-e 89edf7e3d0 Merge branch 'w/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.6 2024-02-08 13:49:18 +00:00
Francois Ferrand 4c7d3ae4bc
Merge branch 'w/8.7/improvement/CLDSRV-502' into w/8.8/improvement/CLDSRV-502 2024-02-05 18:50:27 +01:00
Francois Ferrand 23883dae8b
Merge branch 'w/8.6/improvement/CLDSRV-502' into w/8.7/improvement/CLDSRV-502 2024-02-05 18:50:12 +01:00
Francois Ferrand e616ffa374
gha: fix test alert trigger to match other premerge build
Issue: CLDSRV-502
2024-02-05 18:49:31 +01:00
Francois Ferrand 515c20e4cf
Merge branch 'w/7.70/improvement/CLDSRV-502' into w/8.6/improvement/CLDSRV-502 2024-02-05 18:48:18 +01:00
Francois Ferrand f8eedddebf
Merge branch 'improvement/CLDSRV-502' into w/7.70/improvement/CLDSRV-502 2024-02-05 18:48:01 +01:00
Francois Ferrand f3654e4fb8
Fix trigger for codeql jobs
Build on pull request & bert-e queue build, and skip rebuild when PR
lands on development branch.

Issue: CLDSRV-502
2024-02-05 18:47:30 +01:00
Francois Ferrand 517fb99190
gha: add release name to release job
Issue: CLDSRV-502
2024-02-05 18:46:02 +01:00
Francois Ferrand 531c83a359
Release 8.8.17
Issue: CLDSRV-500
2024-02-05 17:35:43 +01:00
Francois Ferrand b84fa851f7
Merge branch 'w/8.7/bugfix/CLDSRV-500' into w/8.8/bugfix/CLDSRV-500 2024-02-05 17:35:20 +01:00
Francois Ferrand 4cb1a879f7
Release 8.7.44
Issue: CLDSRV-500
2024-02-05 17:34:45 +01:00
Francois Ferrand 7ae55b20e7
Merge branch 'bugfix/CLDSRV-500' into w/8.7/bugfix/CLDSRV-500 2024-02-05 17:32:53 +01:00
Francois Ferrand d0a6fa17a5
Release 8.6.24
Issue: CLDSRV-500
2024-02-05 17:31:36 +01:00
Francois Ferrand 7275459f70
Use rate interval in `Request time` panel
- Should use $__rate_interval, which handles small time ranges.
- Regenerating the dashboard also fixes the 'latency per s3 action'
  panel.

Issue: CLDSRV-500
2024-02-01 15:49:29 +01:00
Hervé Dombya 363afcd17f CLDSRV-473: fix cors issues in getVeeamFile 2024-01-26 15:59:10 +01:00
Frédéric Meinnel 1cf0250ce9 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.8/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:06:05 +01:00
Frédéric Meinnel 20d0b38d0b Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:05:39 +01:00
Frédéric Meinnel 9988a8327a Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 14:06:31 +01:00
Frédéric Meinnel b481d24637 Merge remote-tracking branch 'origin/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/7.70/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 14:01:59 +01:00
Frédéric Meinnel 71625774c1 CLDSRV-494: version bump 2024-01-23 13:42:36 +01:00
Frédéric Meinnel 9b9338f2b8 CLDSRV-494: Fix generateV4Headers for HTTP PUT with body 2024-01-23 13:42:31 +01:00
Frédéric Meinnel 601619f200 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.8/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:24:05 +01:00
Frédéric Meinnel a92e71fd50 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:22:55 +01:00
Frédéric Meinnel 8802ea0617 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:21:42 +01:00
Frédéric Meinnel acc5f74787 Merge remote-tracking branch 'origin/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/7.70/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:20:10 +01:00
Frédéric Meinnel e3c093f352 CLDSRV-493: Version bump 2024-01-17 13:18:32 +01:00
Frédéric Meinnel e17383a678 CLDSRV-493: Fix dates accepted in lifecycle configuration 2024-01-17 13:18:32 +01:00
bert-e 43f62b847c Merge branch 'w/8.7/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.8/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e a031905bba Merge branch 'w/8.6/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.7/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e 13ad6881f4 Merge branch 'bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.6/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:38 +00:00
Mickael Bourgois dea5173075
CLDSRV-492: remove duplicate error monitoring 2024-01-16 21:34:26 +01:00
Mickael Bourgois b3f96198fe
CLDSRV-492: update monitoring head 2024-01-15 14:48:08 +01:00
Mickael Bourgois 5e2dd8cccb
Merge remote-tracking branch 'origin/development/7.70' into bugfix/CLDSRV-492-head-monitoring 2024-01-15 11:56:50 +01:00
bert-e cd2406b827 Merge branches 'w/8.8/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.8 2024-01-15 09:47:24 +00:00
bert-e 62f707caff Merge branches 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.7 2024-01-15 09:47:23 +00:00
bert-e f01ef00a52 Merge branches 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.6 2024-01-15 09:47:23 +00:00
bert-e 30fb64e443 Merge branch 'bugfix/CLDSRV-489-redirect-folder-index' into q/7.10 2024-01-15 09:47:22 +00:00
bert-e 054107d8fb Merge branches 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/7.10/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/7.70 2024-01-15 09:47:22 +00:00
bert-e 848bf318fe Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:46 +00:00
bert-e 0beb48a1fd Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:45 +00:00
bert-e 618d4dffc7 Merge branches 'development/8.6' and 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:44 +00:00
bert-e b5aae192f7 Merge branches 'development/7.70' and 'bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/7.70/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:43 +00:00
Mickael Bourgois 557f3dcde6
CLDSRV-489: fix lint indentation 2024-01-12 10:07:39 +01:00
Mickael Bourgois 3291af36bb
CLDSRV-489: Apply style suggestions
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2024-01-12 09:53:57 +01:00
Will Toozs d274acd8ed
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-436-bump-version' into w/8.8/improvement/CLDSRV-436-bump-version 2024-01-11 13:10:57 +01:00
Will Toozs e6d9e8fc35
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-436-bump-version' into w/8.7/improvement/CLDSRV-436-bump-version 2024-01-11 11:50:25 +01:00
Will Toozs b08edefad6
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bump-version' into w/8.6/improvement/CLDSRV-436-bump-version 2024-01-11 11:24:50 +01:00
Will Toozs e9c353d62a
Merge remote-tracking branch 'origin/improvement/CLDSRV-436-bump-version' into w/7.70/improvement/CLDSRV-436-bump-version 2024-01-11 11:04:53 +01:00
Will Toozs c7c55451a1
CLDSRV-436: bump package version 2024-01-11 10:45:47 +01:00
bert-e 7bb004586d Merge branch 'w/8.7/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.8/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:49 +00:00
bert-e d48de67723 Merge branch 'w/8.6/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.7/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:48 +00:00
Will Toozs fa4dec01cb
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bp-conds' into w/8.6/improvement/CLDSRV-436-bp-conds 2024-01-10 22:00:37 +01:00
Will Toozs 4f79a9c59c
Merge remote-tracking branch 'origin/improvement/CLDSRV-436-bp-conds' into w/7.70/improvement/CLDSRV-436-bp-conds 2024-01-10 21:43:08 +01:00
Will Toozs 05c759110b
CLDSRV-436: update dummyRequest of unit tests 2024-01-10 21:02:15 +01:00
Will Toozs deae294a81
CLDSRV-436: unit test policy condition validation 2024-01-10 21:02:15 +01:00
Will Toozs ab587385e6
CLDSRV-436: add functional test cases for conditions 2024-01-10 21:01:44 +01:00
Will Toozs 6243911072
CLDSRV-436: update tests 2024-01-10 20:59:26 +01:00
Will Toozs da804054e5
CLDSRV-436: update put retention logic 2024-01-10 20:57:38 +01:00
Will Toozs 493a6da773
CLDSRV-436: update put policy logic 2024-01-10 20:57:38 +01:00
Will Toozs 7ecdd11783
CLDSRV-436: add conditions logic 2024-01-10 20:57:37 +01:00
Mickael Bourgois 7e53b67c90
CLDSRV-492: fix monitoring for website head
Match HEAD before the merge in CLDSRV-482
2024-01-10 20:29:20 +01:00
bert-e b141c59bb7 Merge branch 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 0b79ecd942 Merge branch 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 86ece5c264 Merge branch 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:56 +00:00
Mickael Bourgois 0b79cd6af6
Merge remote-tracking branch 'origin/bugfix/CLDSRV-489-redirect-folder-index' into w/7.70/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 19:32:46 +01:00
Mickael Bourgois a51b5e0af3
CLDSRV-489: test redirect 302 on folder without / 2024-01-10 19:10:57 +01:00
bert-e 10ca6b98fa Merge branch 'w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.8/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
bert-e 171925732f Merge branch 'w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
Taylor McKinnon 6d36f9c867 Merge remote-tracking branch 'origin/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 10:04:49 -08:00
Taylor McKinnon 1a21c4f867 impr(CLDSRV-475): Bump version to 7.70.41 2024-01-10 10:02:36 -08:00
Taylor McKinnon 866dec1b81 impr(CLDSRV-475): Add isDeleteMarker to overhead fields 2024-01-10 10:02:15 -08:00
Mickael Bourgois 9491e82235
CLDSRV-489: redirect 302 on folder without /
If a key is not found, we must check whether key/index.html
is accessible and, if so, redirect to append a trailing /
to the key.

@see https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html#IndexDocumentsandFolders
2024-01-10 17:39:13 +01:00
bert-e 70e8b20af9 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 0ec5f4fee5 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 6c468a01d9 Merge branch 'w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:19 +00:00
bert-e 3d2b75f344 Merge branch 'bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/7.70/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:19 +00:00
Mickael Bourgois 5811fa5326
CLDSRV-485: fix linter in tests for 8.6 2024-01-10 13:50:11 +01:00
bert-e e600677545 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
bert-e 72e5da10b7 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
Mickael Bourgois de0e7e6449
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 13:15:29 +01:00
Mickael Bourgois 97b5ed6dd3
Merge remote-tracking branch 'origin/bugfix/CLDSRV-485-custom-err-redirect' into w/7.70/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:09:35 +01:00
Mickael Bourgois dad8a3ee37
Merge remote-tracking branch 'origin/development/7.10' into bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:02:54 +01:00
Mickael Bourgois 8aca658c5c
CLDSRV-485: bump arsenal 2024-01-10 11:52:27 +01:00
bert-e 759817c5a0 Merge branch 'w/8.7/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
bert-e 035c7e8d7f Merge branch 'w/8.6/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
Mickael Bourgois b8af1225d5
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-482-head-redirect-index' into w/8.6/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:28:13 +01:00
Mickael Bourgois 40faa5f3fa
Merge remote-tracking branch 'origin/bugfix/CLDSRV-482-head-redirect-index' into w/7.70/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:19:09 +01:00
Mickael Bourgois 1fc8622614
Merge remote-tracking branch 'origin/development/7.10' into bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:01:51 +01:00
Mickael Bourgois a0acefb4a8
CLDSRV-482: apply style suggestion
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2024-01-10 10:13:08 +01:00
bert-e de27a5b88e Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e a4cc5e45f3 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e 621cb33680 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:48 +00:00
bert-e b025443d21 Merge branch 'bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/7.70/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:48 +00:00
Mickael Bourgois d502a81284
CLDSRV-488: fix lint 2024-01-10 09:56:27 +01:00
bert-e 9a8b707e82 Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:32 +00:00
bert-e 002dbe0019 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e 59e52f6df2 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e b52f2356ba Merge branch 'bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/7.70/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:30 +00:00
Mickael Bourgois 60679495b6
CLDSRV-488: apply review suggestion
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2024-01-10 09:53:24 +01:00
Mickael Bourgois 9dfacd0827
CLDSRV-482: factorize website GET and HEAD 2024-01-09 18:45:02 +01:00
Mickael Bourgois 485ef1e9bb
CLDSRV-482: test routing and implicit index 2024-01-09 17:18:07 +01:00
Mickael Bourgois 5e041ca5e7
CLDSRV-482: fix head implicit index
The routing check must be performed before adding the index prefix,
to prevent matching a routing rule on the index.
2024-01-09 17:18:07 +01:00
Mickael Bourgois 52137772d9
Merge branch 'development/7.10' into bugfix/CLDSRV-488-error-type-bp 2024-01-09 16:44:18 +01:00
Mickael Bourgois fcf193d033
CLDSRV-488: move website condition, replace flag 2024-01-09 16:40:55 +01:00
Mickael Bourgois fb61cad786
CLDSRV-485: test website redirect custom error 2024-01-08 18:00:32 +01:00
Mickael Bourgois b6367eb2b8
CLDSRV-485: website redirect from custom error doc 2024-01-08 17:58:09 +01:00
bert-e d803bdcadc Merge branch 'w/8.7/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.8/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:57 +00:00
bert-e 4f1b8f25b7 Merge branch 'w/8.6/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.7/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e 94363482c3 Merge branch 'w/7.70/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.6/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e 6b0a8cb9ed Merge branch 'bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/7.70/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:55 +00:00
Will Toozs 5dbf5d965f
CLDSRV-477: add tests 2024-01-08 14:43:41 +01:00
Will Toozs ebefc4b5b0
CLDSRV-477: change position of ACL check 2024-01-08 14:43:40 +01:00
Mickael Bourgois ac1c75e414
CLDSRV-488: test website 404 with bucket policy 2024-01-05 12:52:51 +01:00
Mickael Bourgois fee4f3a96e
CLDSRV-488: fix website 404 with bucket policy
If the bucket policy authorizes access to a
non-existent object, there should be a 404
and not a 403.
2024-01-05 12:52:50 +01:00
bert-e e969eeaa20 Merge branches 'w/8.8/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.8 2024-01-05 11:24:59 +00:00
bert-e 2ee78bcf6a Merge branches 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.7 2024-01-05 11:24:58 +00:00
bert-e 64273365d5 Merge branches 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.6 2024-01-05 11:24:58 +00:00
bert-e 65c6bacd34 Merge branches 'w/7.70/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/7.10/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/7.70 2024-01-05 11:24:57 +00:00
bert-e d60d252eaf Merge branch 'bugfix/CLDSRV-490-bucket-policy-resource' into q/7.10 2024-01-05 11:24:57 +00:00
bert-e f31fe2f2bf Merge branch 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.8/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
bert-e ee47cece90 Merge branch 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.7/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
Mickael Bourgois 7a5cddacbc
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into w/8.6/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 12:08:54 +01:00
Mickael Bourgois baa6203b57
Merge remote-tracking branch 'origin/bugfix/CLDSRV-490-bucket-policy-resource' into w/7.70/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 12:04:25 +01:00
Mickael Bourgois 141056637b
CLDSRV-490: bump version 2024-01-05 11:51:49 +01:00
Mickael Bourgois 0f007e0489
CLDSRV-490: fix linting in tests for 8.6 2024-01-05 11:51:48 +01:00
Mickael Bourgois 2d50a76923
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-486-object-redirect-root' into w/8.8/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:59:20 +01:00
Mickael Bourgois 6b4f10ae56
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-486-object-redirect-root' into w/8.7/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:57:36 +01:00
Mickael Bourgois 23eaf89cc3
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-486-object-redirect-root' into w/8.6/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:55:48 +01:00
Mickael Bourgois d6a2144508
Merge remote-tracking branch 'origin/bugfix/CLDSRV-486-object-redirect-root' into w/7.70/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:49:20 +01:00
Mickael Bourgois 40dd3f37a4
Merge branch 'development/7.10' into bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:36:03 +01:00
Mickael Bourgois d3307654a6
CLDSRV-486: bump cloudserver version 2024-01-04 16:34:10 +01:00
Mickael Bourgois e342a90b48
CLDSRV-486: bump arsenal version 2024-01-04 16:29:04 +01:00
williamlardier dbda5f16a6 CLDSRV-407: bump mongodb to v5.0 in CI 2024-01-04 14:04:20 +01:00
Mickael Bourgois d4a4825668
CLDSRV-490: test bucket policy with request 2024-01-04 10:18:36 +01:00
Mickael Bourgois 83b9e9a775
CLDSRV-490: fix missing request for bucket policy
If the request is missing, the bucket policy ignores the resource
and applies the effect to any matching principal and action.
2024-01-03 18:24:54 +01:00
Maha Benzekri 2959c950dd
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.8/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:36:20 +01:00
Maha Benzekri 462ddf7ef1
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:34:44 +01:00
Maha Benzekri fda42e7399
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:32:41 +01:00
Maha Benzekri edbd6caeb4
Merge remote-tracking branch 'origin/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/7.70/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 09:38:25 +01:00
Maha Benzekri 1befaa1f28
CLDSRV-480: CLDSRV version bump 2024-01-03 09:35:19 +01:00
Maha Benzekri 0cefca831d
CLDSRV-480: condition check fix for isImplicit 2024-01-03 09:34:19 +01:00
Jonathan Gramain ea7b69e313 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:26:27 -08:00
Jonathan Gramain 8ec1c2f2db Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:08:40 -08:00
Jonathan Gramain 3af6ca5f6d Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:06:45 -08:00
Jonathan Gramain 997d71df08 Merge remote-tracking branch 'origin/bugfix/CLDSRV-478-bump-arsenal-dep' into w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 14:49:08 -08:00
Jonathan Gramain 275ebcec5c CLDSRV-478 bump cloudserver version 2024-01-02 14:45:56 -08:00
Mickael Bourgois 8b77530b2b
CLDSRV-486: fix object redirect to root / 2024-01-02 19:16:32 +01:00
bert-e 43f9606598 Merge branch 'w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:02 +00:00
bert-e be34e5ad59 Merge branch 'w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:01 +00:00
Jonathan Gramain 5bc64ede43 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 09:41:03 -08:00
Jonathan Gramain 911010376e Merge remote-tracking branch 'origin/bugfix/CLDSRV-478-bump-arsenal-dep' into w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 09:26:30 -08:00
Jonathan Gramain b5ec37b38b bf: CLDSRV-478 bump arsenal dependency 2024-01-02 09:19:15 -08:00
Mickael Bourgois 3ce869cea3
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-479-website-fqdn-index' into w/8.8/bugfix/CLDSRV-479-website-fqdn-index
# Conflicts:
#	package.json
2024-01-02 11:40:28 +01:00
Mickael Bourgois b7960784db
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-479-website-fqdn-index' into w/8.7/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:35:36 +01:00
Mickael Bourgois 5ac10cefa8
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-479-website-fqdn-index' into w/8.6/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:33:49 +01:00
Mickael Bourgois 2dafefd77f
Merge remote-tracking branch 'origin/bugfix/CLDSRV-479-website-fqdn-index' into w/7.70/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:29:47 +01:00
Mickael Bourgois 36f147b441
CLDSRV-479: update test bucket policy index 2024-01-02 11:13:40 +01:00
Mickael Bourgois 8ed447ba63
CLDSRV-479: helper function for index append 2024-01-02 10:27:38 +01:00
bert-e bf235f3335 Merge branch 'w/8.7/bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.8/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:53 +00:00
bert-e 569c9f4368 Merge branch 'bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:52 +00:00
Nicolas Humbert 92cf03254a CLDSRV-483 Improve Ruby test output readability and Enable backtrace 2023-12-31 11:08:21 +01:00
Nicolas Humbert c57ae9c8ea CLDSRV-483 Bump ruby patch version to fix malformed header response
More info about the malformed header response: https://github.com/excon/excon/issues/845
2023-12-31 11:08:16 +01:00
Mickael Bourgois 5bec42d051
CLDSRV-479: test index with bucket policy 2023-12-29 17:43:34 +01:00
Mickael Bourgois f427fc9b70
CLDSRV-479: bump version 2023-12-28 15:20:59 +01:00
Mickael Bourgois 9aad4ae3ea
CLDSRV-479: fix error on index using bucket policy
The variable holding the new objectKey with the index suffix
was not propagated to the bucket policy function: the
_checkBucketPolicyResources function extracts the objectKey from the request
(see the sketch below).
2023-12-28 15:20:48 +01:00
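The CLDSRV-479 fix above concerns the website index key; here is a hedged sketch of the kind of index-append helper the earlier "helper function for index append" commit mentions (names and logic are illustrative, not the actual cloudserver implementation):

```js
// Hypothetical helper: append the configured website index document when the
// request targets the bucket root or a "directory" key ending with '/'.
function appendIndexDocument(objectKey, indexDocument) {
    if (!objectKey || objectKey.endsWith('/')) {
        return `${objectKey || ''}${indexDocument}`;
    }
    return objectKey;
}

// The fix is about making sure this suffixed key, not the original request
// key, is the one evaluated against the bucket policy resources.
console.log(appendIndexDocument('', 'index.html'));      // 'index.html'
console.log(appendIndexDocument('docs/', 'index.html')); // 'docs/index.html'
```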
bert-e 1a3cb8108c Merge branch 'q/5495/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 042120b17e Merge branch 'q/5495/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e ba4593592d Merge branch 'w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 6efdb627da Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e 5306bf0b5c Merge branch 'q/5495/7.70/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e 5b22819c3f Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e 126ca3560f Merge branch 'improvement/CLDSRV-451-specific-7.70-apis-update' into q/7.70 2023-12-15 06:44:02 +00:00
bert-e e5b692f3db Merge branch 'w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.8/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:49 +00:00
bert-e 548ae8cd12 Merge branch 'w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:48 +00:00
Taylor McKinnon 80376405df Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 10:30:13 -08:00
Taylor McKinnon a612e5c27c Merge remote-tracking branch 'origin/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into w/7.70/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 10:27:03 -08:00
Taylor McKinnon c3b7662086 impr(CLDSRV-467): Bump Utapi dependency to 7.10.15 2023-12-14 10:17:18 -08:00
Taylor McKinnon 818b1e60d1 impr(CLDSRV-467): Add new Utapi Reindex option `utapi.reindex.onlyCountLatestWhenObjectLocked` 2023-12-14 10:17:18 -08:00
bert-e 2a919af071 Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:47 +00:00
bert-e 5c300b8b6c Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:46 +00:00
Maha Benzekri ad3ebd3db2
CLDSRV-451: fix on gettagging 2023-12-14 18:21:24 +01:00
Maha Benzekri 99068e7265
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:36:17 +01:00
Maha Benzekri cd039d8133
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update
In this commit the only API change compared to 8.6 is
routeVeeam.
2023-12-14 17:33:03 +01:00
Maha Benzekri dd3ec25d74
Merge remote-tracking branch 'origin/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update
In this merge, we have updated the tagging APIs along with the
lifecycle APIs, the metadata search APIs and objectRestore; the unit
test for objectRestore has been updated as well.
2023-12-14 17:28:46 +01:00
Maha Benzekri 717228bdfc
CLDSRV-451: bump Cloudserver version 2023-12-14 16:59:10 +01:00
Maha Benzekri 836fc80560
CLDSRV-451: updating buckettagging apis for impDeny 2023-12-14 16:58:14 +01:00
Maha Benzekri 75b293df8d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.8/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:37:14 +01:00
Maha Benzekri a855e38998
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:35:02 +01:00
Maha Benzekri 51d5666bec
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:32:36 +01:00
Maha Benzekri ecb74a2db3
Merge remote-tracking branch 'origin/improvement/CLDSRV-431-misc-api-implicitDeny' into w/7.70/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:26:57 +01:00
Maha Benzekri cdcdf8eff0
CLDSRV-431: cloudserver version bump 2023-12-14 12:22:42 +01:00
Maha Benzekri dc39b37877
CLDSRV-431: arsenal bump 2023-12-14 12:21:53 +01:00
Maha Benzekri 4897b3c720
CLDSRV-431: changes on misc api for impDeny 2023-12-13 11:14:21 +01:00
Maha Benzekri ffe4ea4afe
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.8/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 14:47:24 +01:00
Maha Benzekri a16cfad0fc
CLDSRV-474: mongodb_image on all jobs 2023-12-12 14:06:02 +01:00
bert-e 556163e3e9 Merge branch 'w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:55:56 +00:00
Maha Benzekri 8fe9f16661
CLDSRV-474: Removing the docker-compose commands from the tests.yaml 2023-12-12 13:53:53 +01:00
Maha Benzekri eb9ff85bd9
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 13:52:50 +01:00
bert-e 52994c0177 Merge branch 'improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/7.70/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:44:56 +00:00
tmacro e109b0fca7
CLDSRV-474: fix CI fail 2023-12-12 10:21:01 +01:00
Maha Benzekri 9940699f9d
CLDSRV-474: fixup on multiObjectDelete 2023-12-12 10:11:18 +01:00
Maha Benzekri 869d554e43
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.8/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:42:25 +01:00
Maha Benzekri 2f8b228595
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:39:20 +01:00
Maha Benzekri 539b2c1630
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:35:11 +01:00
Maha Benzekri 320766e7b2
Merge remote-tracking branch 'origin/improvement/CLDSRV-430-delete-api-implicitDeny' into w/7.70/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:31:56 +01:00
Maha Benzekri 74425d03f8
CLDSRV-430: version bump 2023-12-08 18:29:19 +01:00
Maha Benzekri 91629a0d18
CLDSRV-430: add delete API implicit deny logic
As for multiObjectDelete, a new function was added to
ensure that all actions are allowed.
2023-12-08 18:29:17 +01:00
Maha Benzekri e44b7ed918
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 12:00:50 +01:00
Maha Benzekri 3cb29f7f8e
CLDSRV-429: version bump for version release 2023-12-05 12:00:09 +01:00
Maha Benzekri 4f08a4dff2
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 11:58:27 +01:00
Maha Benzekri 15a1aa7965
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 10:58:04 +01:00
Maha Benzekri 4470ee9125
CLDSRV-429: version bump for release 2023-12-05 10:55:31 +01:00
Francois Ferrand d8c12597ea
Release cloudserver 8.8.7
Issue: CLDSRV-471
2023-12-01 19:03:38 +01:00
Francois Ferrand c8eb9025fa
Merge remote-tracking branch 'origin/improvement/CLDSRV-471' into w/8.8/improvement/CLDSRV-471 2023-12-01 19:03:17 +01:00
Francois Ferrand 57e0f71e6a
Release cloudserver 8.7.33
Issue: CLDSRV-471
2023-12-01 19:01:30 +01:00
Francois Ferrand f22f920ee2
Bump arsenal 8.1.115
Issue: CLDSRV-471
2023-12-01 18:42:26 +01:00
Maha Benzekri ed1bb6301d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:31:50 +01:00
Maha Benzekri 70dfa5b11b
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:29:14 +01:00
Maha Benzekri f17e7677fa
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:27:44 +01:00
Maha Benzekri 63b00fef55
Merge remote-tracking branch 'origin/improvement/CLDSRV-429-get-apis-implicitDeny' into w/7.70/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:25:04 +01:00
Maha Benzekri b4f0d34abd
CLDSRV-429: version bump 2023-12-01 10:27:58 +01:00
Maha Benzekri e18f83ef0d
CLDSRV-429: update get apis with impDeny logic 2023-11-30 17:17:30 +01:00
Francois Ferrand a4e6f9d034
Add lifecycle restore duration metrics
Issue: CLDSRV-471
2023-11-30 14:55:01 +01:00
Maha Benzekri cf94b9de6a
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:23:08 +01:00
Maha Benzekri da0492d2bb
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:22:32 +01:00
Maha Benzekri 979b9065ed
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:19:27 +01:00
Maha Benzekri d5a3923f74
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:18:06 +01:00
Maha Benzekri 23cbbdaaed
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-428-put-apis-impDeny' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:17:05 +01:00
Maha Benzekri e506dea140
Merge remote-tracking branch 'origin/development/8.6' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:15:52 +01:00
Maha Benzekri 78721be7f7
Merge remote-tracking branch 'origin/improvement/CLDSRV-428-put-apis-impDeny' into w/7.70/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:10:26 +01:00
Maha Benzekri 02c5a46d14
Merge remote-tracking branch 'origin/development/7.70' into w/7.70/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:06:41 +01:00
Maha Benzekri b138955ef2
Merge remote-tracking branch 'origin/development/7.10' into HEAD 2023-11-29 16:01:54 +01:00
Maha Benzekri 7d10e5d69e
CLDSRV-428:Bump CLDSRV version 2023-11-29 15:59:28 +01:00
bert-e bc291fe3a7 Merge branches 'w/8.8/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.8 2023-11-27 17:16:14 +00:00
bert-e 8dc7432c51 Merge branches 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.7 2023-11-27 17:16:14 +00:00
bert-e 040fe53e53 Merge branches 'w/8.6/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/7.70/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.6 2023-11-27 17:16:13 +00:00
bert-e 60e350a5cf Merge branches 'w/7.70/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/7.10/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/7.70 2023-11-27 17:16:13 +00:00
bert-e 5de00c80f8 Merge branch 'bugfix/CLDSRV-463/bump_cloudserver' into q/7.10 2023-11-27 17:16:13 +00:00
bert-e 6f963bdcd9 Merge branch 'w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:56 +00:00
bert-e cd9024fd32 Merge branch 'w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:55 +00:00
Maha Benzekri 37649bf49b
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-428-put-apis-impDeny' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 17:01:43 +01:00
Maha Benzekri abf5ea33a9
Merge remote-tracking branch 'origin/improvement/CLDSRV-428-put-apis-impDeny' into w/7.70/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:59:09 +01:00
Maha Benzekri 2596f3fda8
CLDSRV-428: put apis updated for implicit deny
In this commit, the put APIs have been updated to check for the implicit deny
returned by Vault, which is added as a parameter on the request object.
Tests have also been added for the metadataUtils validateBucket function,
and the MetadataUtils functions have been updated to check for implicit
deny.
The goal is to implement the same authorization logic as AWS, where, for
example, an implicit deny from IAM combined with an Allow from the bucket
policy should allow the request (see the sketch below).
For the delete on objectPutCopyPart and objectPutPart, as we need to
differentiate between the Vault request and the external backend one, the
delete is applied to the request directly; as it is unique per API call,
this value is then added to the request object. Here is the link to the
design doc for more details:
https://github.com/scality/citadel/blob/development/1.0/docs/design/bucket-policies.md?plain=1#L263
2023-11-27 16:47:43 +01:00
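A hedged sketch of the combination rule the CLDSRV-428 commit above describes (the shapes and names below are assumptions, not cloudserver's actual API): an explicit IAM deny always wins, while an IAM implicit deny can be overridden by an Allow from the bucket policy.

```js
// Illustrative combination of the IAM result with the bucket policy verdict.
function isActionAllowed(iamResult, bucketPolicyVerdict) {
    // iamResult: { isAllowed: boolean, isImplicit: boolean }
    // bucketPolicyVerdict: 'Allow' | 'Deny' | 'None'
    if (!iamResult.isAllowed && !iamResult.isImplicit) {
        return false; // explicit deny from IAM
    }
    if (bucketPolicyVerdict === 'Deny') {
        return false; // explicit deny from the bucket policy
    }
    if (iamResult.isAllowed) {
        return true;  // IAM allows and nothing explicitly denies
    }
    // IAM gave an implicit deny: only a bucket policy Allow lets it through
    return bucketPolicyVerdict === 'Allow';
}

console.log(isActionAllowed({ isAllowed: false, isImplicit: true }, 'Allow')); // true
console.log(isActionAllowed({ isAllowed: false, isImplicit: true }, 'None'));  // false
```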
bert-e dff7610060 Merge branch 'w/8.7/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.8/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:07 +00:00
bert-e 757c2537ef Merge branch 'w/8.6/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.7/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:06 +00:00
Maha Benzekri c445322685
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-427-permissions-checks' into w/8.6/improvement/CLDSRV-427-permissions-checks 2023-11-17 12:28:19 +01:00
bert-e 2344204746 Merge branch 'improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/7.70/improvement/CLDSRV-427-permissions-checks 2023-11-17 10:50:50 +00:00
Maha Benzekri 693ddf8d35
Merge branch 'development/7.10' into improvement/CLDSRV-427-permissions-checks 2023-11-17 11:40:17 +01:00
Maha Benzekri 6caa5cc26a
CLDSRV-427: Improving functions using helper function
- In this commit, I added a helper (processBucketPolicy) function
for the bucket policy checks that are shared between
isbucketAuthorized, isObjAuthorized and evaluateBucketPolicyWithIAM,
for better code readability and to avoid long functions.

(cherry picked from commit 33d7c99e0c)
2023-11-17 11:36:22 +01:00
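A compact sketch of what a shared processBucketPolicy-style helper could look like (purely illustrative; the real helper lives in cloudserver and takes more parameters): a single statement loop that the bucket-level and object-level checks can both call instead of duplicating it.

```js
// Illustrative only: evaluate the bucket policy once and let the callers
// (bucket-level check, object-level check, IAM combination) reuse the verdict.
function processBucketPolicy(policy, { canonicalID, action, resource }) {
    let verdict = 'None';
    for (const st of policy.Statement) {
        const principals = [].concat((st.Principal && st.Principal.CanonicalUser) || []);
        const actions = [].concat(st.Action);
        const resources = [].concat(st.Resource);
        if (!principals.includes(canonicalID)) continue;
        if (!actions.includes(action) && !actions.includes('s3:*')) continue;
        if (!resources.includes(resource)) continue;
        if (st.Effect === 'Deny') return 'Deny'; // an explicit deny short-circuits
        verdict = 'Allow';
    }
    return verdict;
}
// A bucket-level caller would pass the bucket ARN as resource, an object-level
// caller the object ARN; both reuse the same evaluation.
```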
bert-e 4515b2adbf Merge branch 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
bert-e 50ffdd260b Merge branch 'w/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
Taylor McKinnon 3836848c05 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-463/bump_cloudserver' into w/8.6/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 15:26:23 -08:00
Taylor McKinnon 813a1553d2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-463/bump_cloudserver' into w/7.70/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 15:25:13 -08:00
Taylor McKinnon 1238cd809c bf(CLDSRV-463): Bump cloudserver to 7.10.34/7.70.31 2023-11-16 15:23:14 -08:00
bert-e b5f22d8c68 Merge branches 'w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.8 2023-11-16 19:43:14 +00:00
bert-e 68ff54d49a Merge branches 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.7 2023-11-16 19:43:13 +00:00
bert-e a74b3eacf8 Merge branches 'w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.6 2023-11-16 19:43:13 +00:00
bert-e f00a2f2d9e Merge branch 'q/5403/7.10/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/normal/q/7.70 2023-11-16 19:43:13 +00:00
bert-e 02bb60253a Merge branch 'bugfix/CLDSRV-463/strictly_check_algo_headers' into q/7.10 2023-11-16 19:43:12 +00:00
bert-e 3fe5579c80 Merge branch 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:08 +00:00
bert-e 3fdd2bce21 Merge branch 'w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:07 +00:00
Taylor McKinnon 44e6eb2550 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers' into w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 11:20:55 -08:00
Taylor McKinnon c148c770ac Merge remote-tracking branch 'origin/bugfix/CLDSRV-463/strictly_check_algo_headers' into w/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 11:17:40 -08:00
Maha Benzekri fa2f877825
CLDSRV-427: linting fixups and retrocompatibility changes
(cherry picked from commit a7396a721c)
2023-11-15 11:26:02 +01:00
Will Toozs 0e323fbefe
CLDSRV-427: update bucket/object perm checks to account for implicit …
…denies

(cherry picked from commit c01898f1a0)
(cherry picked from commit 7aa326cba9)
2023-11-15 11:26:01 +01:00
bert-e c9b512174f Merge branches 'w/8.8/bugfix/CLDSRV-460-forward-system-signals' and 'q/5431/8.7/bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/q/8.8 2023-11-15 10:14:18 +00:00
bert-e 7b48624cf7 Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into q/8.7 2023-11-15 10:14:17 +00:00
bert-e 55b07def2e Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/w/8.8/bugfix/CLDSRV-460-forward-system-signals 2023-11-15 09:43:35 +00:00
bert-e 62ae2b2c69 Merge branch 'w/7.70/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.6/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e fcc9468b63 Merge branch 'w/8.6/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.7/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e efc44a620d Merge branch 'w/8.7/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.8/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
Maha Benzekri 72342f6654
Merge remote-tracking branch 'origin/improvement/CLDSRV-468-version-bump' into w/7.70/improvement/CLDSRV-468-version-bump 2023-11-14 12:06:02 +01:00
Maha Benzekri fa11e58d57
CLDSRV-468:CLDSRV version bump 2023-11-14 11:57:09 +01:00
bert-e 1bc19b39d7 Merge branches 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.7 2023-11-13 17:20:17 +00:00
bert-e b5fa3a1fd3 Merge branches 'w/8.8/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.8 2023-11-13 17:20:17 +00:00
bert-e 68a6fc659c Merge branches 'w/8.6/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/7.70/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.6 2023-11-13 17:20:16 +00:00
bert-e 2624a05018 Merge branches 'w/7.70/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/7.10/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/7.70 2023-11-13 17:20:16 +00:00
bert-e 0882bfffb9 Merge branch 'improvement/CLDSRV-466/timestamps_in_stderr' into q/7.10 2023-11-13 17:20:15 +00:00
bert-e c0fc958365 Merge branch 'w/8.7/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.8/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:26 +00:00
bert-e d3c74d2c16 Merge branch 'w/8.6/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.7/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:25 +00:00
Maha Benzekri 9001285177
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-426-acl-impl-deny' into w/8.6/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 17:02:22 +01:00
bert-e bae6e8ecb3 Merge branch 'improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/7.70/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 15:56:01 +00:00
Will Toozs e0eab954aa
CLDSRV-426: add tests for ACL permission check updates
CLDSRV-426: additional test for ACL permission
2023-11-13 13:10:38 +01:00
Will Toozs 19b4e25373
CLDSRV-426: update ACL permission checks for implicitDeny logic
CLDSRV-426: fixups on ACL permission checks for implicitDeny logic

CLDSRV-426: better readability on ACL permission
2023-11-13 13:10:37 +01:00
Kerkesni 07eda89a3f
forward system signals to the node process using tini
npm run doesn’t handle signal forwarding and crashes
on the SIGTERM signal sent by Kubernetes.

Tini spawns a process at PID 1 that handles forwarding
system signals to all its child processes.

Issue: CLDSRV-460
2023-11-13 12:07:29 +01:00
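A small sketch of why the forwarded signal matters to the server process itself (standard Node APIs, not cloudserver-specific code): once tini forwards SIGTERM, the process can drain connections and exit cleanly instead of being killed, which is exactly what is lost when `npm run` sits in between and does not forward the signal.

```js
// Minimal graceful-shutdown sketch: the handler only runs if SIGTERM actually
// reaches the node process, which is what tini as PID 1 guarantees.
const http = require('http');

const server = http.createServer((req, res) => res.end('ok'));
server.listen(8000);

process.on('SIGTERM', () => {
    console.log('SIGTERM received, draining connections');
    server.close(() => process.exit(0));
});
```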
bert-e 27b4066ca4 Merge branch 'w/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.7/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e 2ee5b356fa Merge branch 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.8/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e 233955a0d3 Merge branch 'w/7.70/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.6/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:44 +00:00
bert-e ab51522110 Merge branch 'improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/7.70/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:44 +00:00
Rahul Padigela b1b2d2ada6 improvement CLDSRV-466 add timestamp for exceptions 2023-11-10 08:17:34 -08:00
bert-e f5d3433413 Merge branches 'w/8.8/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.8 2023-11-09 17:31:36 +00:00
bert-e 62b4b9bc25 Merge branches 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.7 2023-11-09 17:31:35 +00:00
bert-e ce4b2b5a27 Merge branches 'w/8.6/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/7.70/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.6 2023-11-09 17:31:34 +00:00
bert-e 96bd67ee60 Merge branch 'improvement/CLDSRV-464/support_mpu_scuba' into q/7.70 2023-11-09 17:31:34 +00:00
bert-e ec56c77881 Merge branch 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.8/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e d0abde3962 Merge branch 'w/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.7/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e f08a3f434b Merge branch 'improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.6/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:06 +00:00
bert-e fdc682f2db Merge branches 'w/8.8/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.7/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.8 2023-11-07 09:32:43 +00:00
bert-e b184606dc2 Merge branches 'w/8.7/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.6/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.7 2023-11-07 09:32:43 +00:00
bert-e 172ec4a714 Merge branches 'w/8.6/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/7.70/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.6 2023-11-07 09:32:42 +00:00
bert-e ae770d0d3f Merge branch 'improvement/CLDSRV-424-apicall-auth-update' into q/7.10 2023-11-07 09:32:41 +00:00
bert-e 7d2613e9a3 Merge branches 'w/7.70/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/7.10/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/7.70 2023-11-07 09:32:41 +00:00
Maha Benzekri 9ce0f2c2b6
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-424-apicall-auth-update' into w/8.8/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:20:41 +01:00
Maha Benzekri 43b4e0c713
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-424-apicall-auth-update' into w/8.7/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:18:48 +01:00
Maha Benzekri 2bda761518
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-424-apicall-auth-update' into w/8.6/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:16:48 +01:00
Maha Benzekri bfc9ca68c9
Merge remote-tracking branch 'origin/improvement/CLDSRV-424-apicall-auth-update' into w/7.70/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:13:45 +01:00
Maha Benzekri 6abb0d96a9
CLDSRV-424:CLDSRV version bump
Update lib/api/api.js

Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2023-11-07 09:06:23 +01:00
Maha Benzekri 733f424a4b
CLDSRV-424:ARSN version bump 2023-11-03 12:39:09 +01:00
Will Toozs 8d4ff7df5f
CLDSRV-424: api call updated with implicit deny logic
change variable names for clarity

edit: update arsenal package
2023-11-03 12:39:01 +01:00
Taylor McKinnon 59b87479df possible => unsupported 2023-11-01 10:17:20 -07:00
Taylor McKinnon 967ab966fa impr(CLDSRV-464): Add owner-id to mpu part metadata 2023-11-01 09:06:20 -07:00
Taylor McKinnon 212c7f506c impr(CLDSRV-464): Pass overhead fields for complete MPU and subsequent part batch delete 2023-11-01 09:04:32 -07:00
Taylor McKinnon 1e9ee0ef0b bf(CLDSRV-463): Strictly validate checksum algorithm headers 2023-10-30 10:54:34 -07:00
bert-e 9185f16554 Merge branch 'w/8.7/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.8/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
bert-e 2df9a57f9c Merge branch 'w/8.6/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.7/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
Nicolas Humbert c96706ff28 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-462/tags' into w/8.6/bugfix/CLDSRV-462/tags 2023-10-25 20:42:14 +02:00
Nicolas Humbert daa6f46b14 Merge remote-tracking branch 'origin/bugfix/CLDSRV-462/tags' into w/7.70/bugfix/CLDSRV-462/tags 2023-10-25 20:21:40 +02:00
Nicolas Humbert 44315057df CLDSRV-462 bump project version 2023-10-25 19:59:47 +02:00
Nicolas Humbert 61fe64a3ac CLDSRV-462 Expiration header is not compatible with legacy object md
Before the Object Metadata refactor done around May 31, 2017 (c22e44f63d), if no tags were set, the object tag was stored as undefined.

After the commit, if no tags are set, the object tag is stored as an empty object '{}'.

When the expiration response headers were implemented on 812b09afef around Nov 22, 2021, the empty object was handled, but not the undefined tag logic, which made the expiration response headers not backward compatible.

We need to address both cases: the undefined property and the empty object '{}'.
2023-10-25 19:59:07 +02:00
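A minimal sketch of the compatibility rule the CLDSRV-462 commit above describes (the `tags` field name is used for illustration): both the legacy `undefined` value and the newer empty object must be treated as "no tags" when computing the expiration headers.

```js
// Illustrative only: treat undefined (legacy metadata) and '{}' (current
// metadata) the same way when deciding whether an object has tags.
function hasTags(objMD) {
    const tags = objMD.tags;
    return tags !== undefined && Object.keys(tags).length > 0;
}

console.log(hasTags({}));                        // false (legacy: undefined)
console.log(hasTags({ tags: {} }));              // false (current: empty object)
console.log(hasTags({ tags: { env: 'prod' } })); // true
```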
bert-e 68535f83d6 Merge branches 'w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.8 2023-10-24 18:40:33 +00:00
bert-e 41d63650be Merge branches 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.7 2023-10-24 18:40:32 +00:00
bert-e 4ebb5d449a Merge branches 'w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/7.70/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.6 2023-10-24 18:40:32 +00:00
bert-e 48abedc6f7 Merge branch 'bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into q/7.70 2023-10-24 18:40:31 +00:00
bert-e 12185f7c3b Merge branches 'w/8.8/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.7/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.8 2023-10-19 20:36:18 +00:00
bert-e 5f82ee2d0e Merge branches 'w/8.7/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.6/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.7 2023-10-19 20:36:18 +00:00
bert-e 7e0f9c63fe Merge branches 'w/8.6/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/7.70/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.6 2023-10-19 20:36:17 +00:00
bert-e 9f5ac17357 Merge branch 'improvement/CLDSRV-449/pass_overhead_fields' into q/7.70 2023-10-19 20:36:17 +00:00
Taylor McKinnon d72bc5c6b9 Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-449/pass_overhead_fields' into w/8.8/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 13:16:26 -07:00
Taylor McKinnon 0e47810963 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-449/pass_overhead_fields' into w/8.7/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 12:40:23 -07:00
Taylor McKinnon 8d83546ee3 Merge remote-tracking branch 'origin/improvement/CLDSRV-449/pass_overhead_fields' into w/8.6/improvement/CLDSRV-449/pass_overhead_fields 2023-10-16 12:01:03 -07:00
Taylor McKinnon fff4fd5d22 impr(CLDSRV-449): Add unit tests for overheadField param 2023-10-16 11:13:49 -07:00
Taylor McKinnon 1016a6826d impr(CLDSRV-449): Pass overheadField to Metadata in API handlers 2023-10-16 11:13:49 -07:00
bert-e 3b36cef85f Merge branch 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 18:57:46 +00:00
Jonathan Gramain 114b885c7f Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 11:35:23 -07:00
Jonathan Gramain e56d4e3744 Merge remote-tracking branch 'origin/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 11:05:32 -07:00
Jonathan Gramain 15144e4adf CLDSRV-458 bump cloudserver version 2023-10-11 11:03:02 -07:00
Jonathan Gramain 3985e2a712 bf: CLDSRV-458 fix bucketd params on null version update
On in-place updates of "legacy" null versions (those without the
"isNull2" attribute, using the "nullVersionId" chain instead of null
keys), we mustn't pass the "isNull" query parameter when sending the
update request to bucketd. Otherwise, it creates a null key which
causes issues when deleting the null version later.

Use a helper to pass the right set of parameters in all request types
that update versions in-place.
2023-10-11 10:59:56 -07:00
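A hedged sketch of the helper idea from the CLDSRV-458 commit above (parameter names are taken from the commit message, the shape is assumed): only pass `isNull` to bucketd for null versions that already use null keys (`isNull2`), never for legacy ones tracked through `nullVersionId`.

```js
// Illustrative only: build the bucketd query parameters for an in-place
// version update without creating a stray null key for legacy null versions.
function buildBucketdUpdateParams(versionId, objMD) {
    const params = { versionId };
    if (objMD.isNull && objMD.isNull2) {
        // "new style" null version: a null key exists, flag it explicitly
        params.isNull = true;
    }
    // legacy null versions (no isNull2, tracked via nullVersionId) get no
    // isNull parameter, otherwise bucketd would create a null key
    return params;
}
```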
williamlardier 3b95c033d2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal' into w/8.8/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal 2023-10-06 17:59:22 +02:00
williamlardier 04091dc316 CLDSRV-457: bump project version 2023-10-06 14:54:35 +02:00
williamlardier 56023a80ed CLDSRV-457: bump arsenal 2023-10-06 14:54:34 +02:00
bert-e 2deaebd89a Merge branch 'w/8.7/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.8/bugfix/CLDSRV-455/skip 2023-10-05 16:41:46 +00:00
bert-e c706ccf9c6 Merge branch 'w/8.6/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.7/bugfix/CLDSRV-455/skip 2023-10-05 16:41:45 +00:00
Nicolas Humbert 4afb2476f8 Merge remote-tracking branch 'origin/bugfix/CLDSRV-455/skip' into w/8.6/bugfix/CLDSRV-455/skip 2023-10-05 18:21:54 +02:00
Nicolas Humbert 91a7e7f24f CLDSRV-455 orphan delete marker list interruption skips processed key
The key marker in the orphan delete marker listing response should match the last key in the response's key array.
This ensures that the next listing begins after the key that has already been returned.
2023-10-05 15:55:45 +02:00
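A tiny sketch of the invariant restored by CLDSRV-455 (response field names are illustrative): the marker handed back to the caller is the last key actually included in this page, so the next listing resumes strictly after keys that were already returned.

```js
// Illustrative only: the marker always matches the last returned key.
function buildListingResponse(keys, isTruncated) {
    return {
        Contents: keys,
        IsTruncated: isTruncated,
        NextKeyMarker: isTruncated ? keys[keys.length - 1] : undefined,
    };
}
```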
Taylor McKinnon 2f344cde70 impr(CLDSRV-449): Pass overheadField through helper functions to MetadataWrapper 2023-10-04 15:04:10 -07:00
Taylor McKinnon ad154085ac impr(CLDSRV-449): Use correct method in log message 2023-10-04 15:04:10 -07:00
Francois Ferrand 583ea8490f
Bump 8.8.3
Issue: CLDSRV-454
2023-10-04 11:18:25 +02:00
bert-e 85a9480793 Merge branch 'w/8.8/improvement/CLDSRV-446/bump' into tmp/octopus/q/8.8 2023-10-03 10:44:50 +00:00
bert-e be2f65b69e Merge branch 'bugfix/CLDSRV-423-test-sproxyd' into q/8.8 2023-10-03 10:12:16 +00:00
bert-e 1ee6d0a87d Merge branch 'w/8.7/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.8/improvement/CLDSRV-446/bump 2023-10-02 15:25:13 +00:00
bert-e 224af9a5d2 Merge branch 'w/8.6/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-446/bump 2023-10-02 15:25:12 +00:00
Nicolas Humbert 9e2ad48c5c Merge remote-tracking branch 'origin/improvement/CLDSRV-446/bump' into w/8.6/improvement/CLDSRV-446/bump 2023-10-02 17:12:32 +02:00
Nicolas Humbert 780971ce10 CLDSRV-446 bump version 2023-10-02 17:08:41 +02:00
bert-e 74f05377f0 Merge branch 'w/8.7/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.8/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:08 +00:00
bert-e 111e14cc89 Merge branch 'w/8.6/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.7/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:07 +00:00
Nicolas Humbert fd6fb5a26c Merge remote-tracking branch 'origin/improvement/CLDSRV-446/listing-scanned-limit' into w/8.6/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 15:30:02 +02:00
Nicolas Humbert 8df540dcc1 CLDSRV-446 Limiting entries scanned during the lifecycle listing 2023-10-02 15:08:37 +02:00
Florent Monjalet 00b20f00d1 Merge remote-tracking branch 'origin/development/8.8' into bugfix/CLDSRV-423-test-sproxyd 2023-10-02 13:45:58 +02:00
Florent Monjalet a91d53a12c CLDSRV-423: test distinct and overwriting PUTs 2023-09-27 11:58:20 +02:00
Florent Monjalet 63d2637046 CLDSRV-423: improve async series usage in test 2023-09-27 11:50:44 +02:00
Maha Benzekri 5d416ad190
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-444-id-resource-policy' into w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:49:03 +02:00
Maha Benzekri ff29cda03f
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-444-id-resource-policy' into w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:47:33 +02:00
Maha Benzekri 5685b2e972
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-444-id-resource-policy' into w/8.6/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:45:19 +02:00
Maha Benzekri 4e4ea2ab84
Merge remote-tracking branch 'origin/bugfix/CLDSRV-444-id-resource-policy' into w/7.70/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:43:20 +02:00
Florent Monjalet cb8baf2dab CLDSRV-423: provide a proper dockerfile for test sproxyd 2023-09-27 11:36:49 +02:00
Maha Benzekri 67e5694d26
CLDSRV-447:CLDSRV version bump 2023-09-27 11:23:26 +02:00
bert-e 22f470c6eb Merge branch 'w/8.7/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:17 +00:00
bert-e e510473116 Merge branch 'w/8.6/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:16 +00:00
Maha Benzekri d046e8a294
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-444-id-resource-policy' into w/8.6/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 10:27:47 +02:00
Maha Benzekri 20a730788a
Merge remote-tracking branch 'origin/bugfix/CLDSRV-444-id-resource-policy' into w/7.70/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 10:15:37 +02:00
Maha Benzekri 47958591ec
CLDSRV-447:ARSN version bump
fixes after reviews
2023-09-26 16:13:14 +02:00
Maha Benzekri 4195b6ae6a
CLDSRV-447:Test add for principal && arsn bump 2023-09-25 15:13:58 +02:00
Maha Benzekri feefd13b68
CLDSRV-444: unit test with Id and arsenal version 2023-09-12 22:51:04 +02:00
Florent Monjalet 17a6808fe4 CLDSRV-423: bump arsenal and sproxydclient to fix SPRXCLT-12 2023-08-31 19:07:44 +02:00
Florent Monjalet df646e4802 CLDSRV-423: disable failing tests that have just been reenabled
They had been disabled for a long while and cannot be reenabled yet
because they don't pass, so keep on skipping them for now.

Tickets have been created to take care of them:

- CLDSRV-440
- CLDSRV-441
- CLDSRV-442
- CLDSRV-443
2023-08-31 19:06:34 +02:00
Florent Monjalet 267770d256 CLDSRV-423: reproduce SPRXCLT-12 more often 2023-08-31 19:06:34 +02:00
Florent Monjalet 1b92dc2c05 CLDSRV-423: perform two successive puts in multiple backend tests
This tests for the SPRXCLT-12 issue
2023-08-31 19:06:34 +02:00
Florent Monjalet f80bb2f34b CLDSRV-423: don't run sproxyd test when testing Ceph 2023-08-31 19:06:34 +02:00
Florent Monjalet 4f89b67bb9 CLDSRV-423: Add missing mock logger method 2023-08-31 19:06:34 +02:00
Florent Monjalet 8b5630923c CLDSRV-423: refactor multiple backend put tests to avoid duplication 2023-08-31 19:06:34 +02:00
Florent Monjalet 9ff5e376e5 CLDSRV-423: reenable a good chunk of multiple backend tests 2023-08-31 19:06:34 +02:00
Florent Monjalet a9b5a2e3a4 CLDSRV-423: add put test for sproxyd 2023-08-31 19:06:34 +02:00
Florent Monjalet 7e9ec22ae3 CLDSRV-423: deploy sproxyd for multiple backend tests 2023-08-31 19:06:34 +02:00
bert-e 9d4664ae06 Merge branch 'w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.8/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
bert-e 662265ba2e Merge branch 'w/8.6/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
bert-e c7da82dda7 Merge branch 'w/7.70/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.6/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:29 +00:00
Taylor McKinnon 960b4b2dd4 Merge remote-tracking branch 'origin/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into w/7.70/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 09:41:53 -07:00
Taylor McKinnon 1e9af343b9 bf(CLDSRV-439): Bump version to 7.10.30 2023-08-30 09:25:48 -07:00
Taylor McKinnon 8bb7338080 bf(CLDSRV-439): Bump arsenal to 7.10.47 2023-08-30 09:24:17 -07:00
Taylor McKinnon 17e4f14f9c Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-413/bump_version' into w/8.8/bugfix/CLDSRV-413/bump_version 2023-08-18 10:10:01 -07:00
Taylor McKinnon 014b071536 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-413/bump_version' into w/8.7/bugfix/CLDSRV-413/bump_version 2023-08-18 10:07:14 -07:00
Taylor McKinnon 9130f323d4 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-413/bump_version' into w/8.6/bugfix/CLDSRV-413/bump_version 2023-08-18 10:05:33 -07:00
Taylor McKinnon c09d3282dc Merge remote-tracking branch 'origin/bugfix/CLDSRV-413/bump_version' into w/7.70/bugfix/CLDSRV-413/bump_version 2023-08-18 09:54:43 -07:00
Taylor McKinnon fb9175579f bf(CLDSRV-413): Bump cloudserver version 2023-08-18 09:44:53 -07:00
bert-e 2d45f92ae1 Merge branches 'w/8.8/feature/CLDSRV-420/backport' and 'q/5268/8.7/feature/CLDSRV-420/backport' into tmp/octopus/q/8.8 2023-08-18 14:53:18 +00:00
bert-e 48452496fa Merge branches 'w/8.7/feature/CLDSRV-420/backport' and 'q/5268/8.6/feature/CLDSRV-420/backport' into tmp/octopus/q/8.7 2023-08-18 14:53:18 +00:00
bert-e b89773eba6 Merge branch 'q/5268/7.70/feature/CLDSRV-420/backport' into tmp/normal/q/8.6 2023-08-18 14:53:18 +00:00
bert-e c738e0924e Merge branch 'feature/CLDSRV-420/backport' into q/7.70 2023-08-18 14:53:16 +00:00
bert-e 18bf6b8d4a Merge branch 'w/8.7/feature/CLDSRV-420/backport' into tmp/octopus/w/8.8/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
bert-e 858c31a542 Merge branch 'w/8.6/feature/CLDSRV-420/backport' into tmp/octopus/w/8.7/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
Nicolas Humbert 75a759de27 Merge remote-tracking branch 'origin/feature/CLDSRV-420/backport' into w/8.6/feature/CLDSRV-420/backport 2023-08-18 12:57:48 +02:00
bert-e 19d3e0bc9d Merge branch 'w/8.7/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.8/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:03:00 +00:00
bert-e bac044dc8f Merge branch 'w/8.6/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.7/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:02:59 +00:00
Taylor McKinnon 8f77cd18c8 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-413/crr_existing_null_version' into w/8.6/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 10:02:29 -07:00
bert-e cb7609b173 Merge branch 'bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/7.70/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:00:57 +00:00
Taylor McKinnon 2926048735 bf(CLDSRV-413): Set isNull in objectMD in backbeat putMetadata route for current null versions 2023-08-17 09:59:58 -07:00
Taylor McKinnon 656ef3fcee bf(CLDSRV-413): improve backbeat route testing setup cleanup 2023-08-16 14:25:31 -07:00
bert-e 8c0f709014 Merge branch 'bugfix/CLDSRV-422' into tmp/octopus/w/8.8/bugfix/CLDSRV-422 2023-08-16 11:46:43 +00:00
Francois Ferrand ce92d33a5d
Fix use of http_requests_total metrics
It was missed when metric names were updated. In addition, the dashboard
was not up-to-date with the python source, and needed to be regenerated.

Issue: CLDSRV-422
2023-08-14 14:36:14 +02:00
Kerkesni 0381cce85c
Merge remote-tracking branch 'origin/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring' into w/8.8/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring 2023-08-10 16:07:42 +02:00
Kerkesni 20a08a2a4e
bump version to 8.7.26 2023-08-10 16:04:25 +02:00
Kerkesni ff73d8ab12
add tests for keeping object properties after restore
Issue: CLDSRV-408
2023-08-10 16:03:58 +02:00
Kerkesni 1ee44bc6d3
keep same object properties after a restore of a cold object
Object properties such as ACLs and custom user metadata should
not be removed after the restore of a cold object.

Issue: CLDSRV-408
2023-08-10 12:58:18 +02:00
bert-e 614e876536 Merge branches 'w/8.8/improvement/CLDSRV-400' and 'q/5191/8.7/improvement/CLDSRV-400' into tmp/octopus/q/8.8 2023-08-09 16:42:42 +00:00
bert-e b40a77d94b Merge branch 'improvement/CLDSRV-400' into q/8.7 2023-08-09 16:42:42 +00:00
bert-e 3a3a73b756 Merge branch 'improvement/CLDSRV-400' into tmp/octopus/w/8.8/improvement/CLDSRV-400 2023-08-09 16:19:33 +00:00
Nicolas Humbert 6789959109 CLDSRV-420 add us-east-2 location 2023-08-09 10:14:47 -04:00
Nicolas Humbert bf9b53eea9 CLDSRV-420 fix linter 2023-08-09 10:14:47 -04:00
Nicolas Humbert aa04d23e68 CLDSRV-420 test v1 and v0 bucket format 2023-08-09 10:14:47 -04:00
Nicolas Humbert e08aaa7bcc CLDSRV-412 Test null version in Lifecycle list of non-current versions
(cherry picked from commit 98f855f997)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 1d9c44126a CLDSRV-375 Exclude already transitioned keys from lifecycle listings
(cherry picked from commit 4c189b2d9e)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 70a28ab620 CLDSRV-420 fix import metrics 2023-08-09 10:14:47 -04:00
Nicolas Humbert 550451eefa CLDSRV-372 Current lifecycle versions should include version id
(cherry picked from commit f7f77c6cd2)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 96befd3f28 CLDSRV-371 ETag should be surrounded by double quotes
(cherry picked from commit f20bf1becf)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 75288f1b56 CLDSRV-366 Clear list orphan delete markers response
(cherry picked from commit f2292f1ca3)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 6847f2b0c4 CLDSRV-363 ETag instead of Etag for lifecycle listings Contents
(cherry picked from commit 049f52bf95)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 050059548e CLDSRV-317 Implement listLifecycleOrphans
(cherry picked from commit ec9ed94555)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 9b2a557a05 CLDSRV-316 Implement listLifecycleNonCurrents
(cherry picked from commit 41cc399d85)
2023-08-09 10:14:47 -04:00
Nicolas Humbert 7a7e2f4b91 CLDSRV-314 Implement listLifecycleCurrents
(cherry picked from commit 6b8a2581b6)
2023-08-09 10:14:42 -04:00
bert-e 3f6e85590d Merge branches 'w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.8 2023-08-07 17:27:19 +00:00
bert-e de589a07e8 Merge branches 'w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.6 2023-08-07 17:27:18 +00:00
bert-e bc009945d2 Merge branches 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.7 2023-08-07 17:27:18 +00:00
bert-e 8db04f4486 Merge branches 'w/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/7.10/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/7.70 2023-08-07 17:27:17 +00:00
bert-e 328b7bc335 Merge branch 'bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into q/7.10 2023-08-07 17:27:17 +00:00
bert-e 3ac30d9bab Merge branch 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e 32204fbfbf Merge branch 'w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e b1eda2a73a Merge branch 'w/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:45 +00:00
bert-e 0249ad9bcf Merge branch 'bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:45 +00:00
bert-e 5a26e1a80d Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 507a2d4ff5 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 8cdd35950b Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:26:59 +00:00
bert-e bfa366cd27 Merge branch 'improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/7.70/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:26:59 +00:00
Dimitrios Vasilas d132757696 CLDSRV-411: Add tests for imposing last-modified in testing mode 2023-07-20 09:58:09 +02:00
Alexander Chan 2a4be31b8a CLDSRV-196: create new werelogs object over using global werelogs
(cherry picked from commit 7fd547db24)
2023-07-19 12:25:55 -07:00
bert-e 1207a6fb70 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e 5883286864 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e b206728342 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:27 +00:00
bert-e 347a7391b9 Merge branch 'improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/7.70/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:27 +00:00
Dimitrios Vasilas 6273eebe66
CLDSRV-411: Use method to set last-modified
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2023-07-19 10:45:17 +02:00
bert-e 2a37e809d9 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:22 +00:00
bert-e 86ce7691cd Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:21 +00:00
bert-e c04f663480 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:21 +00:00
Dimitrios Vasilas f2493e982f CLDSRV-411: Remove double import 2023-07-18 14:15:44 +02:00
bert-e e466b5e92a Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:16 +00:00
bert-e a4bc10f730 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:15 +00:00
bert-e e826033bf0 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:15 +00:00
Dimitrios Vasilas c23dad6fb8 Merge remote-tracking branch 'origin/improvement/CLDSRV-411-impose-last-modified' into w/7.70/improvement/CLDSRV-411-impose-last-modified 2023-07-17 18:50:30 +02:00
Dimitrios Vasilas 5fcdaa5a97 CLDSRV-411: Add mechanism for imposing the last-modified
When the configuration parameter "testingMode" is set to true,
a put can specify a custom last-modified date using the header
x-amz-meta-x-scal-last-modified.

This is intended to be used in tests only.
2023-07-17 18:40:18 +02:00
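A hedged usage sketch for the testing-mode header above, using the AWS SDK for JavaScript; the endpoint, credentials and date format are assumptions for a local test setup. x-amz-meta-* headers are expressed through the SDK's `Metadata` map without the prefix.

```js
// Only honoured when the server runs with testingMode enabled.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',  // assumed local cloudserver endpoint
    s3ForcePathStyle: true,
    accessKeyId: 'accessKey1',          // assumed default dev credentials
    secretAccessKey: 'verySecretKey1',
});

s3.putObject({
    Bucket: 'test-bucket',
    Key: 'backdated-object',
    Body: 'hello',
    // sent on the wire as x-amz-meta-x-scal-last-modified
    Metadata: { 'x-scal-last-modified': 'Fri, 01 Jan 2021 00:00:00 GMT' },
}, (err, data) => console.log(err || data));
```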
Dimitrios Vasilas 9f61ef9a3b CLDSRV-411: Add testing mode 2023-07-17 12:05:16 +02:00
Nicolas Humbert c480301e95 Merge remote-tracking branch 'origin/improvement/CLDSRV-414/bump' into w/8.8/improvement/CLDSRV-414/bump 2023-07-14 15:52:57 -04:00
Nicolas Humbert 276be285cc CLDSRV-414 bump version 2023-07-14 15:47:13 -04:00
bert-e 897d41392a Merge branch 'w/8.7/bugfix/CLDSRV-412/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-412/null 2023-07-14 14:08:38 +00:00
bert-e f4e3a19d61 Merge branch 'bugfix/CLDSRV-412/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-412/null 2023-07-14 14:08:37 +00:00
Nicolas Humbert ee84a03d2c bump arsenal 2023-07-14 09:49:30 -04:00
Nicolas Humbert 98f855f997 CLDSRV-412 Test null version in Lifecycle list of non-current versions 2023-07-14 09:48:47 -04:00
williamlardier 7c52fcbbb0
CLDSRV-402: bump project version 2023-07-13 17:45:06 +02:00
bert-e da52688a39 Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:26 +00:00
bert-e 1cb54a66f8 Merge branch 'w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:25 +00:00
williamlardier 0bb61ddb5b
Merge remote-tracking branch 'origin/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 17:12:05 +02:00
williamlardier 68e4b0610a
CLDSRV-402: bump project version 2023-07-13 17:10:06 +02:00
bert-e d9fffdad9e Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 13:08:26 +00:00
williamlardier 389c32f819
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:06:34 +02:00
williamlardier c2df0bd3eb
Merge remote-tracking branch 'origin/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 14:22:48 +02:00
williamlardier af0436f1cd
CLDSRV-402: bump project version 2023-07-13 09:54:35 +02:00
williamlardier f7593d385e
CLDSRV-402: bump arsenal dependency 2023-07-13 09:54:35 +02:00
williamlardier 84068b205e
CLDSRV-402: test multi object delete optimization 2023-07-13 09:54:34 +02:00
williamlardier 9774d31b03
CLDSRV-402: optimize multiObjectDelete API
- Parallelism is increased to reduce latency
- If the backend supports it, batching is used
- The deletion of objects from storage is batched
- A flag can enable or disable the optimization, as well as
  tune it.
2023-07-13 09:54:34 +02:00
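An illustrative sketch of the batching-plus-parallelism idea behind the CLDSRV-402 optimization above (not the actual implementation; `deleteBatch`, `batchSize` and `concurrency` are assumed names):

```js
// Illustrative only: delete keys in batches, several batches in flight at a
// time, with both knobs configurable so the optimization can be tuned or
// effectively disabled (batchSize = 1, concurrency = 1).
async function deleteInBatches(keys, deleteBatch, { batchSize = 50, concurrency = 4 } = {}) {
    const batches = [];
    for (let i = 0; i < keys.length; i += batchSize) {
        batches.push(keys.slice(i, i + batchSize));
    }
    const results = [];
    for (let i = 0; i < batches.length; i += concurrency) {
        const inFlight = batches.slice(i, i + concurrency).map(deleteBatch);
        results.push(...await Promise.all(inFlight));
    }
    return results;
}
```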
Kerkesni d26b8bcfcc
test keeping same storage class when restoring a cold object
Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
Kerkesni e4634621ee
keep storage class as cold for restored objects
To be compliant with the AWS S3 standard, the storage class
of restored objects should be left as the cold location

Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
williamlardier 0b58b3ad2a
CLDSRV390: bump mongodb to 4.4 2023-06-22 16:56:53 +02:00
bert-e 652bf92536 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:43 +00:00
bert-e c5b1ef63ee Merge branch 'w/7.70/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:42 +00:00
bert-e 227de16bca Merge branch 'improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/7.70/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:41 +00:00
williamlardier c57a6e3c57
CLDSRV-409: fix s3cmd test 2023-06-22 15:14:24 +02:00
bert-e 344ee8a014 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:44:35 +00:00
bert-e 5d7a434306 Merge branch 'w/7.70/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:44:35 +00:00
williamlardier 852ae72a13
Merge remote-tracking branch 'origin/improvement/CLDSRV-409-fix-python-version' into w/7.70/improvement/CLDSRV-409-fix-python-version 2023-06-22 14:43:18 +02:00
williamlardier 507782bd17
CLDSRV-409: remove virtualenv 2023-06-22 14:42:09 +02:00
bert-e b7e7f65d52 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:37:53 +00:00
williamlardier d00320a8ba
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-409-fix-python-version' into w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 14:36:28 +02:00
williamlardier 4cf07193d9
Merge remote-tracking branch 'origin/improvement/CLDSRV-409-fix-python-version' into w/7.70/improvement/CLDSRV-409-fix-python-version 2023-06-22 14:34:03 +02:00
williamlardier aef272ea3c
CLDSRV-409: remove python 2.7 2023-06-22 14:30:43 +02:00
williamlardier 31d1734d5c
CLDSRV-409: use latest s3cmd with python3 2023-06-22 13:52:14 +02:00
bert-e c5b7450a4d Merge branches 'w/8.7/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.7 2023-06-12 22:01:22 +00:00
bert-e eb5affdced Merge branches 'w/8.6/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/8.5/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.6 2023-06-12 22:01:21 +00:00
bert-e cdaf6db929 Merge branches 'w/8.5/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/7.70/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.5 2023-06-12 22:01:21 +00:00
bert-e 91ada795d0 Merge branches 'w/7.70/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/7.10/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/7.70 2023-06-12 22:01:20 +00:00
bert-e 2b420a85e0 Merge branch 'w/7.10/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/7.10 2023-06-12 22:01:19 +00:00
Nicolas Humbert 18c8d4ecac CLDSRV-404 bump version 2023-06-09 11:48:56 -04:00
Nicolas Humbert c8150c6857 CLDSRV-397 Introduce the time-progression-factor flag
The "time-progression-factor" variable serves as a testing-specific feature that accelerates the progression of time within a system.
By reducing the significance of each day, it enables the swift execution of specific actions, such as expiration, transition, and object locking, which are typically associated with longer timeframes.

This capability allows for efficient testing and evaluation of outcomes, optimizing the observation of processes that would normally take days or even years.
It's important to note that this variable is intended exclusively for testing purposes and is not employed in live production environments, where real-time progression is crucial for accurate results.
2023-06-08 12:14:36 -04:00
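A minimal sketch of how such a factor could shorten the effective day length for lifecycle-style computations; the environment variable and helper names here are illustrative assumptions, not the actual implementation:

```js
// Illustrative only: a time-progression factor of N makes one "day" elapse N times faster.
const timeProgressionFactor = Number(process.env.TIME_PROGRESSION_FACTOR) || 1;
const msPerDay = 24 * 60 * 60 * 1000;
const scaledMsPerDay = msPerDay / timeProgressionFactor;

// Example: how many "days" old an object is, for expiration/transition checks.
function scaledAgeInDays(lastModified, now = Date.now()) {
    return (now - new Date(lastModified).getTime()) / scaledMsPerDay;
}
```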
bert-e 399a2a53ab Merge branch 'improvement/CLDSRV-399/addWorkflowDispatch' into q/8.7 2023-06-05 20:39:18 +00:00
Alexander Chan bbad049b5f CLDSRV-399: add workflow_dispatch 2023-06-05 11:30:35 -07:00
bert-e 2a4e2e1584 Merge branch 'w/8.6/improvement/CLDSRV-398/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-398/bump 2023-06-02 20:19:28 +00:00
bert-e 08e43f5084 Merge branch 'w/8.5/improvement/CLDSRV-398/bump' into tmp/octopus/w/8.6/improvement/CLDSRV-398/bump 2023-06-02 20:19:27 +00:00
Nicolas Humbert cc153c99d6 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-398/bump' into w/8.5/improvement/CLDSRV-398/bump 2023-06-02 15:58:58 -04:00
Nicolas Humbert d3f9870389 Merge remote-tracking branch 'origin/improvement/CLDSRV-398/bump' into w/7.70/improvement/CLDSRV-398/bump 2023-06-02 15:14:08 -04:00
Nicolas Humbert 0fa264693d CLDSRV-398 bump version 2023-06-02 15:05:47 -04:00
bert-e b304d05614 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:27 +00:00
bert-e 751f6ce559 Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:26 +00:00
bert-e 0330597679 Merge branch 'w/7.70/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:26 +00:00
Nicolas Humbert 27cacc9552 CLDSRV-396 add nullVersionCompatMode condition 2023-06-02 14:14:05 -04:00
bert-e 004bd63368 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 16:12:33 +00:00
bert-e e047ae6fbb Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 16:12:32 +00:00
Nicolas Humbert ebca8dd05e Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-396/put-metadata-null' into w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 12:09:50 -04:00
bert-e 52535fb44b Merge branch 'bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/7.70/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 15:56:03 +00:00
Nicolas Humbert 1ed32b2cae CLDSRV-396 If put metadata for a null version, set options.isNull to true 2023-06-02 11:55:35 -04:00
Nicolas Humbert 960d736962 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-396/put-metadata-null' into w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 11:24:34 -04:00
bert-e 11098dd113 Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 15:13:36 +00:00
Nicolas Humbert 9cc7362fbd Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-396/put-metadata-null' into w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 08:27:34 -04:00
KillianG 32401c9a83
bump 8.7.23 2023-05-30 09:40:36 +00:00
KillianG 5f05b676cc
Merge remote-tracking branch 'origin/development/8.7' into HEAD 2023-05-26 09:46:21 +00:00
KillianG fd662a8c2c
Bump arsenal 8.1.101 and test delete markers are not listed when bucket versioning is suspended
Issue: CLDSRV-347
2023-05-26 08:46:42 +00:00
bert-e a843d53939 Merge branch 'bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/7.70/bugfix/CLDSRV-396/put-metadata-null 2023-05-25 21:48:52 +00:00
Nicolas Humbert f889100798 CLDSRV-396 putMetadata API route is not updating null version properly 2023-05-25 17:42:47 -04:00
bert-e 5d54dd58be Merge branch 'bugfix/CLDSRV-393' into q/8.7 2023-05-25 19:47:24 +00:00
Nicolas Humbert 1bd0deafcf CLDSRV-395 bump to 8.7.21 2023-05-25 14:02:47 -04:00
Francois Ferrand 7c788d3dbf Bump github actions
Issue: CLDSRV-393
2023-05-25 14:02:47 -04:00
Nicolas Humbert 50cb6a2bf1 CLDSRV-374 putMetadata API route is not updating null version properly
Instead of using the provided "null" value, the metadata "null version id" is now used when updating the metadata of a null version.
2023-05-25 09:40:20 -04:00
bert-e 58f7bb2877 Merge branch 'w/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.7/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:13 +00:00
bert-e f899337284 Merge branch 'w/8.5/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.6/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:13 +00:00
bert-e b960a913ec Merge branch 'w/7.70/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.5/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:12 +00:00
bert-e 5436c0698e Merge branch 'w/7.10/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/7.70/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:12 +00:00
bert-e 3ff7856a94 Merge branch 'improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/7.10/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:11 +00:00
gaspardmoindrot 57fb5f1403 [CLDSRV-388] Implement GHAS 2023-05-24 22:39:31 +00:00
Francois Ferrand ea284508d7
Update x-amz-restore when updating the expiry date
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 0981fa42f3
Add version name in release runs
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 7e63064a52
Bump github actions
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 71b21e40ca
Add eslint rule to prevent exclusive tests
Lint will fail if it finds any `describe.only` or `it.only`.

Issue: CLDSRV-393
2023-05-24 17:14:24 +02:00
Francois Ferrand ff894bb545
Remove describe.only
This should never have been committed, as it disables most unit tests in
CI.

This caused some tests to actually fail:
* bad import of refactored `objectDelete` api
* getting an object while transitioning (archiving) is allowed

Issue: CLDSRV-393
2023-05-24 17:09:33 +02:00
Francois Ferrand ae9f24e1bb
Update expiry date on s3:restore on restored object
If the object is already restored, we simply need to update the expiry
date, as per AWS docs:
> After restoring an archived object, you can update the restoration
> period by reissuing the request with a new period. Amazon S3 updates
> the restoration period relative to the current time.

Issue: CLDSRV-393
2023-05-24 16:52:45 +02:00
bert-e 2dc01ce3ed Merge branch 'w/8.7/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/q/8.7 2023-05-15 16:39:05 +00:00
Kerkesni 9bd9bef6c7
bump version in package.json to 8.7.20
Issue: CLDSRV-386
2023-05-11 10:34:27 +02:00
bert-e 3a8bbefb6c Merge branch 'w/8.5/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.6/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:27:25 +00:00
bert-e a6a5c273d5 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:27:25 +00:00
Dimitrios Vasilas c329d9684d Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-370-build-dev-img-release' into w/8.5/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 16:23:51 +02:00
bert-e ec5baf1f85 Merge branch 'improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/7.70/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:23:30 +00:00
Dimitrios Vasilas d844fb4fa1 CLDSRV-370: Push non-Federation image as cloudserver/cloudserver:<tag> 2023-05-10 16:19:18 +02:00
Kerkesni 6479076fec
bump node version to 16.20 in Dockerfile
Issue: CLDSRV-386
2023-05-10 13:35:54 +02:00
bert-e c436e2657c Merge branch 'w/8.5/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.6/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:22:48 +00:00
bert-e df45f481d0 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:22:48 +00:00
Dimitrios Vasilas 406f3f0093 Revert "CLDSRV-370: docker-entrypoint: make bucketd bootstrap configurable"
This reverts commit 1d76f61d88.
2023-05-09 19:19:47 +02:00
Dimitrios Vasilas 6952b91539 CLDSRV-370: Pin virtualenv version to 20.21.0
Virtualenv setup fails with the latest version (20.23)
2023-05-09 19:18:40 +02:00
Dimitrios Vasilas eea1ebb5ec Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-370-build-dev-img-release' into w/8.5/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 19:17:35 +02:00
bert-e dae5b7dc28 Merge branch 'improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/7.70/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:12:54 +00:00
Dimitrios Vasilas 1d76f61d88 CLDSRV-370: docker-entrypoint: make bucketd bootstrap configurable 2023-05-09 19:07:50 +02:00
Dimitrios Vasilas 8abe809141 CLDSRV-370: Build dev docker image on release 2023-05-09 19:07:50 +02:00
Dimitrios Vasilas 94b14a258e CLDSRV-370: Pin virtualenv version to 20.21
Virtualenv setup fails with the latest version (20.23)
2023-05-09 19:07:46 +02:00
bert-e cd8c589eba Merge branch 'improvement/CLDSRV-375/exclude-keys' into tmp/octopus/w/8.7/improvement/CLDSRV-375/exclude-keys 2023-04-28 18:20:48 +00:00
williamlardier daec2661ae
CLDSRV-385: use mongodb v4.2 for the CI 2023-04-21 15:03:01 +02:00
Francois Ferrand 0f266371a0
Bump version 8.7.18
Issue: CLDSRV-383
2023-04-17 23:36:28 +02:00
Francois Ferrand 73e56963bf
Fix originOp when deleting a version
DeleteMarkerCreated was sent instead of the expected Delete, which breaks
bucket notifications.

Issue: CLDSRV-383
2023-04-17 23:14:49 +02:00
Nicolas Humbert 4c189b2d9e CLDSRV-375 Exclude already transitioned keys from lifecycle listings 2023-04-16 21:54:16 -07:00
Alexander Chan fb11d0f42e Merge remote-tracking branch 'origin/feature/CLDSRV-368/addBackbeatRouteForIndexingOps' into w/8.7/feature/CLDSRV-368/addBackbeatRouteForIndexingOps 2023-04-14 18:35:38 -07:00
Alexander Chan fe6690da92 bump arsenal 2023-04-14 18:08:42 -07:00
williamlardier 9cbd9f7be8
CLDSRV-381: bump project version 2023-04-14 22:29:03 +02:00
williamlardier c2fc8873cb
CLDSRV-381: bump arsenal 2023-04-14 22:28:47 +02:00
Francois Ferrand bee1ae04bf
Bump version 8.7.15
Issue: CLDSRV-380
2023-04-14 09:06:04 +02:00
Francois Ferrand eb86552a57
Allow reading transition-in-progress objects
This “transition in progress” state does not exist in AWS S3 (so we have no reference), and we need to access the data for the cold storage framework.

When the transition has been performed, the archive id and storage class will be updated first (as well as clearing the ‘transitioning’ flag) before triggering the “GC” to remove the (local) data.

So we are sure that data is available in this state, and that simply checking that the object is in cold storage is enough.

Issue: CLDSRV-380
2023-04-14 09:02:32 +02:00
Alexander Chan 80fbf78d62 CLDSRV-368: add indexing routes 2023-04-13 15:17:03 -07:00
bert-e f5d8f2fac5 Merge branch 'w/8.6/feature/CLDSRV-359-passGetDeleteMarkerFlag' into tmp/octopus/w/8.7/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 19:07:07 +00:00
bert-e b1e13d6efa Merge branch 'w/8.5/feature/CLDSRV-359-passGetDeleteMarkerFlag' into tmp/octopus/w/8.6/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 19:07:07 +00:00
Jonathan Gramain e7ef437b27 Merge remote-tracking branch 'origin/feature/CLDSRV-359-passGetDeleteMarkerFlag' into w/8.5/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 11:42:08 -07:00
Jonathan Gramain 55f652ecc4 CLDSRV-359 bump cloudserver version 2023-04-13 11:40:29 -07:00
Jonathan Gramain 77f56d1fa1 feat: CLDSRV-359 pass getDeleteMarker flag to metadata when needed
Pass the `getDeleteMarker` flag to the Metadata backend when the
Cloudserver operation needs to distinguish whether the target is a
delete marker or does not exist, in order to set the response header
"x-amz-delete-marker" or return a specific error code.
2023-04-13 11:39:36 -07:00
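A hedged sketch of how the caller side might use that information to pick the header and error code; the option and field names below are assumptions, not the actual helper:

```js
// Assumed shape: metadata is queried with { getDeleteMarker: true } so the
// backend can report whether the missing target is actually a delete marker.
function buildNotFoundResponse(objMD) {
    if (objMD && objMD.isDeleteMarker) {
        // Target is a delete marker: surface it via the standard S3 header.
        return { error: 'NoSuchKey', headers: { 'x-amz-delete-marker': 'true' } };
    }
    // Target simply does not exist.
    return { error: 'NoSuchKey', headers: {} };
}
```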
bert-e 36e841b542 Merge branches 'w/8.7/feature/CLDSRV-355-activateNullKeys' and 'q/5069/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/q/8.7 2023-04-13 18:35:42 +00:00
bert-e a2404ed622 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/q/8.6 2023-04-13 18:35:41 +00:00
williamlardier 1d12a430a0
CLDSRV-376: bump to 8.7.13 2023-04-13 16:59:28 +02:00
williamlardier bea27b4fb4
CLDSRV-376: update mongoClient used in functional tests 2023-04-13 16:59:13 +02:00
williamlardier 76405d9179
CLDSRV-376: bump mongodb driver 2023-04-13 16:59:12 +02:00
Alexander Chan 31b7f1e71c bump version 2023-04-12 15:36:00 -07:00
Alexander Chan 8674cac9f8 CLDSRV-379: bump arsenal 2023-04-12 15:35:27 -07:00
KillianG d5b666a246
Improve indentation and use the isExpiration boolean only in the first function; afterwards, use the originOp string directly
Issue: CLDSRV-367
2023-04-11 14:59:02 +00:00
KillianG 4360772971
Improve the way we pass originOp to make it clearer
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 6e152e33d5
Use a boolean parameter instead of a hardcoded originOp
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 94f34979a5
add origin op to all delete object calls
Issue: CLDSRV-367
2023-04-11 13:43:36 +00:00
bert-e 4be430c313 Merge branch 'improvement/CLDSRV-372/vid' into q/8.6 2023-04-07 18:35:02 +00:00
bert-e 4b0f165b46 Merge branches 'w/8.7/improvement/CLDSRV-372/vid' and 'q/5109/8.6/improvement/CLDSRV-372/vid' into tmp/octopus/q/8.7 2023-04-07 18:35:02 +00:00
Nicolas Humbert 3590377554 Merge remote-tracking branch 'origin/improvement/CLDSRV-372/vid' into w/8.7/improvement/CLDSRV-372/vid 2023-04-07 07:58:01 -04:00
Nicolas Humbert f7f77c6cd2 CLDSRV-372 Current lifecycle versions should include version id 2023-04-06 19:09:04 -04:00
bert-e 8a08f97492 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 18:16:48 +00:00
bert-e a908d09cc8 Merge branch 'w/8.5/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-355-activateNullKeys 2023-04-05 18:16:47 +00:00
Jonathan Gramain 170a68a4f8 CLDSRV-355 [8.5+] fixup problematic automerge
Restore missing `require('../Config')` in lib/api/objectDelete.js
2023-04-05 10:57:42 -07:00
bert-e 448afa50e3 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:47 +00:00
bert-e a0fff19611 Merge branch 'w/8.5/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:46 +00:00
bert-e 6ad1643ba8 Merge branch 'w/8.4/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:45 +00:00
Jonathan Gramain 5ce253ef62 Merge remote-tracking branch 'origin/feature/CLDSRV-355-activateNullKeys' into w/8.4/feature/CLDSRV-355-activateNullKeys 2023-04-04 17:27:11 -07:00
Jonathan Gramain 72f4c36077 CLDSRV-355 set 'isNull2' attr in copied null key
In order for other logic to properly detect null keys written from
non-compat Cloudservers, we also need to set the 'isNull2' param in
those keys when we copy them from the master key.
2023-04-04 16:23:25 -07:00
Jonathan Gramain e534af856f feat: CLDSRV-355 activate null keys behavior
Activate the use of null keys in place of null versioned keys by Cloudserver:

- allow processVersioningState() and preprocessingVersioningDelete()
  helpers to return the associated fields for null key handling, which
  tells Cloudserver to set its behavior to create/delete null keys,
  via sending PUT/DELETE requests with `versionId="null"` to the
  Metadata backend

- pass 'isNull' parameter in version-specific requests to hint the
  Metadata backend on what to do (most useful for V1 backend, but also
  to hint V0 backend that it should handle null keys appropriately)

- set "isNull2" metadata attribute when writing a null master, for
  optimization purpose (allows to avoid checking the null versioned
  key on update)
2023-04-04 16:23:25 -07:00
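As a rough illustration of the behavior described above (a sketch only, not the actual helper), a non-compat Cloudserver addresses the null version directly through a null key:

```js
// Sketch: version-specific requests targeting the null version use
// versionId "null", plus an isNull hint so the Metadata backend knows it
// targets a null key (V1) or should handle null keys appropriately (V0).
function buildNullVersionParams(nullVersionCompatMode) {
    if (nullVersionCompatMode) {
        // Legacy behavior: keep using null versioned keys.
        return {};
    }
    return { versionId: 'null', isNull: true };
}
```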
bert-e 5dd8d9057a Merge branch 'w/8.5/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 50b738cfff Merge branch 'w/8.6/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 2be3ce21c7 Merge branch 'w/8.4/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 70ff6fc4ee Merge branch 'feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.4/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:48 +00:00
bert-e c5214d19a6 Merge branch 'w/8.5/feature/CLDSRV-378-forceEnableNullCompatMode' into tmp/octopus/w/8.6/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 22:27:20 +00:00
bert-e 951a98fcaf Merge branch 'w/8.6/feature/CLDSRV-378-forceEnableNullCompatMode' into tmp/octopus/w/8.7/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 22:27:20 +00:00
Jonathan Gramain ebb0fed48a Merge remote-tracking branch 'origin/feature/CLDSRV-378-forceEnableNullCompatMode' into w/8.5/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 15:17:14 -07:00
Jonathan Gramain 5f85c14ab9 CLDSRV-378 [8.x] force null version compat mode
Force null version compatibility mode to be enabled, so that
Cloudserver stays compatible with MongoDB backends that do not support null
keys.

Remove the associated aws-sdk functional test suite related to
compatibility mode
2023-04-04 14:39:00 -07:00
bert-e 8ca770dcb7 Merge branch 'w/8.6/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into tmp/octopus/w/8.7/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 21:28:15 +00:00
bert-e 7923977300 Merge branch 'w/8.5/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into tmp/octopus/w/8.6/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 21:28:14 +00:00
Jonathan Gramain 3597c146d7 CLDSRV-358 fix issue deleting master null version
Fix an issue that occurred when deleting a null version that was the
current version AND that had a null key. This may happen in various
cases, e.g. if the repair process did repair the master by the null
version, in which case it would not delete the null versioned key
(this has been fixed with null keys).

The fix is to not send the `isNull: true` parameter to Metadata merely
because Cloudserver is not in compatibility mode; instead, only send
this parameter if the master key has the `isNull2: true` parameter set
(meaning it was put by a non-compat Cloudserver).
2023-04-04 14:14:56 -07:00
Jonathan Gramain c81e49ba9b CLDSRV-358 fix deletion of null key in null compat mode
A version-specific DELETE of the null version did not work if:

- the request comes from a compat-mode Cloudserver

- the null version had been put by a non-compat mode Cloudserver

To handle this case properly, we look at the "isNull2" field of the
fetched null version, which is only set by a non-compat Cloudserver; in
that case we send the "isNull" param to Metadata to instruct it to
delete the null key instead of a null versioned key.
2023-04-04 14:14:56 -07:00
Jonathan Gramain e93c064b5f feat: CLDSRV-358 preprocessingVersioningDelete() update for null keys
Add support for null keys in the preprocessingVersioningDelete()
helper: essentially, set an 'isNull' boolean param that gets passed to
Metadata, telling whether the version to delete is a null version.

NOTE: The null version compatibility mode is still enforced for now
until all pieces are updated to make functional tests pass without the
compatibility mode.
2023-04-04 14:14:55 -07:00
Jonathan Gramain 2b3774600d CLDSRV-358 [test] fix error code checking
In functional tests of 'objectDelete', an "afterAll" cleanup can crash
because it checks an error code before checking if there's an error
object.

It does not crash in normal circumstances because there is an actual
error due to the last unit test trying to clean the bucket, but any
change in the unit tests that left the bucket in existence
would have triggered this issue.
2023-04-04 14:14:55 -07:00
Jonathan Gramain a6951f2ef8 CLDSRV-358 [cleanup] objectDelete: remove unused assignment
Remove unused assignment of 'deleteInfo.isNull'
2023-04-04 14:14:55 -07:00
Jonathan Gramain 9fb232861f Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into w/8.5/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 14:03:37 -07:00
Jonathan Gramain 6cf4e291fa Merge remote-tracking branch 'origin/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into w/8.4/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 13:21:30 -07:00
Jonathan Gramain 06b4320e7d CLDSRV-357 fix behavior change causing issue with mongo backend
When looking for a null version to delete, the code had changed its
behavior: it triggered a delete on the metadata even when the null
version had already been deleted. While technically not an issue for S3C, as
Metadata returns a 200 when attempting to delete a non-existing
version (albeit creating a useless request), the MongoDB backend
returns a NoSuchKey error in that case, leading to an issue.

Revert the behavior so as not to trigger a delete of the null version
when it does not exist.
2023-04-04 13:17:24 -07:00
bert-e 3585b8d5eb Merge branch 'w/8.6/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
bert-e 9331c0a375 Merge branch 'w/8.5/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
bert-e 70f368408d Merge branch 'w/8.4/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
Jonathan Gramain a63762ae71 CLDSRV-357 honor 'delOptions.deleteData' even if master is null
Move check of 'delOptions.deleteData' in prepareNullVersionDeletion()
earlier, so that it is honored even if the master key is a null
version.

This goes with the new possibility to return 'delOptions' without
deleteData in order to delete an existing null key for the master key
(done as part of CLDSRV-353).
2023-04-04 09:52:22 -07:00
Jonathan Gramain f0420572c8 feat: CLDSRV-357 pass deleteNullKey param to backend
Pass the 'deleteNullKey' param that processVersioningState() may set
to the Metadata backend, which tells it to delete the null key as the
PUT is executed.
2023-04-04 09:52:22 -07:00
Jonathan Gramain b1fd915ba3 feat: CLDSRV-357 update versioningPreprocessing() helper for null keys
Modify the code flow of versioningPreprocessing() to support null keys
in addition to the legacy "null versioned keys".

NOTE: The null version compatibility mode is still enforced for now
until all pieces are updated to make functional tests pass without the
compatibility mode.
2023-04-04 09:52:22 -07:00
Jonathan Gramain 4285c18e44 Merge remote-tracking branch 'origin/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into w/8.4/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 09:49:28 -07:00
Jonathan Gramain 71ffd004df CLDSRV-353 bump arsenal dep 2023-04-04 09:46:07 -07:00
Jonathan Gramain f674104825 CLDSRV-353 + case of delete null versioned key
Add a case in processVersioningState() to delete the null versioned
key where the key is a legacy null version (we know this because
`isNull2` is not set) and we are going to write it as a null key,
because in older Cloudservers there may be a null versioned key
associated with the null master in certain circumstances.
2023-04-03 18:11:15 -07:00
Jonathan Gramain 9c9d4b3e7c CLDSRV-353 remove conversion of null version key to null key
Fix an issue that occurred when converting a null versioned key into a
null key (that would occur when a non-compat mode Cloudserver updates
a compat-mode object having a noncurrent null version). The issue was
that the null key was updated with the master's contents instead of
the original null version contents.

The fix consists of keeping backward compatibility by setting a
`nullVersionId` instead, which avoids having to read the null version
metadata first. It is not important to convert those keys, as the
migration from V0 to V1 will necessarily have to convert existing
legacy null versions anyway.
2023-04-03 18:11:15 -07:00
Jonathan Gramain 13265a3d6e CLDSRV-353 [optim] no legacy null version key deletion
When the master has been updated by a null-key-enabled
Cloudserver, there can no longer be an associated versioned key (as the new
behavior guarantees that a null master cannot have an associated null
versioned key, see S3C-7526).

Thanks to this, we can avoid having to check the versioned key for
deletion when a null master version is updated on a
versioning-suspended bucket, which is arguably a rather common use
case.

To implement that, we will add a "isNull2" attribute to the master key
in a subsequent commit (part of CLDSRV-355) when Cloudserver is not in
null version compatibility mode.

This commit implements the optimization by checking the new
"isNull2" metadata attribute, and skipping the null version check in
case the flag is set.
2023-04-03 18:10:07 -07:00
Jonathan Gramain 31c5316a7e feat: CLDSRV-353 processVersioningState() null key support
Add support for null key handling for the helper
processVersioningState(), and maintain the null version compatibility
mode to keep the old behavior - for now, the calling code sets this
flag to "true" without using the config to maintain current behavior,
it will be changed with CLDSRV-355.

One slight behavior change in compatibility mode is that when an old
null versioned key is deleted due to a PUT overwriting the null
version, we do not send the "replayId" to the DELETE request, but
instead, rely on the "oldReplayId" sent by the PUT, because this is
the normal way of letting metadata know how to get rid of the existing
replayId for the existing null version on versioning-suspended buckets.
2023-04-03 18:10:07 -07:00
bert-e 0a1489ee46 Merge branch 'w/8.6/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.7/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:59 +00:00
bert-e 71f80544ac Merge branch 'w/8.5/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.6/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:58 +00:00
bert-e 270080a75b Merge branch 'w/8.4/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.5/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:58 +00:00
bert-e 74717b2acb Merge branch 'w/7.70/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.4/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:57 +00:00
bert-e ef81f3e58f Merge branch 'improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/7.70/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:57 +00:00
Jonathan Gramain aa55a87a65 impr: CLDSRV-373 new func test for S3C-5139
Add a new functional test in aws-node-sdk that reproduces the steps
that formerly triggered S3C-5139, which was silently fixed since it
was opened.

It is useful also for S3C-7352 because it covers a corner case for
both old and new null version handling in v0 and v1 metadata formats.
2023-04-03 09:30:35 -07:00
Xin LI de5b4331e2 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 11:00:24 +02:00
Xin LI e1a4f1ef8c bugfix: CLDSRV-365 bump 2023-03-31 10:58:07 +02:00
bert-e 46dff0321d Merge branch 'w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:07 +00:00
bert-e f3c7580510 Merge branch 'w/8.4/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.5/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:06 +00:00
bert-e 2145bb3ae3 Merge branch 'w/8.5/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:06 +00:00
Xin LI 468162c81c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/8.4/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 10:53:21 +02:00
Xin LI 89f9139203 Merge remote-tracking branch 'origin/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/7.70/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 10:48:44 +02:00
Xin LI 8153554a4c bugfix: CLDSRV-365 bump version 2023-03-31 10:46:47 +02:00
Xin LI fb9063bccc bugfix: CLDSRV-365 fix legal hold can be deleted issue and add more tests 2023-03-31 10:46:47 +02:00
bert-e ddc6ea72be Merge branch 'improvement/CLDSRV-371/etag' into tmp/octopus/w/8.7/improvement/CLDSRV-371/etag 2023-03-29 20:22:38 +00:00
Nicolas Humbert f20bf1becf CLDSRV-371 ETag should be surrounded by double quotes 2023-03-29 16:16:52 -04:00
bert-e d31c773e77 Merge branch 'w/8.4/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.5/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e d266ff4e9f Merge branch 'w/8.6/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.7/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e 6ff21996f5 Merge branch 'w/8.5/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.6/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e 15d1b3ba86 Merge branch 'w/7.70/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.4/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:16 +00:00
bert-e 827c752e9a Merge branch 'improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/7.70/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:15 +00:00
Jonathan Gramain 82dc837610 impr: CLDSRV-369 new version-specific DELETE functional test
Add a new functional test in the aws-node-sdk test suite, that

- creates a nonversioned key

- then enables versioning and creates 5 more versioned keys

- then deletes the 5 versioned keys in a batch delete

- it expects the null version to be the only remaining version

Its primary purpose is to check version-specific DELETE with a null
version in V1 metadata format (MD-847), but it is a good addition to
the existing test suite also in V0 format.
2023-03-28 14:09:10 -07:00
bert-e 7dc2f07cb6 Merge branch 'w/8.7/improvement/CLDSRV-366/clear' into tmp/octopus/q/8.7 2023-03-28 13:25:15 +00:00
Kerkesni 6c22d87c55
bump version to 8.7.11
Issue: CLDSRV-362
2023-03-28 12:25:11 +02:00
Kerkesni 310f67d3a7
throw error when getting a transitioning object
Issue: CLDSRV-362
2023-03-28 12:24:50 +02:00
Kerkesni 49841c5e0e
throw error when copying parts from a cold object
A cold object should not be allowed to get copied as the data
is not accessible.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
Kerkesni b5334baca8
throw error when copying a cold or transitioning object
A cold object should not be allowed to get copied as the data
is not accessible.

Same issue happens when copying an object that is transitioning,
the data might get deleted while copying is still in progress.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
Kerkesni e592671b54
add helper to check if object is in cold storage
Issue: CLDSRV-362
2023-03-28 12:24:48 +02:00
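A hedged sketch of such a helper, assuming archive status is tracked in the object's metadata (the field names below are assumptions made for illustration):

```js
// Sketch: an object is "cold" when it has been archived and not (or no
// longer) restored, so its data is not accessible locally.
function isObjectCold(objMD) {
    const archive = objMD && objMD.archive;
    if (!archive || !archive.archiveInfo) {
        return false;
    }
    const restored = archive.restoreCompletedAt
        && archive.restoreWillExpireAt
        && new Date(archive.restoreWillExpireAt) > new Date();
    return !restored;
}
```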
bert-e 6e0b66849d Merge branch 'improvement/CLDSRV-366/clear' into tmp/octopus/w/8.7/improvement/CLDSRV-366/clear 2023-03-28 03:45:02 +00:00
Nicolas Humbert f2292f1ca3 CLDSRV-366 Clear list orphan delete markers response 2023-03-27 15:52:49 -04:00
bert-e 18a1bfd325 Merge branch 'w/8.6/improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.7/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 23:39:13 +00:00
bert-e c2b54702f6 Merge branch 'w/8.5/improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.6/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 23:39:12 +00:00
Jonathan Gramain 13a5e14da5 impr: CLDSRV-356 [8.5+] adapt overwritingVersioning() for archive
Due to the change in what processVersioningState() returns
(nullVersionId embedded in an "extraMD" field for clarity), modify the
overwritingVersioning() helper, which needs to have the same contract
as the former function.
2023-03-24 16:37:24 -07:00
Jonathan Gramain 891913fd16 Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-356-enhanceProcessVersioningState' into w/8.5/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 15:52:32 -07:00
bert-e 7baa2501e6 Merge branch 'improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.4/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 22:50:02 +00:00
Jonathan Gramain 8e808afec9 impr: CLDSRV-356 enhance processVersioningState() and fix replayId
- enhance general flow of processVersioningState(), to make it easier
  to read and update for null key handling

- fix an issue related to passing the uploadId for nonversioned
  buckets (linked to S3C-7361): remove a check "master.isNull" to also
  pass the uploadId as replayId when the master is non-versioned, so
  that it can be deleted by passing it to the metadata DELETE request

- make processVersioningState() return a 'nullVersionId' param rather
  than a "storeOptions", as it is always used to copy the master to a null
  version; this simplifies the API a bit

- make processVersioningState() return extra metadata params to set in
  the object ("nullVersionId" and "nullUploadId") in their own
  "extraMD" object, for clarity

- remove undefined params returned by the function to have clean unit
  tests
2023-03-24 12:51:48 -07:00
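A sketch of the return shape implied by the bullet points above; the surrounding field names and values are illustrative assumptions, only the "extraMD" grouping is taken from the commit message:

```js
// Sketch: extra object-metadata params are grouped under "extraMD"
// instead of being returned as a separate storeOptions object.
const exampleResult = {
    extraMD: {
        nullVersionId: '98765',   // set when the master is kept as a null version
        nullUploadId: 'mpu-123',  // set when that null version has an ongoing MPU
    },
};
```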
bert-e 2c999f4c10 Merge branch 'w/8.6/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:54 +00:00
bert-e b23472a754 Merge branch 'w/8.5/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:54 +00:00
bert-e a4999c1bfb Merge branch 'w/8.4/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:53 +00:00
bert-e fe0b0f8b2f Merge branch 'feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.4/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:53 +00:00
Jonathan Gramain c2bee23fd1 ft: CLDSRV-354 handle null keys in metadata GET helpers
Update the helpers in metadataUtils to handle null keys, as well as
keeping backward compatibility with null versioned keys.

The main change in logic for null keys is that, instead of fetching
first the master key then the null versioned key, we first attempt to
fetch the null key, and if not found, we fetch the master key (we may
then also have to fetch the null versioned key for backward
compatibility).

Take this opportunity to reduce tech debt by reorganizing the helpers'
responsibilities in a better way, and by using the "validateBucket"
helper.
2023-03-24 12:45:07 -07:00
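A minimal sketch of the lookup order described above, with a hypothetical injected `getObjectMD` function standing in for the real metadata call:

```js
// Sketch: resolve the "null" version under the new scheme, falling back
// to the legacy layout for backward compatibility.
async function getNullVersionMD(getObjectMD, bucket, key) {
    // 1. New behavior: try the dedicated null key first.
    const nullKeyMD = await getObjectMD(bucket, key, { versionId: 'null' });
    if (nullKeyMD) {
        return nullKeyMD;
    }
    // 2. Fall back to the master key...
    const masterMD = await getObjectMD(bucket, key, {});
    if (masterMD && masterMD.isNull) {
        return masterMD;
    }
    // 3. ...and, for backward compatibility, to the legacy null versioned key.
    if (masterMD && masterMD.nullVersionId) {
        return getObjectMD(bucket, key, { versionId: masterMD.nullVersionId });
    }
    return null;
}
```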
Jonathan Gramain e87c2a4e5f CLDSRV-354 [cleanup] new helper metadataUtils.validateBucket()
Factorize logic to validate a bucket and return the relevant error
code in a helper function (checks on bucketShield, bucket policies,
then bucket authorization)
2023-03-24 12:42:02 -07:00
Jonathan Gramain db943cd634 CLDSRV-354 [optim] remove unnecessary check in getVersionIdResHeader() 2023-03-24 12:42:02 -07:00
bert-e bf7a643d45 Merge branch 'w/8.6/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into tmp/octopus/w/8.7/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 19:07:15 +00:00
bert-e 874a53c767 Merge branch 'w/8.5/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into tmp/octopus/w/8.6/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 19:07:14 +00:00
Jonathan Gramain c7e1c6921b Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into w/8.5/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 11:43:47 -07:00
Jonathan Gramain 6d2d56bc1e Merge remote-tracking branch 'origin/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into w/8.4/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 11:04:47 -07:00
bert-e 3f3bf0fdf0 Merge branches 'w/8.7/bugfix/CLDSRV-363/etag' and 'q/5078/8.6/bugfix/CLDSRV-363/etag' into tmp/octopus/q/8.7 2023-03-24 18:01:38 +00:00
bert-e 1922facb7b Merge branch 'bugfix/CLDSRV-363/etag' into q/8.6 2023-03-24 18:01:38 +00:00
Jonathan Gramain fff03d3320 CLDSRV-349 [tests] func test stage for ENABLE_NULL_VERSION_COMPAT_MODE
Turn the "file-ft-tests" job into a matrix, to duplicate the suite
with and without the ENABLE_NULL_VERSION_COMPAT_MODE environment variable.

This will make sure Cloudserver behaves correctly (versioning, null
version handling, etc.) when the compatibility mode is active.
2023-03-24 10:36:39 -07:00
Jonathan Gramain 6e79d3f1a4 ft: CLDSRV-349 support ENABLE_NULL_VERSION_COMPAT_MODE env var
Cloudserver sets a flag in its configuration when the
ENABLE_NULL_VERSION_COMPAT_MODE environment variable is set to "true".
2023-03-24 10:36:39 -07:00
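Based only on the message above, the configuration hook is essentially an environment-variable check along these lines (a sketch; where and how the flag is stored is an assumption):

```js
// Sketch: the compatibility flag is only active when the variable is exactly "true".
const nullVersionCompatMode =
    process.env.ENABLE_NULL_VERSION_COMPAT_MODE === 'true';
```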
bert-e 2a44949048 Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:06 +00:00
bert-e 1576352613 Merge branch 'w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:06 +00:00
bert-e 74978f423e Merge branch 'w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:05 +00:00
bert-e 6f4cd75d6f Merge branch 'w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:05 +00:00
bert-e 00906d04f5 Merge branch 'bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:04 +00:00
Jonathan Gramain 270339f2bb CLDSRV-361 guard before accessing err.is field 2023-03-24 09:51:22 -07:00
bert-e 6660626190 Merge branch 'bugfix/CLDSRV-363/etag' into tmp/octopus/w/8.7/bugfix/CLDSRV-363/etag 2023-03-24 13:23:06 +00:00
Nicolas Humbert 049f52bf95 CLDSRV-363 ETag instead of Etag for lifecycle listings Contents 2023-03-23 16:51:12 -04:00
williamlardier 58fc0b7146
CLDSRV-350: bump to 8.7.10 2023-03-21 13:52:26 +01:00
williamlardier 11e3d7ecb2
CLDSRV-350: update veeam put and delete routes with new arsenal methods
We must ensure that concurrent updates of the bucket metadata won't conflict
with each other, by separately updating the capabilities fields. This change
ensures that two files can be uploaded at the same without any problem,
regardless of the number of cloudserver instances.
2023-03-21 13:52:25 +01:00
williamlardier 1bab851ce3
CLDSRV-350: bump arsenal version 2023-03-21 13:52:25 +01:00
bert-e 0bc0341f33 Merge branch 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:37 +00:00
bert-e b5af652dc8 Merge branch 'w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:37 +00:00
bert-e 6c29be5137 Merge branch 'w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:36 +00:00
Jonathan Gramain 2967f327ed Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 15:08:01 -07:00
Jonathan Gramain 0f8a56e9b5 Merge remote-tracking branch 'origin/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 14:25:03 -07:00
Jonathan Gramain c1d2601237 CLDSRV-361 [8.x fix] backbeat multiple backend route fix
Fix handling of response of metadataGetObject() in multiple backend
PutTagging and DeleteTagging when target object does not exist: return
error code NoSuchKey in this case.

Note: NoSuchKey used to be returned by the metadataGetObject() helper, but
not always; now it is never returned, and instead the returned "objMD"
value is null.
2023-03-20 13:25:09 -07:00
Jonathan Gramain 885f95606c bugfix: CLDSRV-361 fix exception with batch delete of null version
- Fix an exception in Cloudserver when doing a batch delete containing a
  deletion of a null version that does not exist on a versioned object

- The changes also fix the return code when fetching a noncurrent null
  version that was deleted: in such case it was returning NoSuchKey,
  now returns NoSuchVersion per spec.

The change consists of:

- consolidate and fix the API contract of metadataGetObject() and
  metadataValidateBucketAndObj() for "not found" objects or object
  versions: there was a mix of return codes from Metadata (NoSuchKey)
  in some cases and an OK response with no metadata in others, depending on
  the exact scenario and object state. Fixed by always returning an OK
  response and no metadata if the target version is not found, to let
  the caller set the appropriate error code in the API response

- for multiObjectDelete, fix by handling the OK response with empty
  object metadata response specifically as a "not found" case, like
  for other API calls
2023-03-20 13:12:02 -07:00
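A sketch of the consolidated contract described above: the helper resolves with no metadata rather than an error, and the caller decides the API error code (the function and parameter names below are assumptions):

```js
// Sketch of the caller side after the contract change: "not found" is
// signalled by an empty objMD, not by a NoSuchKey error from Metadata.
function checkTarget(err, objMD, reqVersionId) {
    if (err) {
        // Real backend errors still propagate.
        return { error: err };
    }
    if (!objMD) {
        // The caller picks the right S3 error code for a missing target.
        return { error: reqVersionId ? 'NoSuchVersion' : 'NoSuchKey' };
    }
    return { objMD };
}
```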
bert-e b5b0f6482b Merge branch 'feature/CLDSRV-317/listLifecycleOrphans' into tmp/octopus/w/8.7/feature/CLDSRV-317/listLifecycleOrphans 2023-03-20 13:53:09 +00:00
Nicolas Humbert ec9ed94555 CLDSRV-317 Implement listLifecycleOrphans 2023-03-20 09:52:42 -04:00
bert-e 755f282f8e Merge branch 'feature/CLDSRV-316/listLifecycleNonCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-316/listLifecycleNonCurrents 2023-03-17 18:00:21 +00:00
Nicolas Humbert 41cc399d85 CLDSRV-316 Implement listLifecycleNonCurrents 2023-03-17 13:58:22 -04:00
bert-e c4dc928de2 Merge branch 'feature/CLDSRV-314/listLifecycleCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-314/listLifecycleCurrents 2023-03-17 16:20:16 +00:00
Nicolas Humbert 6b8a2581b6 CLDSRV-314 Implement listLifecycleCurrents 2023-03-17 11:48:02 -04:00
Killian Gardahaut a0087e8d77
Bump 8.7.9
Issue: ZKOP-219
2023-03-17 09:58:21 +01:00
KillianG 8e5bea56b6
Refactor tests for more readability
Issue: CLDSRV-337
2023-03-17 09:58:21 +01:00
KillianG 976e349036
Add tests
Add a test for the function azureArchiveLocationConstraintAssert

Issue: CLDSRV-337
2023-03-17 09:58:16 +01:00
KillianG de1c23ac1b
Add test on location constraints to ensure the location is well configured
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 0b4d04a2a3
Add location azure archive to cold storage locations
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 049d396c8d
Add azure_archive location type
ISSUE: CLDSRV-337
2023-03-17 09:56:35 +01:00
Naren 5c04cbe6d1 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-327-cloudserver-metrics' into w/8.7/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 22:36:59 -07:00
Naren d3e538087a Merge remote-tracking branch 'origin/w/8.5/improvement/CLDSRV-327-cloudserver-metrics' into w/8.6/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 22:05:26 -07:00
bert-e 7cc37c7f3d Merge branch 'w/8.4/improvement/CLDSRV-327-cloudserver-metrics' into tmp/octopus/w/8.5/improvement/CLDSRV-327-cloudserver-metrics 2023-03-17 03:50:44 +00:00
Naren 399d081d68 impr: CLDSRV-327 upgrade arsenal, bucketclient, prom-client, utapi, vaultclient 2023-03-16 20:33:03 -07:00
Naren c3fac24366 Merge remote-tracking branch 'origin/improvement/CLDSRV-327-cloudserver-metrics' into w/8.4/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 20:23:37 -07:00
Naren 82687aa1a7 impr: CLDSRV-327 minor updates
add info log on metric server start.
refactor unit tests.
2023-03-16 17:35:28 -07:00
Naren 820ada48ce impr: CLDSRV-327 bump version to 7.70.18 2023-03-16 15:43:56 -07:00
Naren df73cc7ebc impr: CLDSRV-327 upgrade arsenal, bucketclient, prom-client, utapi, vaultclient 2023-03-16 15:43:07 -07:00
Naren 429c62087d impr: CLDSRV-327 refactor metrics server
prom-client does not aggregate metrics from all workers by itself, so the
metrics server is moved to the master and metrics are aggregated using AggregatorRegistry.
Metrics are moved to a separate file to avoid confusion with the metrics
handler.
2023-03-16 15:39:13 -07:00
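A minimal sketch of the pattern described above, using prom-client's AggregatorRegistry in the cluster master; the port and the way the server is wired are assumptions:

```js
const http = require('http');
const cluster = require('cluster');
const { AggregatorRegistry } = require('prom-client');

if (cluster.isMaster) {
    // Workers report their metrics to the master, which aggregates and serves them.
    const aggregatorRegistry = new AggregatorRegistry();
    http.createServer((req, res) => {
        // clusterMetrics() is Promise-based in recent prom-client versions.
        aggregatorRegistry.clusterMetrics()
            .then(metrics => {
                res.setHeader('Content-Type', aggregatorRegistry.contentType);
                res.end(metrics);
            })
            .catch(err => {
                res.statusCode = 500;
                res.end(err.message);
            });
    }).listen(8002); // assumed metrics port
}
```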
Naren 13fa26986d impr: CLDSRV-327 linter corrections 2023-03-02 09:52:39 -08:00
bert-e 5cb63991a8 Merge branch 'w/8.6/improvement/CLDSRV-328-adapt-prescribed-metric-names' into tmp/octopus/w/8.7/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 16:30:18 +00:00
Naren d5b336d1d9 Merge remote-tracking branch 'origin/w/8.5/improvement/CLDSRV-328-adapt-prescribed-metric-names' into w/8.6/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 08:29:39 -08:00
bert-e 750223500d Merge branch 'improvement/CLDSRV-328-adapt-prescribed-metric-names' into tmp/octopus/w/8.5/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 15:31:17 +00:00
Naren 23ffbf77d2 impr: CLDSRV-328 fix ceph java deps installation 2023-03-02 07:29:22 -08:00
Naren 6ea18bcef4 impr: CLDSRV-328 adapt metric naming conventions 2023-03-02 06:44:45 -08:00
Naren c45dac7ffc impr: CLDSRV-327 add monitoring tests 2023-02-28 11:33:12 -08:00
Naren 878fc6819f impr: CLDSRV-327 generate cloudserver metrics 2023-02-28 11:32:37 -08:00
Naren 43592f9392 impr: CLDSRV-327 setup cloudserver monitoring 2023-02-28 11:32:13 -08:00
Naren dbd1383c32 impr: CLDSRV-327 add prom-client 2023-02-28 11:32:13 -08:00
Alexander Chan c310cb3dd1 Merge remote-tracking branch 'origin/w/8.6/feature/CLDSRV-336/supportNewerNoncurrentVersions' into w/8.7/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-26 18:47:15 -08:00
bert-e 7fe0e2891b Merge branch 'w/8.5/feature/CLDSRV-336/supportNewerNoncurrentVersions' into tmp/octopus/w/8.6/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-27 02:43:32 +00:00
bert-e 93442fed68 Merge branch 'w/8.4/feature/CLDSRV-336/supportNewerNoncurrentVersions' into tmp/octopus/w/8.5/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-27 02:43:31 +00:00
Alexander Chan 21612cfadd Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-336/supportNewerNoncurrentVersions' into w/8.4/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-26 18:20:17 -08:00
bert-e 644062f088 Merge branch 'feature/CLDSRV-336/supportNewerNoncurrentVersions' into tmp/octopus/w/7.70/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-25 01:39:32 +00:00
Alexander Chan d0eb81539e bump arsenal dependency 2023-02-24 16:52:33 -08:00
bert-e 22cda51944 Merge branch 'w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/q/8.7 2023-02-22 16:39:53 +00:00
williamlardier 408d0de732
CLDSRV-343: bump cloudserver to the next version 2023-02-22 09:59:09 +01:00
williamlardier 83916c91fb
CLDSRV-343: re-enable some CEPH backend tests
These tests also cover the ObjectTagging API with multiple backends.
Re-enabling them will allow us to better avoid issues like this
in the future.
2023-02-17 14:24:59 +01:00
bert-e 110b2a35ed Merge branch 'w/8.6/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:38 +00:00
bert-e 3b5f5875f3 Merge branch 'w/8.5/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.6/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:37 +00:00
bert-e bdaf92023f Merge branch 'w/8.4/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.5/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:37 +00:00
bert-e 25d1cd9601 Merge branch 'w/7.70/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.4/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:36 +00:00
bert-e 91c9eb6faa Merge branch 'bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/7.70/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:36 +00:00
Jonathan Gramain 6306cf7fc7 [tests] CLDSRV-344 fix func test double callback
In the completeMPU test of the aws-node-sdk test suite, fix a case of
a callback being called twice, which can make the test fail when run locally
with mocha.
2023-02-16 16:52:14 -08:00
williamlardier a8117ca037
CLDSRV-343: use bucket name for backend tagging operations 2023-02-16 15:51:49 +01:00
bert-e 9145d1cf79 Merge branches 'w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.7 2023-02-15 20:43:46 +00:00
bert-e 0fb54c9d31 Merge branches 'w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.6 2023-02-15 20:43:46 +00:00
bert-e 63dc33a339 Merge branches 'w/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.5 2023-02-15 20:43:45 +00:00
bert-e 49d46dfe04 Merge branches 'w/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/7.70/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.4 2023-02-15 20:43:44 +00:00
bert-e 4bb331392e Merge branch 'improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into q/7.70 2023-02-15 20:43:44 +00:00
bert-e ae1b6dc3d1 Merge branch 'w/8.6/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.7/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
bert-e 162157580f Merge branch 'w/8.5/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.6/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
bert-e 4e4435d82e Merge branch 'w/8.4/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.5/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
Alexander Chan b0db1f9a94 Merge remote-tracking branch 'origin/feature/CLDSRV-342/bump-7.70.16' into w/8.4/feature/CLDSRV-342/bump-7.70.16 2023-02-14 12:03:01 -08:00
Alexander Chan 35d269c27c bump version 7.70.16 2023-02-13 10:49:17 -08:00
bert-e b1304b5f7f Merge branches 'w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.7 2023-02-10 12:57:22 +00:00
bert-e c355422a7e Merge branches 'w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.6 2023-02-10 12:57:22 +00:00
bert-e d44334ad22 Merge branches 'w/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.5 2023-02-10 12:57:21 +00:00
bert-e 6e9c50eeba Merge branches 'w/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/7.70/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.4 2023-02-10 12:57:21 +00:00
bert-e 6c7be8892c Merge branch 'bugfix/CLDSRV-338/fixMaxKeysV2Listing' into q/7.10 2023-02-10 12:57:20 +00:00
bert-e 82df91debb Merge branches 'w/7.70/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/7.10/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/7.70 2023-02-10 12:57:20 +00:00
bert-e 6b1f8c61ec Merge branch 'w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:09 +00:00
bert-e a12d44dc18 Merge branch 'w/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:08 +00:00
bert-e d5ec32fc5c Merge branch 'w/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:08 +00:00
Jonathan Gramain e16da9ab11 Merge remote-tracking branch 'origin/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into w/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 14:43:41 -08:00
Jonathan Gramain d43e8d01bf impr: CLDSRV-340 improve preprocessingVersioningDelete()
- remove dead code handling nullVersionId and when no reference to
  null version is found: the confusion is that the metadata is already
  coming from the null version fetch due to the "reqVersionId" being
  "null", so it can only be the null version itself.

- remove the "isNull" argument returned as it is misused (for metrics)
  and can be incorrect, see S3C-7440

- simplify the function by reorganizing the logic and removing the
  callback argument, just returning an options object

- delete replay keys on multiobject delete, instead of passing the
  replayId option via preprocessingVersioningDelete() (a similar
  change has been done in objectDelete but it resulted in duplication
  of replayId setting)

- remove all MPU-related tests for this helper: as the helper does not
  return replayId anymore, they became irrelevant
2023-02-09 14:00:56 -08:00
bert-e 335bfabed1 Merge branch 'w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e 3b92eaaef2 Merge branch 'w/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e a6fd8b2261 Merge branch 'w/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e 00ab8d482d Merge branch 'w/7.70/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:14 +00:00
bert-e 29551f7edf Merge branch 'bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/7.70/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:14 +00:00
Alexander Chan 7dd022f6cb CLDSRV-338: fix v2 listing with max-keys 0
Addresses an issue in which the listing XML does not return a value for
`max-keys` when it is 0.
2023-02-08 21:10:19 -08:00
bert-e 3398db3c0f Merge branch 'w/8.6/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.7/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:19 +00:00
bert-e 00a793be6e Merge branch 'w/8.5/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.6/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:18 +00:00
bert-e 68bb824b57 Merge branch 'w/8.4/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.5/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:18 +00:00
Jonathan Gramain 432680841e Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-339-revert-S3C-7054' into w/8.4/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 14:52:41 -08:00
bert-e b2641f5c1b Merge branch 'bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/7.70/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 22:49:55 +00:00
Jonathan Gramain 66c34e0272 CLDSRV-331 Revert "CLDSRV-301 - use isNonversionedBucket flag"
This reverts commit 3da8f88a12.
2023-02-06 14:41:06 -08:00
bert-e 836e9fb22d Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:46 +00:00
bert-e 9bc7fa49ea Merge branch 'w/8.5/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.6/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:45 +00:00
bert-e e3087fb940 Merge branch 'w/8.4/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.5/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:45 +00:00
Dimitrios Vasilas 67e126320c Revert "CLDSRV-335: build federation image in tests workflow"
This reverts commit fd669664a6.
2023-02-02 04:21:00 -05:00
Dimitrios Vasilas 66520571d3 Revert "CLDSRV-335: build federation image in tests workflow"
This reverts commit fd669664a6.
2023-02-02 04:19:20 -05:00
bert-e ead7f5f7c2 Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:42 +00:00
bert-e fe636d22fc Merge branch 'w/8.5/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.6/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:41 +00:00
bert-e 6530e70761 Merge branch 'w/8.4/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.5/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:41 +00:00
bert-e 6d14bda3ed Merge branch 'w/7.70/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.4/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:40 +00:00
bert-e 416634cf11 Merge branch 'bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/7.70/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:39 +00:00
Dimitrios Vasilas fd669664a6 CLDSRV-335: build federation image in tests workflow 2023-01-31 05:20:46 -05:00
bert-e c17059dc77 Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:51 +00:00
bert-e b4617f1362 Merge branch 'w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:51 +00:00
bert-e 624d4708cf Merge branch 'w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:50 +00:00
bert-e 95c180e9d9 Merge branch 'w/7.70/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:50 +00:00
bert-e 5a2b465d0f Merge branch 'improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/7.70/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:49 +00:00
Dimitrios Vasilas 2cd10e7195 CLDSRV-333 - Make docker image compatible with scality's registry
Images built with the `provenance` flag set to `true`
are currently not supported by our registry and older versions of docker.
2023-01-27 12:20:36 -05:00
bert-e 8ace5b24a5 Merge branches 'development/8.7' and 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:48 +00:00
bert-e 4b1dcd531d Merge branch 'w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:47 +00:00
bert-e 13ef509cbc Merge branch 'w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:47 +00:00
Dimitrios Vasilas d4feda7bbd CLDSRV-333: remove parentheses around single function argument 2023-01-25 09:34:47 -05:00
bert-e 39f7035dbd Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 14:13:42 +00:00
bert-e 7d3ab342f6 Merge branch 'w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 14:13:41 +00:00
Dimitrios Vasilas af60df4caf Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 09:12:54 -05:00
Dimitrios Vasilas 2acd7348d4 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 09:08:52 -05:00
williamlardier bb62ed4fa7
CLDSRV-334: bump cloudserver to 8.7.7 2023-01-24 12:33:37 +01:00
williamlardier c95368858d
CLDSRV-334: bump arsenal to 8.1.82 2023-01-24 12:33:17 +01:00
bert-e ffafe6ecfc Merge branch 'improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/7.70/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 08:20:21 +00:00
Dimitrios Vasilas 4301fc57e2 CLDSRV-333: catch unintentional error 2023-01-23 10:50:44 -05:00
Dimitrios Vasilas 072d8324ca CLDSRV-333: test MPU delete/overwrite handling.
Add unit tests that check that the correct
fields are sent to Metadata when a MPU is
deleted or overwritten.
2023-01-23 10:50:44 -05:00
Dimitrios Vasilas 25276dae3f CLDSRV-333: add mpuUtils
Avoid code duplication in tests that create MPUs.
2023-01-23 10:50:44 -05:00
Dimitrios Vasilas bdeeb25d19 CLDSRV-333: add oldReplayId to metadataStoreParams on overwrite
When an MPU is overwritten by a PUT (createAndStoreObject)
or a copy (objectCopy), add the MPU's uploadId to
metadataStoreParams in a field called oldReplayId.
Metadata will use this field to clean up the replay
key corresponding to the overwritten MPU.
2023-01-23 10:50:44 -05:00
Dimitrios Vasilas 5dc17db9df CLDSRV-333: objectDelete: add MPU uploadId to delOptions
If the object to be deleted is an MPU,
include its uploadId as a field named replayId
in the options with which services.deleteObject
is called.
Metadata will use this field to clean up the replay
key corresponding to the overwritten MPU.
2023-01-23 10:50:35 -05:00
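Taken together, the two CLDSRV-333 changes above can be sketched roughly like this (field and helper names are assumptions, not the actual CloudServer code):

    // Overwrite path (createAndStoreObject / objectCopy): remember the
    // overwritten MPU's uploadId so Metadata can remove its replay key.
    function withOldReplayId(metadataStoreParams, existingObjMD) {
        if (existingObjMD && existingObjMD.uploadId) {
            return Object.assign({}, metadataStoreParams,
                { oldReplayId: existingObjMD.uploadId });
        }
        return metadataStoreParams;
    }

    // Delete path (objectDelete): propagate the uploadId as replayId in
    // the options passed to services.deleteObject.
    function withReplayId(delOptions, objMD) {
        if (objMD && objMD.uploadId) {
            return Object.assign({}, delOptions, { replayId: objMD.uploadId });
        }
        return delOptions;
    }

    // example usage with made-up metadata
    console.log(withOldReplayId({ key: 'photo.jpg' }, { uploadId: 'abc123' }));
    console.log(withReplayId({ versionId: 'v1' }, { uploadId: 'abc123' }));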
bert-e d8ff1377fc Merge branch 'w/8.6/feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.7/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:31 +00:00
bert-e 425a9167ca Merge branch 'w/8.5/feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.6/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:30 +00:00
bert-e 2f21b9cc52 Merge branch 'feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.5/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:30 +00:00
Alexander Chan d6433961a1 CLDSRV-329: adapt release step for 8.x 2023-01-19 18:28:59 -08:00
Alexander Chan 090b276f23 CLDSRV-329 migrate mongodb and ceph functional tests 2023-01-18 17:50:12 -08:00
Jonathan Gramain 28f4c5baee Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.7/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:49:44 -08:00
Jonathan Gramain 89a1c646ad Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.6/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:49:05 -08:00
Jonathan Gramain 5c249f0c56 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.5/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:48:27 -08:00
Jonathan Gramain c971669b9b Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.4/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:47:23 -08:00
Jonathan Gramain 04e553b968 Merge remote-tracking branch 'origin/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/7.70/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:44:21 -08:00
Jonathan Gramain 57ef76548e CLDSRV-330 version bump 2023-01-14 00:21:24 -08:00
Jonathan Gramain 717a3274fc CLDSRV-330 Revert "CLDSRV-312 bump arsenal dep"
This reverts commit 3992ac2809.
2023-01-14 00:16:53 -08:00
Jonathan Gramain 1b59d0efb8 CLDSRV-330 bump arsenal version 2023-01-14 00:16:42 -08:00
bert-e 0a8f846f4b Merge branch 'w/8.6/feature/CLDSRV-244/migrateToGithubActions' into tmp/octopus/w/8.7/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 22:54:04 +00:00
bert-e 045602fc00 Merge branch 'w/8.5/feature/CLDSRV-244/migrateToGithubActions' into tmp/octopus/w/8.6/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 22:54:04 +00:00
Alexander Chan 5048c1fef1 Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-244/migrateToGithubActions' into w/8.5/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 14:34:55 -08:00
Alexander Chan 1e95d108be Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-244/migrateToGithubActions' into w/8.4/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 14:04:54 -08:00
bert-e 04abefd799 Merge branch 'feature/CLDSRV-244/migrateToGithubActions' into tmp/octopus/w/7.70/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 21:14:30 +00:00
Alexander Chan 2772976e86 remove duplications 2023-01-12 13:13:45 -08:00
Alexander Chan 51905f82ba remove eve CI files 2023-01-12 13:13:45 -08:00
Alexander Chan b72adc50a7 CLDSRV-244: migrate eve CI to GitHub Actions
- multibackend functional tests
- file functional tests
- utapi v2 functional tests
- kmip functional tests
2023-01-12 13:13:45 -08:00
Thomas Carmet b6def80347 CLDSRV-115 migration to gh actions 2023-01-11 16:32:35 -08:00
Thomas Carmet b3f7a22a07 CLDSRV-115 migration github actions 2023-01-11 16:32:35 -08:00
Jonathan Gramain ac5de47ca1 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-312-bumpArsenal' into w/8.7/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 16:03:10 -08:00
Jonathan Gramain 3c0f3e671a Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-312-bumpArsenal' into w/8.6/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 13:48:58 -08:00
Jonathan Gramain a3dc3f9fb8 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-312-bumpArsenal' into w/8.5/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 13:44:46 -08:00
Jonathan Gramain e4bf9500a3 Merge branch 'w/7.70/bugfix/CLDSRV-312-bumpArsenal' into w/8.4/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 12:41:33 -08:00
Jonathan Gramain ac33897f25 Merge remote-tracking branch 'origin/bugfix/CLDSRV-312-bumpArsenal' into w/7.70/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 11:33:15 -08:00
Jonathan Gramain c6a640ca9d CLDSRV-312 bump cloudserver version 2023-01-09 10:39:19 -08:00
Jonathan Gramain 3992ac2809 CLDSRV-312 bump arsenal dep
Bump arsenal dependency and fix NextMarker in listing unit tests

Because of changes in the Delimiter/DelimiterMaster listing
algorithms, the NextMarker is now always an actual listable key
instead of possibly be a common prefix, so existing tests need to
reflect this.
2023-01-09 10:39:00 -08:00
williamlardier c147785464
CLDSRV-322: bump cloudserver version 2023-01-06 09:04:04 +01:00
williamlardier ca8c788757
CLDSRV-322: code improvements 2023-01-06 09:04:03 +01:00
williamlardier cb2af364bb
CLDSRV-322: Implement test for custom routes
Unit and functional tests are implemented to test the custom routes.
The LISTing is not yet tested, as it requires more changes to
generate a valid signature from Mocha.
2023-01-05 15:31:33 +01:00
williamlardier 1eb27d610b
CLDSRV-322: Support custom files for MultiObjectDelete
MultiObjectDelete is implemented by the product UI to delete the
files in buckets. This method is a POST that relies on the request
body to filter the objects, hence it is not possible to filter
it with an ingress rule in nginx.

The implementation tries to avoid adding any complexity
by extending existing loops, and implementing a new step if eligible
files are found.

These files are extracted from the Veeam route list of accepted files,
but this implementation might change if more custom APIs are supported
in the future.
2023-01-05 15:31:33 +01:00
williamlardier 73b295c91d
CLDSRV-322: Implement LIST for SOSAPI routes
Listing of objects is needed for consistent user experience in the
product's User Interface.

Listing is implemented as a `GET` request with a specific query parameter
`list-type` and folder `.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c`.

This API:
- Handles both versioned and non-versioned listing
- Relies on predefined templates to fill the response content
- Extracts the system.xml and capacity.xml files from the bucket
  metadata
- Computes the listing response based on the input query parameters
  and files currently in the bucket md capabilities
- Handles errors if any unsupported query parameter is used. As any GET
  request is routed to this method, we return InvalidRequest if the requested
  action is not supported (i.e., not a listing v2)
2023-01-05 15:31:32 +01:00
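As a rough sketch of the gating described above (helper and constant names are invented for illustration):

    const VEEAM_FOLDER = '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/';

    // Only a V2 listing (list-type=2) scoped to the special folder is
    // served by this route; any other GET query gets InvalidRequest.
    function isVeeamListingRequest(query) {
        return query['list-type'] === '2'
            && typeof query.prefix === 'string'
            && query.prefix.startsWith(VEEAM_FOLDER);
    }

    console.log(isVeeamListingRequest({ 'list-type': '2', prefix: VEEAM_FOLDER }));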
williamlardier 8186c84bf9
CLDSRV-322: Implement DELETE for SOSAPI routes
Deletion of Veeam SOSAPI files is required to ensure a consistent
user experience. This API is only exposed to API/CLI clients, not the
User Interface. The User Interface relies on MultiObjectDelete to
perform the deletions, which is handled in a separate commit.

This API:
- Checks that the requested file exists
- Erases the bucket metadata according to the file
- Updates the bucket metadata with the updated values
- Handles errors if the Veeam capability is not yet enabled for the bucket
2023-01-05 15:31:32 +01:00
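A minimal sketch of that flow, assuming the files live under a `VeeamSOSApi` capability in the bucket metadata (real field names may differ):

    // Remove one of the two supported files from the bucket capability,
    // failing if the capability or the file is missing; the caller then
    // persists the updated bucket metadata.
    function deleteVeeamFile(capabilities, fileName) {
        const sosApi = capabilities && capabilities.VeeamSOSApi;
        if (!sosApi) {
            throw new Error('InvalidRequest: SOSAPI is not enabled on this bucket');
        }
        const field = fileName === 'system.xml' ? 'SystemInfo' : 'CapacityInfo';
        if (!sosApi[field]) {
            throw new Error('NoSuchKey');
        }
        delete sosApi[field];
        return capabilities;
    }

    console.log(deleteVeeamFile(
        { VeeamSOSApi: { SystemInfo: {}, CapacityInfo: {} } }, 'system.xml'));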
williamlardier 93ef2d0545
CLDSRV-322: Implement HEAD for SOSAPI routes
HEAD object is not formally required by Veeam SOSAPI, but Veeam
relies on the last-modified date value of the capacity.xml file.
To support any change in a future SOSAPI standard, the HEAD method
is implemented; it is similar to the GET method, except that only the
metadata is returned.
2023-01-05 15:31:31 +01:00
williamlardier d7d0a31bb1
CLDSRV-322: Implement PUT for SOSAPI routes
In the SOSAPI context, the user is requested to pre-create two files,
system.xml and capacity.xml, under the veeam folder to enable the feature.

This API:
- Extracts the XML from the provided file and converts it to JSON
- Validates the JSON against the joi schemes, if applicable
- Updates the bucket metadata, including the last-modified date
- Updates the bucket metadata in the database
- Returns the standard success code response
- Handles invalid XML or XML structure, and returns an error accordingly
2023-01-05 15:31:31 +01:00
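A hedged sketch of the parsing half of that flow using xml2js (already a project dependency); the XML sample and stored shape are illustrative only:

    const { parseStringPromise } = require('xml2js');

    // Parse the uploaded XML body and prepare what would be stored in the
    // bucket metadata; schema validation on the resulting JSON is covered
    // by the validator commit further down this log.
    async function parseVeeamPut(xmlBody) {
        const parsed = await parseStringPromise(xmlBody); // XML -> JSON
        return {
            value: parsed,
            lastModified: new Date().toISOString(),
        };
    }

    parseVeeamPut('<SystemInfo><ProtocolVersion>"1.0"</ProtocolVersion></SystemInfo>')
        .then(doc => console.log(doc.value.SystemInfo.ProtocolVersion[0]));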
williamlardier 4c69b82508
CLDSRV-322: Implement GET for SOSAPI
The GET method is used by SOSAPI to determine whether SOS API is
enabled on a bucket.

Two files are supported: system.xml and capacity.xml.

This API:
- Gets the bucket metadata
- Dynamically recomputes a valid XML document from the bucket md content
  using xml2js in headless mode, to match the XML defined by the
  SOSAPI standard
- Rejects the request with an error if the bucket metadata does not
  exist
- Handles the `?tagging` request, required for versioned buckets, to
  return static content

The output stream relies on the utils file.
2023-01-05 15:31:30 +01:00
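As an illustration of the headless rebuild (the stored field names are assumptions), xml2js can turn the stored JSON back into an XML body without the XML declaration:

    const xml2js = require('xml2js');

    // Rebuild the response body from what is stored in bucket metadata;
    // headless: true omits the <?xml ...?> declaration.
    function buildVeeamXml(storedJson) {
        const builder = new xml2js.Builder({ headless: true });
        return builder.buildObject(storedJson);
    }

    console.log(buildVeeamXml({
        SystemInfo: { ProtocolVersion: '"1.0"', ModelName: '"Example"' },
    }));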
williamlardier ca13284da3
CLDSRV-322: implement common util functions
Custom SOSAPI routes might either retrieve or stream data. The utils file
re-implements, with support for this particular context, some functions
from the standard API paths in Arsenal.

These changes mostly introduce ways to compute the right HTTP headers as
well as input or output streams to handle GET or PUT request types.
2023-01-05 15:31:30 +01:00
williamlardier c6ed75a1d7
CLDSRV-322: implement SOSAPI scheme validator
SOSAPI relies on standard XML files for both the system and the capacity.
It is used by Veeam12+ to determine what capabilities and/or
configuration should be enforced for a given S3-integrated bucket used
for backups.

The commit introduces scheme validation for JSON objects, as the XML is
first converted using xml2js.

The system.xml file includes the protocol version of SOSAPI: if the
version is not known, no validation is made, to allow for future changes
without a formal need to update the product.

Note: a maximum XML file size is enforced in case of an unsupported
protocol version, to avoid space issues with the database.
2023-01-05 15:31:30 +01:00
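A sketch of the version-gating idea under stated assumptions (the real joi schemes, version strings, and size cap are not shown in this log):

    const joi = require('joi');

    // Hypothetical map of known protocol versions to their joi schemes.
    const knownSchemes = {
        '"1.0"': joi.object().unknown(true),
    };

    const MAX_XML_SIZE = 64 * 1024; // assumed cap to protect the database

    function validateVeeamDocument(jsonDoc, protocolVersion, rawXmlLength) {
        if (rawXmlLength > MAX_XML_SIZE) {
            throw new Error('EntityTooLarge');
        }
        const scheme = knownSchemes[protocolVersion];
        if (!scheme) {
            // unknown version: accept as-is so a future SOSAPI revision
            // does not require a product update
            return jsonDoc;
        }
        const { error, value } = scheme.validate(jsonDoc);
        if (error) {
            throw new Error(`MalformedXML: ${error.message}`);
        }
        return value;
    }

    console.log(validateVeeamDocument({ SystemInfo: {} }, '"1.0"', 512));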
williamlardier 402d0dea1a
CLDSRV-322: Create a new route for Veeam12 SOS API.
This new route is exposed through special nginx rules
from Zenko-Operator, to redirect any call to the veeam
folder, located under .system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c
at the root of the bucket. The goal is to store files in
the bucket metadata, to ease their update by internal jobs.
To avoid impacting the standard API, we rely on custom routes
with dedicated logic to handle these files, in a generic
way.

This commit introduces a new route that manages, in turn:
- Handling of incoming requests.
- Validity checks, including the list of supported APIs according
  to the HTTP verb and query parameters.
- Authentication and Authorization with Vault, in the same
  way as for usual files.
- Check of the targeted bucket and/or keys, to extract the
  bucket metadata.
- Routing of the request to the right API handler.
2023-01-05 15:31:29 +01:00
williamlardier 95faec1db0
CLDSRV-322: bump arsenal version 2023-01-05 15:31:29 +01:00
Jonathan Gramain ca9d53f430 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-321-version-bump' into w/8.7/bugfix/CLDSRV-321-version-bump 2022-12-26 11:19:03 -08:00
Jonathan Gramain ba27ff7980 Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-321-version-bump' into w/8.6/bugfix/CLDSRV-321-version-bump 2022-12-26 11:18:27 -08:00
Jonathan Gramain 8957997e23 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-321-version-bump' into w/8.5/bugfix/CLDSRV-321-version-bump 2022-12-26 11:17:26 -08:00
Jonathan Gramain 3caeda5d39 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-321-version-bump' into w/8.4/bugfix/CLDSRV-321-version-bump 2022-12-26 11:15:32 -08:00
Jonathan Gramain feed423f56 Merge remote-tracking branch 'origin/bugfix/CLDSRV-321-version-bump' into w/7.70/bugfix/CLDSRV-321-version-bump 2022-12-26 11:12:06 -08:00
Jonathan Gramain 4981d8f342 CLDSRV-321 bump version 2022-12-26 10:56:55 -08:00
bert-e b1ee1f8ef7 Merge branch 'w/8.6/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.7/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:26 +00:00
bert-e 28d778c2d4 Merge branch 'w/8.5/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.6/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:26 +00:00
bert-e b180aac9ba Merge branch 'w/8.4/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.5/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:25 +00:00
Taylor McKinnon c353452128 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-321/fix_retention_extension_check' into w/8.4/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 15:26:32 -08:00
bert-e 101b13abce Merge branch 'bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/7.70/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:24:18 +00:00
Taylor McKinnon 9f5ae852bf bf(CLDSRV-321): Fix retention extension check to consider same date as extended 2022-12-21 15:11:21 -08:00
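For illustration, the rule being fixed can be stated as a comparison where an equal date also counts as an extension (a sketch, not the actual ObjectLockInfo helper):

    // A new retain-until date is an allowed extension when it does not
    // shorten the existing retention, i.e. it is the same date or later.
    function isExtension(currentRetainUntil, newRetainUntil) {
        return new Date(newRetainUntil).getTime()
            >= new Date(currentRetainUntil).getTime();
    }

    console.log(isExtension('2023-01-01T00:00:00Z', '2023-01-01T00:00:00Z')); // true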
williamlardier e882cb6781
Merge remote-tracking branch 'origin/bugfix/CLDSRV-320-bump-arsenal' into w/8.7/bugfix/CLDSRV-320-bump-arsenal 2022-12-20 17:44:48 +01:00
williamlardier 8543f1a934
CLDSRV-320: bump project version 2022-12-20 17:43:02 +01:00
williamlardier fc871fbbfa
CLDSRV-320: bump arsenal to 8.1.77 2022-12-20 17:42:59 +01:00
Francois Ferrand cb7303636c
Release bump 8.7.1
Issue: CLDSRV-306
2022-12-16 19:56:19 +01:00
Francois Ferrand 6d0f889c23
Merge remote-tracking branch 'origin/feature/CLDSRV-306' into w/8.7/feature/CLDSRV-306 2022-12-16 19:54:23 +01:00
Francois Ferrand c13f2ae6a5
Merge remote-tracking branch 'origin/improvement/CLDSRV-305' into w/8.7/improvement/CLDSRV-305 2022-12-16 18:08:52 +01:00
Francois Ferrand 03058371e9
Release bump 8.6.4
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand 473fed7594
Migrate tests to new azure storage sdk
Issue: CLDSRV-305
2022-12-16 17:54:09 +01:00
Francois Ferrand d86b9144be
Handle isSameAzureAccount() for other auth methods
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand 2f2d9ced4c
Add unit tests for azure auth config
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand 57a0ffc746
Support alternate azure auth methods in config
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand d839cf2394
Bump arsenal
https://github.com/scality/Arsenal/tree/improvement/ARSN-281

Issue: CLDSRV-305
2022-12-16 16:21:43 +01:00
bert-e b6611c4711 Merge branch 'w/8.6/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.7/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:48 +00:00
bert-e 461f5ac5f9 Merge branch 'w/8.4/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.5/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:47 +00:00
bert-e 413a42adf0 Merge branch 'w/8.5/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.6/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:47 +00:00
Jonathan Gramain 7be27e0a83 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into w/8.4/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 14:51:13 -08:00
Jonathan Gramain 3d3252361d Merge remote-tracking branch 'origin/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into w/7.70/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 12:21:16 -08:00
Jonathan Gramain dad8bc7195 CLDSRV-173 CLDSRV-170 CLDSRV-177 S3C-5390 bump version 2022-12-15 11:47:24 -08:00
Artem Bakalov e6bda3460b remove .only
(cherry picked from commit b4725aa032)
(cherry picked from commit af95fea311)
2022-12-15 11:47:24 -08:00
Artem Bakalov 64334db65a S3C-5390 s3api head-object with part-number 1 on empty file fails: httpCode 416
(cherry picked from commit 4f3195a6ca)
(cherry picked from commit 8f3e737664)
2022-12-15 11:47:24 -08:00
Jonathan Gramain fa562ae85a CLDSRV-177 fix backbeat func tests for CLDSRV-290
Since CLDSRV-290, backbeat routes functional tests need to pass a
versionId in the query string in the `/_/backbeat/metadata` route,
hence the original tests from CLDSRV-177 needed to be updated.
2022-12-15 11:47:24 -08:00
Jonathan Gramain 6f32ebb2ce [7.10] CLDSRV-177 add missing test helper checkObjectData
(cherry picked from commit 413ebe743c)
(cherry picked from commit a1c4420eab)
2022-12-14 23:31:35 -08:00
Jonathan Gramain 2b32ec6163 bugfix: CLDSRV-177 fix crash with empty object replication
Fix a case of crash when a replication occurs with an empty object
over a non-empty object.

It is not clear how this happens in practice but there can be some
corner cases with race conditions between object replication and
versioning getting suspended on the target bucket at the same time,
that could lead to this situation, as the check between replication
configuration and actual replication is not atomic.

(cherry picked from commit a4e8cbebe6)
(cherry picked from commit 108d1c920f)
2022-12-14 23:31:29 -08:00
Jonathan Gramain d4063e157a bugfix: CLDSRV-170 skip orphan cleanup in UploadPart[Copy]
Do not delete orphan data in UploadPart/UploadPartCopy on overwrite
iff a CompleteMPU of the target MPU is already in progress.

This is to prevent a race condition where a CompleteMPU is running
while UploadPart is uploading a part for the same MPU.

It leaves an orphan in storage since only one of the uploaded parts' data will
be present in the finished MPU, but the window is limited to the
CompleteMPU execution and should only occur when there are retries of
UploadPart due to prior stuck requests, or with broken clients
misusing the MPU API, so it should be acceptable.

Implementation details:

- set a flag in the MPU overview key when starting the CompleteMPU
  process, before listing the parts from metadata to construct the
  manifest

- in UploadPart/UploadPartCopy, after the part metadata is written and
  if the same part already existed, re-fetch the MPU overview key to
  check the flag: if set, skip the deletion of the old data of this
  part, since the CompleteMPU process in progress may choose either
  part data depending on the exact timing of the listing vs. the
  part overwrite.

(cherry picked from commit 8496111518)
(cherry picked from commit c2dbbfa008)
2022-12-14 23:31:07 -08:00
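A schematic sketch of the two sides of that race, with invented helper and flag names standing in for the real metadata calls:

    // CompleteMPU side: mark the overview key before listing the parts,
    // so concurrent part uploads know a completion is in flight.
    async function startCompleteMPU(md, overviewKey) {
        const overview = await md.get(overviewKey);
        overview.completeInProgress = true;
        await md.put(overviewKey, overview);
        return overview;
    }

    // UploadPart side: after the new part metadata is written and the part
    // already existed, re-fetch the overview key and only delete the old
    // part data when no CompleteMPU is in progress.
    async function maybeDeleteOldPartData(md, data, overviewKey, oldLocations) {
        const overview = await md.get(overviewKey);
        if (overview.completeInProgress) {
            // leave a bounded orphan rather than delete data the ongoing
            // completion may have just selected for the final manifest
            return false;
        }
        await data.delete(oldLocations);
        return true;
    }

    // tiny in-memory stand-ins to make the sketch runnable
    const store = new Map([['mpu-overview', { completeInProgress: false }]]);
    const mdClient = {
        get: async key => store.get(key),
        put: async (key, value) => { store.set(key, value); },
    };
    startCompleteMPU(mdClient, 'mpu-overview')
        .then(() => maybeDeleteOldPartData(
            mdClient, { delete: async () => {} }, 'mpu-overview', []))
        .then(deleted => console.log('old part data deleted:', deleted)); // false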
Nicolas Humbert a481384538 CLDSRV-173 DeleteMarkers created by Lifecycle should not be replicated
(cherry picked from commit d30430a81c)
2022-12-14 23:31:07 -08:00
bert-e ae4ece471b Merge branch 'w/8.7/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/q/8.7 2022-12-14 21:19:55 +00:00
williamlardier 15b61cd947
CLDSRV-297: bump cloudserver to 8.7.0 2022-12-14 18:16:55 +01:00
williamlardier 91536c575f
CLDSRV-297: bump projects versions 2022-12-14 18:16:52 +01:00
bert-e 864ce1f27d Merge branch 'w/8.4/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/w/8.5/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag 2022-12-14 04:56:43 +00:00
bert-e 9d007a76b1 Merge branch 'w/8.5/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/w/8.6/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag 2022-12-14 04:56:43 +00:00
Artem Bakalov f4e292c6f9 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into w/8.4/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag 2022-12-13 20:55:44 -08:00
bert-e 436d1a9eab Merge branch 'improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/w/7.70/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag 2022-12-14 04:52:48 +00:00
Artem Bakalov 3da8f88a12 CLDSRV-301 - use isNonversionedBucket flag 2022-12-13 20:51:28 -08:00
Francois Ferrand a2eb347fe3
Join latency lines when no operation
Latency is expected to be NaN when there are no operations, and we should
not mask this as "0 latency", which would be incorrect.

To make the graph easier to read and less confusing, we now 'join' the
lines if there is less than 3 minutes with no data.

As we plot the individual data points on the graph, the missing data can
still be observed by looking for these points.

Issue: CLDSRV-309
2022-11-25 14:03:35 +01:00
Francois Ferrand 0ff1262f97
Display metrics in op/s
This is more practical for ops when dealing with large amounts of data.

Issue: CLDSRV-309
2022-11-25 14:03:35 +01:00
Francois Ferrand 54a23d90c1
Fix http method breakdown
The value should be computed based on all values in the current window.

Issue: CLDSRV-309
2022-11-23 21:42:06 +01:00
Kerkesni eb3dc9b79f
feature: CLDSRV-308 bump version in package.json to 8.6.3 2022-11-18 14:48:56 +01:00
bert-e 2c8968ef4a Merge branch 'feature/CLDSRV-304-support-object-restore-completed-notification' into q/8.6 2022-11-16 09:42:12 +00:00
Kerkesni a449aa35f4
feature: CLDSRV-303 support s3:ObjectRestore:Completed event notification 2022-11-15 19:11:49 +01:00
Kerkesni c2c8582585
feature: CLDSRV-303 support s3:ObjectRestore:Post event notification 2022-11-15 17:31:22 +01:00
Kerkesni 82c1bd7211
feature: CLDSRV-277 bump arsenal to 8.1.72 2022-11-14 11:23:02 +01:00
bert-e 776af747f2 Merge branch 'feature/CLDSRV-295/bumpArsenalVersion' into tmp/octopus/w/8.6/feature/CLDSRV-295/bumpArsenalVersion 2022-11-12 10:40:37 +00:00
Alexander Chan 453fec0cb0 CLDSRV-295: bump arsenal 8.1.71 2022-11-11 22:03:21 -08:00
bert-e f9fd3cae16 Merge branch 'w/8.5/bugfix/CLDSRV-293/bump_cloudserver_version' into tmp/octopus/w/8.6/bugfix/CLDSRV-293/bump_cloudserver_version 2022-11-11 22:01:36 +00:00
bert-e 3662c406ec Merge branch 'w/8.4/bugfix/CLDSRV-293/bump_cloudserver_version' into tmp/octopus/w/8.5/bugfix/CLDSRV-293/bump_cloudserver_version 2022-11-11 22:01:36 +00:00
Taylor McKinnon 243876ef81 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-293/bump_cloudserver_version' into w/8.4/bugfix/CLDSRV-293/bump_cloudserver_version 2022-11-11 14:01:05 -08:00
Taylor McKinnon cf4706816f Merge remote-tracking branch 'origin/bugfix/CLDSRV-293/bump_cloudserver_version' into w/7.70/bugfix/CLDSRV-293/bump_cloudserver_version 2022-11-11 13:56:54 -08:00
Taylor McKinnon 368971dacb bf(CLDSRV-293): Bump version 2022-11-11 13:46:00 -08:00
bert-e f6fe11b763 Merge branch 'w/8.5/bugfix/CLDSRV-293/refactor_olock_checks' into tmp/octopus/w/8.6/bugfix/CLDSRV-293/refactor_olock_checks 2022-11-11 19:54:55 +00:00
Taylor McKinnon 5f94fce344 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-293/refactor_olock_checks' into w/8.5/bugfix/CLDSRV-293/refactor_olock_checks 2022-11-11 11:54:16 -08:00
Taylor McKinnon af8420fe3c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-293/refactor_olock_checks' into w/8.4/bugfix/CLDSRV-293/refactor_olock_checks 2022-11-11 11:52:21 -08:00
bert-e c3b209cbb5 Merge branch 'bugfix/CLDSRV-293/refactor_olock_checks' into tmp/octopus/w/7.70/bugfix/CLDSRV-293/refactor_olock_checks 2022-11-11 19:47:53 +00:00
Taylor McKinnon 3d6b7354a5 bf(CLDSRV-293): convert multiObjectDelete to use ObjectLockInfo helper 2022-11-11 11:47:14 -08:00
Taylor McKinnon a5d694a92c bf(CLDSRV-293): convert objectDelete to use ObjectLockInfo helper 2022-11-11 11:47:14 -08:00
Taylor McKinnon 990e821ac8 bf(CLDSRV-293): convert objectPutRetention to use ObjectLockInfo helper 2022-11-11 09:20:31 -08:00
Taylor McKinnon 8170bb9965 bf(CLDSRV-293): Add tests for object lock refactor 2022-11-11 09:20:07 -08:00
Taylor McKinnon 7e559d08c9 bf(CLDSRV-293): Refactor object lock helpers to centralize logic 2022-11-11 09:20:07 -08:00
bert-e e5c58ecc3d Merge branch 'w/8.5/bugfix/CLDSRV-296-removeGetProductVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-296-removeGetProductVersion 2022-11-04 20:17:34 +00:00
bert-e 6ef88fd60e Merge branch 'w/8.4/bugfix/CLDSRV-296-removeGetProductVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-296-removeGetProductVersion 2022-11-04 20:17:34 +00:00
bert-e 483e91a8d6 Merge branch 'w/7.70/bugfix/CLDSRV-296-removeGetProductVersion' into tmp/octopus/w/8.4/bugfix/CLDSRV-296-removeGetProductVersion 2022-11-04 20:17:33 +00:00
bert-e 3c99c67a33 Merge branch 'bugfix/CLDSRV-296-removeGetProductVersion' into tmp/octopus/w/7.70/bugfix/CLDSRV-296-removeGetProductVersion 2022-11-04 20:17:33 +00:00
Jonathan Gramain 29f87c7f2f chore: CLDSRV-296 remove get_product_version.sh
Not useful anymore and does not support hotfix versions x.y.z-p
2022-11-04 13:15:58 -07:00
Jonathan Gramain 7692d2c376 Merge remote-tracking branch 'origin/w/8.5/feature/CLDSRV-294-bump-7.10.19' into w/8.6/feature/CLDSRV-294-bump-7.10.19 2022-11-03 21:37:43 -07:00
Jonathan Gramain a0d7b07dc6 Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-294-bump-7.10.19' into w/8.5/feature/CLDSRV-294-bump-7.10.19 2022-11-03 21:36:36 -07:00
Jonathan Gramain a9c21b98f9 Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-294-bump-7.10.19' into w/8.4/feature/CLDSRV-294-bump-7.10.19 2022-11-03 21:23:12 -07:00
Jonathan Gramain fa9232f137 Merge remote-tracking branch 'origin/feature/CLDSRV-294-bump-7.10.19' into w/7.70/feature/CLDSRV-294-bump-7.10.19 2022-11-03 17:07:18 -07:00
Jonathan Gramain bcf3b4a16a CLDSRV-294 bump version to 7.10.19 2022-11-03 17:04:56 -07:00
bert-e 3257f4e905 Merge branch 'w/8.5/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy' into tmp/octopus/w/8.6/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy 2022-11-03 22:35:18 +00:00
bert-e 1d190019f7 Merge branch 'w/8.4/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy' into tmp/octopus/w/8.5/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy 2022-11-03 22:35:17 +00:00
Jonathan Gramain 79e7dc3946 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy' into w/8.4/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy 2022-11-03 15:31:25 -07:00
bert-e 1144e6bb33 Merge branch 'bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy' into tmp/octopus/w/7.70/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy 2022-11-02 22:05:56 +00:00
Jonathan Gramain 950542237f CLDSRV-291 update arsenal dep 2022-11-02 15:02:29 -07:00
Jonathan Gramain a3c3511ff9 bugfix: CLDSRV-291 test for HEAD object with bucket policy
Add unit tests to show that HEAD object requests are allowed when
bucket policy grants the `s3:GetObject` permission to another account
or user
2022-11-01 18:28:57 -07:00
Francois Ferrand 7db26fae9a
Release 8.6.1 2022-10-28 15:36:35 +02:00
Francois Ferrand 7faf8c2366
Fix chunk upload/download size
The formula is not statistically accurate, but it gives an estimate,
assuming the distribution of object sizes is somewhat linear.

Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Francois Ferrand e803078952
Add per-operation latency and count
Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Francois Ferrand cfd72f3a38
Fix last report query
Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Francois Ferrand 69a96d3993
Fix rounding of counts
Add a round() operator in the query instead of limiting to 0 decimals, to
allow Grafana to display fractional values when there is a "unit" (like
`1.25K`).

Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Taylor McKinnon d5bb8d8ed3 Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning' into w/8.6/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning 2022-10-27 13:43:28 -07:00
Taylor McKinnon aeb8de54db Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning' into w/8.5/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning 2022-10-27 13:37:03 -07:00
Taylor McKinnon 8f62260d70 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning' into w/8.4/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning 2022-10-27 13:36:19 -07:00
Taylor McKinnon 293930ff74 Merge remote-tracking branch 'origin/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning' into w/7.70/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning 2022-10-27 11:11:39 -07:00
Taylor McKinnon dd6deff075 bf(CLDSRV-289): Bump version to 7.10.18 2022-10-27 09:30:57 -07:00
Taylor McKinnon 4174106c2d bf(CLDSRV-289): Fix putDeleteMarkerObject metric for version suspended buckets 2022-10-27 09:30:57 -07:00
bert-e 29985f8955 Merge branch 'w/8.5/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData' into tmp/octopus/w/8.6/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData 2022-10-25 22:53:59 +00:00
Jonathan Gramain b081918317 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData' into w/8.5/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData 2022-10-25 15:53:06 -07:00
Jonathan Gramain 9049555887 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData' into w/8.4/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData 2022-10-25 10:47:34 -07:00
bert-e 41063705a9 Merge branch 'bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData' into tmp/octopus/w/7.70/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData 2022-10-25 17:35:08 +00:00
Jonathan Gramain 7cdb395ee3 bugfix: CLDSRV-290 fix `PUT /_/backbeat/metadata` versioning logic
Fix the logic by always using the provided `versionId` in the query
string as the version to put, instead of relying on the version stored
in the metadata. Not passing a `versionId` now amounts to creating a
new version.

The previous logic was causing possible confusion when no
`versionId` was passed in the query string, which allowed valid data
locations to be removed as if it were an overwrite.
2022-10-24 17:46:49 -07:00
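A small sketch of the decision described above (option names are hypothetical):

    // Decide how to store metadata received on PUT /_/backbeat/metadata:
    // trust only the versionId from the query string; when none is given,
    // create a brand new version instead of overwriting anything.
    function buildPutMetadataOptions(query) {
        if (query && query.versionId) {
            return { versionId: query.versionId, isNewVersion: false };
        }
        return { isNewVersion: true };
    }

    console.log(buildPutMetadataOptions({ versionId: 'exampleVersionId' }));
    console.log(buildPutMetadataOptions({})); // -> { isNewVersion: true }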
Jonathan Gramain 45c6aefc35 bugfix: CLDSRV-290 [test] update `PUT /_/backbeat/metadata` tests
- modify existing `PUT /_/backbeat/metadata` tests to always pass the
  `versionId` in the query string, as it should be with the updated
  API contract

- create a new test that does not pass the `versionId` in the query
  string on an update, and expects a new version to be created (and
  both versions to be readable to ensure no cleanup occurred)

- general tech debt cleanup: update the test `versionId` to be in the
  new base64 format when encoded by removing the extra info, making it
  exactly 27 characters long
2022-10-24 17:45:24 -07:00
bert-e b125bcb0b7 Merge branch 'w/8.5/bugfix/CLDSRV-275/bump-utapi' into tmp/octopus/w/8.6/bugfix/CLDSRV-275/bump-utapi 2022-10-21 18:25:46 +00:00
bert-e dd93e2f0be Merge branch 'w/8.4/bugfix/CLDSRV-275/bump-utapi' into tmp/octopus/w/8.5/bugfix/CLDSRV-275/bump-utapi 2022-10-21 18:25:46 +00:00
Taylor McKinnon d8dc35f1cf Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-275/bump-utapi' into w/8.4/bugfix/CLDSRV-275/bump-utapi 2022-10-21 11:25:09 -07:00
Taylor McKinnon 3df9712648 Merge remote-tracking branch 'origin/bugfix/CLDSRV-275/bump-utapi' into w/7.70/bugfix/CLDSRV-275/bump-utapi 2022-10-21 11:21:38 -07:00
Taylor McKinnon d45b543053 bf(CLDSRV-275): Bump version to 7.10.17 2022-10-18 11:29:53 -07:00
Taylor McKinnon 3910b25f1c bf(CLDSRV-275): Bump utapi to 7.10.10 2022-10-18 11:27:11 -07:00
Francois Ferrand 4e935dff1a
Release 8.6.0
Issue: CLDSRV-287
2022-10-17 10:10:04 +02:00
Francois Ferrand ecd54df821
Use node:16.17.1 bullseye slim base image
* Use more recent base image to get CVE fixes
* Use separate builder image to minimize the prod image

Issue: CLDSRV-287
2022-10-17 10:09:30 +02:00
bert-e d523b6f1b6 Merge branch 'w/8.4/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended' into tmp/octopus/w/8.5/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended 2022-10-15 02:10:11 +00:00
Artem Bakalov ab95973786 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended' into w/8.4/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended 2022-10-14 19:09:19 -07:00
bert-e fa99e2f3b2 Merge branch 'bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended' into tmp/octopus/w/7.70/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended 2022-10-15 02:07:33 +00:00
Artem Bakalov 49fded7d5f CLDSRV-275 - utapi-v1 delete inconsistency with versioning suspended 2022-10-14 18:49:49 -07:00
bert-e de094c53cd Merge branch 'w/8.4/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-07 20:49:44 +00:00
bert-e 0234ec7461 Merge branch 'w/7.70/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-07 20:49:44 +00:00
bert-e 34ece584a2 Merge branch 'bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/7.70/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-07 20:49:44 +00:00
Naren 464930ff16 bf: CLDSRV-285 execute symlinking scality-kms separately 2022-10-07 13:48:01 -07:00
bert-e 4f1bd8e634 Merge branch 'w/8.4/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 23:08:24 +00:00
bert-e bcabab454c Merge branch 'w/7.70/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 23:08:23 +00:00
Naren fdcecbf5ef Merge remote-tracking branch 'origin/bugfix/CLDSRV-285-update-release-dockerfile' into w/7.70/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 15:46:33 -07:00
Naren 9e186f7107 bf: CLDSRV-285 bump version to 7.10.16 2022-10-06 13:33:20 -07:00
Naren 82316c7b10 bf: CLDSRV-285 update release dockerfile 2022-10-06 13:29:34 -07:00
bert-e 47352b1df1 Merge branch 'w/8.4/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 18:52:22 +00:00
bert-e a019e89ebb Merge branch 'w/7.70/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 18:52:21 +00:00
bert-e d0eef7bf3f Merge branch 'bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/7.70/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 18:52:21 +00:00
Naren 1db16d1cda bf: CLDSRV-285 update release dockerfile
install git-lfs as root
2022-10-06 11:51:27 -07:00
bert-e 59c6a9fb2a Merge branches 'w/8.5/bugfix/CLDSRV-285-correct-docker-image' and 'q/4824/8.4/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/q/8.5 2022-10-05 16:29:08 +00:00
bert-e 0c27fbebea Merge branch 'w/8.4/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/q/8.4 2022-10-05 16:29:08 +00:00
bert-e 01afc596e9 Merge branch 'w/8.4/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-correct-docker-image 2022-10-05 02:51:42 +00:00
bert-e dff4c42971 Merge branch 'w/7.70/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-correct-docker-image 2022-10-05 02:51:42 +00:00
bert-e 55710d6a64 Merge branch 'bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/w/7.70/bugfix/CLDSRV-285-correct-docker-image 2022-10-05 02:51:41 +00:00
Naren 6e714cdb84 bf: CLDSRV-285 update dockerfile to match federation 2022-10-04 19:50:17 -07:00
williamlardier 3ce13ddde9
Merge remote-tracking branch 'origin/bugfix/CLDSRV-286-bump-arsenal-fix-authz-regression' into w/8.5/bugfix/CLDSRV-286-bump-arsenal-fix-authz-regression 2022-10-04 19:43:21 +02:00
williamlardier a327aa83c1
CLDSRV-286: use latest Arsenal version
2022-10-04 19:39:14 +02:00
williamlardier 667cd471a4
CLDSRV-286: bump dockerfile base image 2022-10-04 14:53:00 +02:00
340 changed files with 24732 additions and 12105 deletions


@ -1,5 +1,8 @@
 {
     "extends": "scality",
+    "plugins": [
+        "mocha"
+    ],
     "rules": {
         "import/extensions": "off",
         "lines-around-directive": "off",
@ -42,7 +45,8 @@
         "no-restricted-properties": "off",
         "new-parens": "off",
         "no-multi-spaces": "off",
-        "quote-props": "off"
+        "quote-props": "off",
+        "mocha/no-exclusive-tests": "error",
     },
     "parserOptions": {
         "ecmaVersion": 2020

.github/actions/setup-ci/action.yaml (new file)

@ -0,0 +1,43 @@
---
name: "Setup CI environment"
description: "Setup Cloudserver CI environment"
runs:
using: composite
steps:
- name: Setup etc/hosts
shell: bash
run: sudo echo "127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" | sudo tee -a /etc/hosts
- name: Setup Credentials
shell: bash
run: bash .github/scripts/credentials.bash
- name: Setup job artifacts directory
shell: bash
run: |-
set -exu;
mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: 'yarn'
- name: install dependencies
shell: bash
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- uses: actions/setup-python@v4
with:
python-version: 3.9
- name: Setup python2 test environment
shell: bash
run: |
sudo apt-get install -y libdigest-hmac-perl
pip install 's3cmd==2.3.0'
- name: fix sproxyd.conf permissions
shell: bash
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
- name: ensure fuse kernel module is loaded (for sproxyd)
shell: bash
run: sudo modprobe fuse

.github/docker/creds.env (new file)

@ -0,0 +1,36 @@
azurebackend_AZURE_STORAGE_ACCESS_KEY
azurebackend_AZURE_STORAGE_ACCOUNT_NAME
azurebackend_AZURE_STORAGE_ENDPOINT
azurebackend2_AZURE_STORAGE_ACCESS_KEY
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME
azurebackend2_AZURE_STORAGE_ENDPOINT
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME
azurebackendmismatch_AZURE_STORAGE_ENDPOINT
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT
azuretest_AZURE_BLOB_ENDPOINT
b2backend_B2_ACCOUNT_ID
b2backend_B2_STORAGE_ACCESS_KEY
GOOGLE_SERVICE_EMAIL
GOOGLE_SERVICE_KEY
AWS_S3_BACKEND_ACCESS_KEY
AWS_S3_BACKEND_SECRET_KEY
AWS_S3_BACKEND_ACCESS_KEY_2
AWS_S3_BACKEND_SECRET_KEY_2
AWS_GCP_BACKEND_ACCESS_KEY
AWS_GCP_BACKEND_SECRET_KEY
AWS_GCP_BACKEND_ACCESS_KEY_2
AWS_GCP_BACKEND_SECRET_KEY_2
b2backend_B2_STORAGE_ENDPOINT
gcpbackend2_GCP_SERVICE_EMAIL
gcpbackend2_GCP_SERVICE_KEY
gcpbackend2_GCP_SERVICE_KEYFILE
gcpbackend_GCP_SERVICE_EMAIL
gcpbackend_GCP_SERVICE_KEY
gcpbackendmismatch_GCP_SERVICE_EMAIL
gcpbackendmismatch_GCP_SERVICE_KEY
gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE

.github/docker/docker-compose.yaml (new file)

@ -0,0 +1,92 @@
services:
cloudserver:
image: ${CLOUDSERVER_IMAGE}
command: sh -c "yarn start > /artifacts/s3.log"
network_mode: "host"
volumes:
- /tmp/ssl:/ssl
- /tmp/ssl-kmip:/ssl-kmip
- ${HOME}/.aws/credentials:/root/.aws/credentials
- /tmp/artifacts/${JOB_NAME}:/artifacts
environment:
- CI=true
- ENABLE_LOCAL_CACHE=true
- REDIS_HOST=0.0.0.0
- REDIS_PORT=6379
- REPORT_TOKEN=report-token-1
- REMOTE_MANAGEMENT_DISABLE=1
- HEALTHCHECKS_ALLOWFROM=0.0.0.0/0
- DATA_HOST=0.0.0.0
- METADATA_HOST=0.0.0.0
- S3BACKEND
- S3DATA
- S3METADATA
- MPU_TESTING
- S3VAULT
- S3_LOCATION_FILE
- ENABLE_UTAPI_V2
- BUCKET_DENY_FILTER
- S3KMS
- S3KMIP_PORT
- S3KMIP_HOSTS
- S3KMIP-COMPOUND_CREATE
- S3KMIP_BUCKET_ATTRIBUTE_NAME
- S3KMIP_PIPELINE_DEPTH
- S3KMIP_KEY
- S3KMIP_CERT
- S3KMIP_CA
- MONGODB_HOSTS=0.0.0.0:27018
- MONGODB_RS=rs0
- DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file:
- creds.env
depends_on:
- redis
extra_hosts:
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com:127.0.0.1"
- "pykmip.local:127.0.0.1"
redis:
image: redis:alpine
network_mode: "host"
squid:
network_mode: "host"
profiles: ['ci-proxy']
image: scality/ci-squid
command: >-
sh -c 'mkdir -p /ssl &&
openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
-subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
-keyout /ssl/myca.pem -out /ssl/myca.pem &&
cp /ssl/myca.pem /ssl/CA.pem &&
squid -f /etc/squid/squid.conf -N -z &&
squid -f /etc/squid/squid.conf -NYCd 1'
volumes:
- /tmp/ssl:/ssl
pykmip:
network_mode: "host"
profiles: ['pykmip']
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts
mongo:
network_mode: "host"
profiles: ['mongo', 'ceph']
image: ${MONGODB_IMAGE}
ceph:
network_mode: "host"
profiles: ['ceph']
image: ghcr.io/scality/cloudserver/ci-ceph
sproxyd:
network_mode: "host"
profiles: ['sproxyd']
image: sproxyd-standalone
build: ./sproxyd
user: 0:0
privileged: yes

.github/docker/mongodb/Dockerfile (new file)

@ -0,0 +1,28 @@
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \
CONF_DIR=/conf \
DATA_DIR=/data
# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
mkdir /logs; \
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
chown -R ${USER} ${CONF_DIR} && \
chown -R ${USER} ${DATA_DIR}
# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js
EXPOSE 27017/tcp
EXPOSE 27018
# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]


@ -0,0 +1,4 @@
rs.initiate({
_id: "rs0",
members: [{ _id: 0, host: "127.0.0.1:27018" }]
});


@ -0,0 +1,10 @@
#!/bin/bash
set -exo pipefail
init_RS() {
sleep 5
mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &
mongod --bind_ip_all --config=/conf/mongod.conf

.github/docker/mongodb/conf/mongod.conf (new file)

@ -0,0 +1,15 @@
storage:
journal:
enabled: true
engine: wiredTiger
dbPath: "/data/db"
processManagement:
fork: false
net:
port: 27018
bindIp: 0.0.0.0
replication:
replSetName: "rs0"
enableMajorityReadConcern: true
security:
authorization: disabled

.github/docker/sproxyd/Dockerfile (new file)

@ -0,0 +1,3 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf


@ -0,0 +1,26 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

.github/docker/sproxyd/conf/nginx.conf (new file)

@ -0,0 +1,88 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}


@ -0,0 +1,12 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
},
}


@ -0,0 +1,43 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root


@ -2,9 +2,9 @@
 set -x #echo on
 set -e #exit at the first error
-mkdir -p ~/.aws
-cat >>/root/.aws/credentials <<EOF
+mkdir -p $HOME/.aws
+cat >>$HOME/.aws/credentials <<EOF
 [default]
 aws_access_key_id = $AWS_S3_BACKEND_ACCESS_KEY
 aws_secret_access_key = $AWS_S3_BACKEND_SECRET_KEY


@ -1,7 +1,10 @@
 name: Test alerts
 
 on:
-  push
+  push:
+    branches-ignore:
+      - 'development/**'
+      - 'q/*/**'
 
 jobs:
   run-alert-tests:
@ -17,13 +20,16 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       - name: Render and test ${{ matrix.tests.name }}
-        uses: scality/action-prom-render-test@1.0.1
+        uses: scality/action-prom-render-test@1.0.3
        with:
          alert_file_path: monitoring/alerts.yaml
          test_file_path: ${{ matrix.tests.file }}
-          alert_inputs: >-
-            namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
+          alert_inputs: |
+            namespace=zenko
+            service=artesca-data-connector-s3api-metrics
+            reportJob=artesca-data-ops-report-handler
+            replicas=3
          github_token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/codeql.yaml (new file)

@ -0,0 +1,25 @@
---
name: codeQL
on:
push:
branches: [w/**, q/*]
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
workflow_dispatch:
jobs:
analyze:
name: Static analysis with CodeQL
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v3


@ -0,0 +1,16 @@
---
name: dependency review
on:
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4


@ -1,5 +1,6 @@
 ---
 name: release
+run-name: release ${{ inputs.tag }}
 on:
 workflow_dispatch:
@ -8,26 +9,70 @@ on:
 description: 'Tag to be released'
 required: true
+env:
+PROJECT_NAME: ${{ github.event.repository.name }}
 jobs:
 build-federation-image:
-uses: scality/workflows/.github/workflows/docker-build.yaml@v1
+runs-on: ubuntu-20.04
-secrets: inherit
+steps:
+- name: Checkout
+uses: actions/checkout@v4
+- name: Set up Docker Buildx
+uses: docker/setup-buildx-action@v3
+- name: Login to GitHub Registry
+uses: docker/login-action@v3
+with:
+registry: ghcr.io
+username: ${{ github.repository_owner }}
+password: ${{ github.token }}
+- name: Build and push image for federation
+uses: docker/build-push-action@v5
 with:
 push: true
-registry: registry.scality.com
-namespace: ${{ github.event.repository.name }}
-name: ${{ github.event.repository.name }}
 context: .
 file: images/svc-base/Dockerfile
-tag: ${{ github.event.inputs.tag }}-svc-base
+tags: |
+ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
+cache-from: type=gha,scope=federation
+cache-to: type=gha,mode=max,scope=federation
-github-release:
+release:
 runs-on: ubuntu-latest
 steps:
+- name: Checkout
+uses: actions/checkout@v4
+- name: Set up Docker Buildx
+uses: docker/setup-buildx-action@v3
+- name: Login to Registry
+uses: docker/login-action@v3
+with:
+registry: ghcr.io
+username: ${{ github.repository_owner }}
+password: ${{ github.token }}
+- name: Push dashboards into the production namespace
+run: |
+oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
+dashboard.json:application/grafana-dashboard+json \
+alerts.yaml:application/prometheus-alerts+yaml
+working-directory: monitoring
+- name: Build and push
+uses: docker/build-push-action@v5
+with:
+context: .
+push: true
+tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
+cache-from: type=gha
+cache-to: type=gha,mode=max
 - name: Create Release
-uses: softprops/action-gh-release@v1
+uses: softprops/action-gh-release@v2
 env:
-GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+GITHUB_TOKEN: ${{ github.token }}
 with:
 name: Release ${{ github.event.inputs.tag }}
 tag_name: ${{ github.event.inputs.tag }}
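Since the release job above publishes the Grafana dashboard and Prometheus alerts as an ORAS artifact, here is a hedged sketch of fetching them back, assuming oras is installed, you can read packages on ghcr.io, and `<tag>` is whatever tag was passed to the workflow:

```sh
# log in with a token that can read packages, then pull the artifact pushed by the release job
echo "$GITHUB_TOKEN" | oras login ghcr.io -u <username> --password-stdin
oras pull ghcr.io/scality/cloudserver/cloudserver-dashboards:<tag> -o ./monitoring-artifacts
ls ./monitoring-artifacts   # dashboard.json, alerts.yaml
```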

.github/workflows/tests.yaml (new file, 533 lines)

@ -0,0 +1,533 @@
---
name: tests
on:
workflow_dispatch:
push:
branches-ignore:
- 'development/**'
- 'q/*/**'
env:
# Secrets
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurebackend_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY_2 }}
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME_2 }}
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT_2 }}
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
${{ secrets.AZURE_STORAGE_ACCESS_KEY }}
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
${{ secrets.AZURE_STORAGE_ACCOUNT_NAME }}
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
${{ secrets.AZURE_STORAGE_ENDPOINT }}
azuretest_AZURE_BLOB_ENDPOINT: "${{ secrets.AZURE_STORAGE_ENDPOINT }}"
b2backend_B2_ACCOUNT_ID: "${{ secrets.B2BACKEND_B2_ACCOUNT_ID }}"
b2backend_B2_STORAGE_ACCESS_KEY: >-
${{ secrets.B2BACKEND_B2_STORAGE_ACCESS_KEY }}
GOOGLE_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
GOOGLE_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
AWS_S3_BACKEND_ACCESS_KEY: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY }}"
AWS_S3_BACKEND_SECRET_KEY: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY }}"
AWS_S3_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_S3_BACKEND_ACCESS_KEY_2 }}"
AWS_S3_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_S3_BACKEND_SECRET_KEY_2 }}"
AWS_GCP_BACKEND_ACCESS_KEY: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY }}"
AWS_GCP_BACKEND_SECRET_KEY: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY }}"
AWS_GCP_BACKEND_ACCESS_KEY_2: "${{ secrets.AWS_GCP_BACKEND_ACCESS_KEY_2 }}"
AWS_GCP_BACKEND_SECRET_KEY_2: "${{ secrets.AWS_GCP_BACKEND_SECRET_KEY_2 }}"
b2backend_B2_STORAGE_ENDPOINT: "${{ secrets.B2BACKEND_B2_STORAGE_ENDPOINT }}"
gcpbackend2_GCP_SERVICE_EMAIL: "${{ secrets.GCP2_SERVICE_EMAIL }}"
gcpbackend2_GCP_SERVICE_KEY: "${{ secrets.GCP2_SERVICE_KEY }}"
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackend_GCP_SERVICE_EMAIL: "${{ secrets.GCP_SERVICE_EMAIL }}"
gcpbackend_GCP_SERVICE_KEY: "${{ secrets.GCP_SERVICE_KEY }}"
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_EMAIL }}
gcpbackendmismatch_GCP_SERVICE_KEY: >-
${{ secrets.GCPBACKENDMISMATCH_GCP_SERVICE_KEY }}
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
# Configs
ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
linting-coverage:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- name: Install python deps
run: pip install flake8
- name: Lint Javascript
run: yarn run --silent lint -- --max-warnings 0
- name: Lint Markdown
run: yarn run --silent lint_md
- name: Lint python
run: flake8 $(git ls-files "*.py")
- name: Lint Yaml
run: yamllint -c yamllint.yml $(git ls-files "*.yml")
- name: Unit Coverage
run: |
set -ex
mkdir -p $CIRCLE_TEST_REPORTS/unit
yarn test
yarn run test_legacy_location
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
CIRCLE_TEST_REPORTS: /tmp
CIRCLE_ARTIFACTS: /tmp
CI_REPORTS: /tmp
- name: Unit Coverage logs
run: find /tmp/unit -exec cat {} \;
- name: preparing junit files for upload
run: |
mkdir -p artifacts/junit
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always()
- name: Upload files to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: artifacts
if: always()
build:
runs-on: ubuntu-20.04
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push cloudserver image
uses: docker/build-push-action@v5
with:
push: true
context: .
provenance: false
tags: |
ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
uses: docker/build-push-action@v5
with:
push: true
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend:
runs-on: ubuntu-latest
needs: build
env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile sproxyd up -d
working-directory: .github/docker
- name: Run multiple backend test
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
mongo-v0-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
mongo-v1-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
file-ft-tests:
strategy:
matrix:
include:
- job-name: file-ft-tests
name: ${{ matrix.job-name }}
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: file
S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
shell: bash
run: |
set -exu
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
- name: Setup CI services
run: docker compose up -d
working-directory: .github/docker
- name: Run file ft tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
utapi-v2-tests:
runs-on: ubuntu-latest
needs: build
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose up -d
working-directory: .github/docker
- name: Run file utapi v2 tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
kmip-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: file
S3VAULT: mem
MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy KMIP certs
run: cp -r ./certs /tmp/ssl-kmip
working-directory: .github/pykmip
- name: Setup CI services
run: docker compose --profile pykmip up -d
working-directory: .github/docker
- name: Run file KMIP tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
ceph-backend-test:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
S3DATA: multiple
S3KMS: file
CI_CEPH: 'true'
MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1
with:
ruby-version: '2.5.9'
- name: Install Ruby dependencies
run: |
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
- name: Install Java dependencies
run: |
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
- name: Setup CI services
run: docker compose --profile ceph up -d
working-directory: .github/docker
env:
S3METADATA: mongodb
- name: Run Ceph multiple backend tests
run: |-
set -ex -o pipefail;
bash .github/ceph/wait_for_ceph.sh
bash wait_for_local_port.bash 27018 40
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
S3METADATA: mem
- name: Run Java tests
run: |-
set -ex -o pipefail;
mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
working-directory: tests/functional/jaws
- name: Run Ruby tests
run: |-
set -ex -o pipefail;
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
working-directory: tests/functional/fog
- name: Run Javascript AWS SDK tests
run: |-
set -ex -o pipefail;
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
S3BACKEND: file
S3VAULT: mem
S3METADATA: mongodb
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
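The jobs above all follow the same pattern: pull the images produced by the build job, start the matching docker compose profile from .github/docker, wait for port 8000, then run a yarn test target. A rough sketch of reproducing one of them (mongo-v0-ft-tests) locally, assuming Docker, a repository checkout, and access to the CI images; the setup-ci composite action does extra environment preparation that is skipped here, and the image tags are placeholders:

```sh
docker login ghcr.io                       # needs a token that can read packages
export CLOUDSERVER_IMAGE=ghcr.io/scality/cloudserver:<sha>
export MONGODB_IMAGE=ghcr.io/scality/cloudserver/ci-mongodb:<sha>
export S3BACKEND=mem S3METADATA=mongodb S3KMS=file DEFAULT_BUCKET_KEY_FORMAT=v0 MPU_TESTING=yes

(cd .github/docker && docker compose --profile mongo up -d)
bash wait_for_local_port.bash 8000 40
S3_LOCATION_FILE=tests/locationConfig/locationConfigTests.json yarn run ft_test
```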


@ -1,34 +1,19 @@
-FROM node:16.13.2-slim
+ARG NODE_VERSION=16.20-bullseye-slim
-MAINTAINER Giorgio Regni <gr@scality.com>
-ENV NO_PROXY localhost,127.0.0.1
+FROM node:${NODE_VERSION} as builder
-ENV no_proxy localhost,127.0.0.1
-EXPOSE 8000
-EXPOSE 8002
-COPY ./package.json /usr/src/app/
-COPY ./yarn.lock /usr/src/app/
 WORKDIR /usr/src/app
-RUN apt-get update \
-&& apt-get install -y \
-curl \
-gnupg2
-RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
-&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
 RUN apt-get update \
 && apt-get install -y --no-install-recommends \
 build-essential \
 ca-certificates \
-curl \
 git \
-gnupg2 \
 jq \
 python3 \
 ssh \
-yarn \
 wget \
 libffi-dev \
 zlib1g-dev \
@ -37,18 +22,39 @@ RUN apt-get update \
 && ssh-keyscan -H github.com > /root/ssh/known_hosts
 ENV PYTHON=python3
-RUN yarn cache clean \
+COPY package.json yarn.lock /usr/src/app/
-&& yarn install --production --ignore-optional --ignore-engines --network-concurrency 1 \
+RUN npm install typescript -g
-&& apt-get autoremove --purge -y python git build-essential \
+RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
-&& rm -rf /var/lib/apt/lists/* \
-&& yarn cache clean \
-&& rm -rf ~/.node-gyp \
-&& rm -rf /tmp/yarn-*
+################################################################################
+FROM node:${NODE_VERSION}
+RUN apt-get update && \
+apt-get install -y --no-install-recommends \
+jq \
+&& rm -rf /var/lib/apt/lists/*
+ENV NO_PROXY localhost,127.0.0.1
+ENV no_proxy localhost,127.0.0.1
+EXPOSE 8000
+EXPOSE 8002
+RUN apt-get update && \
+apt-get install -y --no-install-recommends \
+jq \
+tini \
+&& rm -rf /var/lib/apt/lists/*
+WORKDIR /usr/src/app
+# Keep the .git directory in order to properly report version
 COPY . /usr/src/app
+COPY --from=builder /usr/src/app/node_modules ./node_modules/
 VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
-ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
+ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
 CMD [ "yarn", "start" ]
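A short, hedged sketch of building and running the resulting two-stage image locally; the image name and host paths are arbitrary examples, and a real deployment would also mount its own config files:

```sh
docker build -t cloudserver-vitastor .
docker run -d --name cloudserver \
  -p 8000:8000 -p 8002:8002 \
  -v "$PWD/localData:/usr/src/app/localData" \
  -v "$PWD/localMetadata:/usr/src/app/localMetadata" \
  cloudserver-vitastor
# tini (PID 1) execs docker-entrypoint.sh, which in turn runs "yarn start"
```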

README.md (175 changed lines)

@ -1,10 +1,7 @@
-# Zenko CloudServer
+# Zenko CloudServer with Vitastor Backend
 ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
-[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
-[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
 ## Overview
 CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -14,137 +11,71 @@ Scality's Open Source Multi-Cloud Data Controller.
 CloudServer provides a single AWS S3 API interface to access multiple
 backend data storage both on-premise or public in the cloud.
-CloudServer is useful for Developers, either to run as part of a
+This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
-continuous integration test environment to emulate the AWS S3 service locally
+backend support.
-or as an abstraction layer to develop object storage enabled
-application on the go.
-## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
+## Quick Start with Vitastor
-## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
+Vitastor Backend is in experimental status, however you can already try to
+run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
+it works too 😊.
-## Docker
+Installation instructions:
-[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
+### Install Vitastor
-## Contributing
+Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
-In order to contribute, please follow the
+### Install Zenko with Vitastor Backend
-[Contributing Guidelines](
-https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
-## Installation
+- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
+- Install dependencies: `npm install --omit dev` or just `npm install`
+- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
+- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
+You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
+- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
-### Dependencies
+### Install and Configure MongoDB
-Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
+Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
-. Up-to-date versions can be found at
-[Nodesource](https://github.com/nodesource/distributions).
-### Clone source code
+### Setup Zenko
-```shell
+- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
-git clone https://github.com/scality/S3.git
+- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
+- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
+- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
+- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
+and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
+access keys, but it's not published, so let's use a file for now.
+- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
+You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
+in this file.
+Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
+instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
+### Start Zenko
+Start the S3 server with: `node index.js`
+If you use default settings, Zenko CloudServer starts on port 8000.
+The default access key is `accessKey1` with a secret key of `verySecretKey1`.
+Now you can access your S3 with `s3cmd` or `geesefs`:
+```
+s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
 ```
-### Install js dependencies
+```
+AWS_ACCESS_KEY_ID=accessKey1 \
-Go to the ./S3 folder,
+AWS_SECRET_ACCESS_KEY=verySecretKey1 \
+geesefs --endpoint http://localhost:8000 testbucket mountdir
-```shell
-yarn install --frozen-lockfile
 ```
-If you get an error regarding installation of the diskUsage module,
+# Author & License
-please install g++.
-If you get an error regarding level-down bindings, try clearing your yarn cache:
+- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
+- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
-```shell
+(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
-yarn cache clean
-```
-## Run it with a file backend
-```shell
-yarn start
-```
-This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
-9991 are also open locally for internal transfer of metadata and data,
-respectively.
-The default access key is accessKey1 with
-a secret key of verySecretKey1.
-By default the metadata files will be saved in the
-localMetadata directory and the data files will be saved
-in the localData directory within the ./S3 directory on your
-machine. These directories have been pre-created within the
-repository. If you would like to save the data or metadata in
-different locations of your choice, you must specify them with absolute paths.
-So, when starting the server:
-```shell
-mkdir -m 700 $(pwd)/myFavoriteDataPath
-mkdir -m 700 $(pwd)/myFavoriteMetadataPath
-export S3DATAPATH="$(pwd)/myFavoriteDataPath"
-export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
-yarn start
-```
-## Run it with multiple data backends
-```shell
-export S3DATA='multiple'
-yarn start
-```
-This starts a Zenko CloudServer on port 8000.
-The default access key is accessKey1 with
-a secret key of verySecretKey1.
-With multiple backends, you have the ability to
-choose where each object will be saved by setting
-the following header with a locationConstraint on
-a PUT request:
-```shell
-'x-amz-meta-scal-location-constraint':'myLocationConstraint'
-```
-If no header is sent with a PUT object request, the
-location constraint of the bucket will determine
-where the data is saved. If the bucket has no location
-constraint, the endpoint of the PUT request will be
-used to determine location.
-See the Configuration section in our documentation
-[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
-to learn how to set location constraints.
-## Run it with an in-memory backend
-```shell
-yarn run mem_backend
-```
-This starts a Zenko CloudServer on port 8000.
-The default access key is accessKey1 with
-a secret key of verySecretKey1.
-## Run it with Vault user management
-Note: Vault is proprietary and must be accessed separately.
-```shell
-export S3VAULT=vault
-yarn start
-```
-This starts a Zenko CloudServer using Vault for user management.
-[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
-[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
-[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
-[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
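For convenience, here are the Vitastor installation steps from the new README condensed into one copy-pasteable sequence; the `/path/to/...` locations and the clone directory name are placeholders, exactly as in the README itself:

```sh
git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor zenko && cd zenko
npm install --omit=dev                               # or just: npm install
git clone https://git.yourcmc.ru/vitalif/vitastor /path/to/vitastor
(cd /path/to/vitastor/node-binding && npm install)   # needs node-gyp and vitastor-client-dev
ln -s /path/to/vitastor/node-binding ./node_modules/vitastor
cp config.json.vitastor config.json                  # adjust restEndpoints to your domain
cp authdata.json.example authdata.json               # set S3 access & secret keys here
cp locationConfig.json.vitastor locationConfig.json  # set pool_id and metadata_image here
node index.js
```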


@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});


@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});


@ -4,6 +4,7 @@
 "metricsPort": 8002,
 "metricsListenOn": [],
 "replicationGroupId": "RG001",
+"workers": 4,
 "restEndpoints": {
 "localhost": "us-east-1",
 "127.0.0.1": "us-east-1",
@ -101,6 +102,14 @@
 "readPreference": "primary",
 "database": "metadata"
 },
+"authdata": "authdata.json",
+"backends": {
+"auth": "file",
+"data": "file",
+"metadata": "mongodb",
+"kms": "file",
+"quota": "none"
+},
 "externalBackends": {
 "aws_s3": {
 "httpAgent": {

config.json.vitastor (new file, 71 lines)

@ -0,0 +1,71 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}
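The config above expects a MongoDB replica set named rs0 and a user matching its authCredentials on the s3 database. A hedged sketch of preparing that on a single node; the mongosh calls are standard MongoDB administration commands, while the password and the exact roles are placeholders you should adapt:

```sh
# mongod must be started with replication enabled, e.g. in /etc/mongod.conf:
#   replication:
#     replSetName: rs0
mongosh --eval 'rs.initiate()'
mongosh --eval 'db.getSiblingDB("s3").createUser({
  user: "s3",
  pwd: "CHANGE_ME",
  roles: [{ role: "readWrite", db: "s3" }, { role: "dbAdmin", db: "s3" }]
})'
```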


@ -116,7 +116,8 @@ const constants = {
 ],
 // user metadata header to set object locationConstraint
-objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
+objectLocationConstraintHeader: 'x-amz-storage-class',
+lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
 legacyLocations: ['sproxyd', 'legacy'],
 // declare here all existing service accounts and their properties
 // (if any, otherwise an empty object)
@ -129,7 +130,7 @@ const constants = {
 },
 },
 /* eslint-disable camelcase */
-externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true },
+externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
 // some of the available data backends (if called directly rather
 // than through the multiple backend gateway) need a key provided
 // as a string as first parameter of the get/delete methods.
@ -175,6 +176,8 @@ const constants = {
 'objectDeleteTagging',
 'objectGetTagging',
 'objectPutTagging',
+'objectPutLegalHold',
+'objectPutRetention',
 ],
 // response header to be sent when there are invalid
 // user metadata in the object's metadata
@ -195,11 +198,51 @@ const constants = {
 'user',
 'bucket',
 ],
+arrayOfAllowed: [
+'objectPutTagging',
+'objectPutLegalHold',
+'objectPutRetention',
+],
 allowedUtapiEventFilterStates: ['allow', 'deny'],
 allowedRestoreObjectRequestTierValues: ['Standard'],
-validStorageClasses: [
+lifecycleListing: {
-'STANDARD',
+CURRENT_TYPE: 'current',
+NON_CURRENT_TYPE: 'noncurrent',
+ORPHAN_DM_TYPE: 'orphan',
+},
+multiObjectDeleteConcurrency: 50,
+maxScannedLifecycleListingEntries: 10000,
+overheadField: [
+'content-length',
+'owner-id',
+'versionId',
+'isNull',
+'isDeleteMarker',
 ],
+unsupportedSignatureChecksums: new Set([
+'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
+'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER',
+'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD',
+'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER',
+]),
+supportedSignatureChecksums: new Set([
+'UNSIGNED-PAYLOAD',
+'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
+]),
+ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
+ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
+// The AWS assumed Role resource type
+assumedRoleArnResourceType: 'assumed-role',
+// Session name of the backbeat lifecycle assumed role session.
+backbeatLifecycleSessionName: 'backbeat-lifecycle',
+actionsToConsiderAsObjectPut: [
+'initiateMultipartUpload',
+'objectPutPart',
+'completeMultipartUpload',
+],
+// if requester is not bucket owner, bucket policy actions should be denied with
+// MethodNotAllowed error
+onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
 };
 module.exports = constants;
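The objectLocationConstraintHeader change above means the backend for an object is now selected with the standard x-amz-storage-class request header instead of the old x-amz-meta-scal-location-constraint metadata header. A hedged example with the AWS CLI, assuming the STANDARD class from config.json.vitastor and a server listening on localhost:8000:

```sh
aws --endpoint-url http://localhost:8000 s3 cp ./file.txt s3://testbucket/file.txt \
    --storage-class STANDARD   # sent on the wire as the x-amz-storage-class header
```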


@ -199,6 +199,10 @@ if [[ -n "$BUCKET_DENY_FILTER" ]]; then
 JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
 fi
+if [[ "$TESTING_MODE" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .testingMode=true"
+fi
 if [[ $JQ_FILTERS_CONFIG != "." ]]; then
 jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
 mv config.json.tmp config.json
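As context for the hunk above: docker-entrypoint.sh accumulates jq assignments in JQ_FILTERS_CONFIG and applies them to config.json in a single pass. A tiny standalone illustration of the same mechanism, using two keys that appear elsewhere in this compare view (testingMode and workers):

```sh
JQ_FILTERS_CONFIG=". | .testingMode=true | .workers=4"
jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp && mv config.json.tmp config.json
```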


@ -2,11 +2,12 @@
 ## Docker Image Generation
-Docker images are hosted on [registry.scality.com](registry.scality.com).
+Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
-CloudServer has two namespaces there:
+CloudServer has a few images there:
-* Production Namespace: registry.scality.com/cloudserver
+* Cloudserver container image: ghcr.io/scality/cloudserver
-* Dev Namespace: registry.scality.com/cloudserver-dev
+* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
+* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
 With every CI build, the CI will push images, tagging the
 content with the developer branch's short SHA-1 commit hash.
@ -18,8 +19,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
 ## How to Pull Docker Images
 ```sh
-docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
+docker pull ghcr.io/scality/cloudserver:<commit hash>
-docker pull registry.scality.com/cloudserver/cloudserver:<tag>
+docker pull ghcr.io/scality/cloudserver:<tag>
 ```
 ## Release Process


@ -1,13 +0,0 @@
#!/bin/bash
script_full_path=$(readlink -f "$0")
file_dir=$(dirname "$script_full_path")/..
PACKAGE_VERSION=$(cat $file_dir/package.json \
| grep version \
| head -1 \
| awk -F: '{ print $2 }' \
| sed 's/[",]//g' \
| tr -d '[[:space:]]')
echo $PACKAGE_VERSION


@ -1,607 +0,0 @@
---
version: 0.2
branches:
feature/*, documentation/*, improvement/*, bugfix/*, w/*, q/*, hotfix/*, dependabot/*, user/*:
stage: pre-merge
models:
- env: &global-env
azurebackend_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackend_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackend_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurebackend2_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key_2)s
azurebackend2_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name_2)s
azurebackend2_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint_2)s
azurebackendmismatch_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurebackendmismatch_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurebackendmismatch_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azurenonexistcontainer_AZURE_STORAGE_ACCESS_KEY: >-
%(secret:azure_storage_access_key)s
azurenonexistcontainer_AZURE_STORAGE_ACCOUNT_NAME: >-
%(secret:azure_storage_account_name)s
azurenonexistcontainer_AZURE_STORAGE_ENDPOINT: >-
%(secret:azure_storage_endpoint)s
azuretest_AZURE_BLOB_ENDPOINT: "%(secret:azure_storage_endpoint)s"
b2backend_B2_ACCOUNT_ID: "%(secret:b2backend_b2_account_id)s"
b2backend_B2_STORAGE_ACCESS_KEY: >-
%(secret:b2backend_b2_storage_access_key)s
GOOGLE_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
GOOGLE_SERVICE_KEY: "%(secret:gcp_service_key)s"
AWS_S3_BACKEND_ACCESS_KEY: "%(secret:aws_s3_backend_access_key)s"
AWS_S3_BACKEND_SECRET_KEY: "%(secret:aws_s3_backend_secret_key)s"
AWS_S3_BACKEND_ACCESS_KEY_2: "%(secret:aws_s3_backend_access_key_2)s"
AWS_S3_BACKEND_SECRET_KEY_2: "%(secret:aws_s3_backend_secret_key_2)s"
AWS_GCP_BACKEND_ACCESS_KEY: "%(secret:aws_gcp_backend_access_key)s"
AWS_GCP_BACKEND_SECRET_KEY: "%(secret:aws_gcp_backend_secret_key)s"
AWS_GCP_BACKEND_ACCESS_KEY_2: "%(secret:aws_gcp_backend_access_key_2)s"
AWS_GCP_BACKEND_SECRET_KEY_2: "%(secret:aws_gcp_backend_secret_key_2)s"
b2backend_B2_STORAGE_ENDPOINT: "%(secret:b2backend_b2_storage_endpoint)s"
gcpbackend2_GCP_SERVICE_EMAIL: "%(secret:gcp2_service_email)s"
gcpbackend2_GCP_SERVICE_KEY: "%(secret:gcp2_service_key)s"
gcpbackend2_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackend_GCP_SERVICE_EMAIL: "%(secret:gcp_service_email)s"
gcpbackend_GCP_SERVICE_KEY: "%(secret:gcp_service_key)s"
gcpbackendmismatch_GCP_SERVICE_EMAIL: >-
%(secret:gcpbackendmismatch_gcp_service_email)s
gcpbackendmismatch_GCP_SERVICE_KEY: >-
%(secret:gcpbackendmismatch_gcp_service_key)s
gcpbackend_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendmismatch_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendnoproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
gcpbackendproxy_GCP_SERVICE_KEYFILE: /root/.gcp/servicekey
- env: &mongo-vars
S3BACKEND: "mem"
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: "file"
- env: &multiple-backend-vars
S3BACKEND: "mem"
S3DATA: "multiple"
MPU_TESTING: "yes"
S3KMS: "file"
- env: &file-mem-mpu
S3BACKEND: "file"
S3VAULT: "mem"
MPU_TESTING: "yes"
- env: &oras
REGISTRY: 'registry.scality.com'
PROJECT: '%(prop:git_slug)s'
LAYERS: >-
dashboard.json:application/grafana-dashboard+json
alerts.yaml:application/prometheus-alerts+yaml
- Git: &clone
name: Pull repo
repourl: '%(prop:git_reference)s'
shallow: true
retryFetch: true
haltOnFailure: true
- ShellCommand: &credentials
name: Setup Credentials
command: bash eve/workers/build/credentials.bash
haltOnFailure: true
env: *global-env
- ShellCommand: &yarn-install
name: install modules
command: yarn install --ignore-engines --frozen-lockfile --network-concurrency=1
haltOnFailure: true
env:
NODE_OPTIONS: --max-old-space-size=8192
- ShellCommand: &check-s3-action-logs
name: Check s3 action logs
command: |
LOGS=`cat /artifacts/s3.log | grep 'No actionLog'`
test `echo -n ${LOGS} | wc -l` -eq 0 || (echo $LOGS && false)
- Upload: &upload-artifacts
source: /artifacts
urls:
- "*"
- ShellCommand: &follow-s3-log
logfiles:
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &follow-s3-ceph-logs
logfiles:
ceph:
filename: /artifacts/ceph.log
follow: true
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &add-hostname
name: add hostname
command: |
echo "127.0.0.1 testrequestbucket.localhost" >> /etc/hosts
echo \
"127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" \
>> /etc/hosts
haltOnFailure: true
- ShellCommand: &setup-junit-upload
name: preparing junit files for upload
command: |
mkdir -p artifacts/junit
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
alwaysRun: true
- Upload: &upload-junits
source: artifacts
urls:
- "*"
alwaysRun: true
- env: &docker_env
DEVELOPMENT_DOCKER_IMAGE_NAME: >-
registry.scality.com/%(prop:git_slug)s-dev/%(prop:git_slug)s
PRODUCTION_DOCKER_IMAGE_NAME: >-
registry.scality.com/%(prop:git_slug)s/%(prop:git_slug)s
- ShellCommand: &docker_login
name: Login to docker registry
command: >
docker login
-u "${HARBOR_LOGIN}"
-p "${HARBOR_PASSWORD}"
registry.scality.com
usePTY: true
env:
HARBOR_LOGIN: '%(secret:harbor_login)s'
HARBOR_PASSWORD: '%(secret:harbor_password)s'
- ShellCommand: &wait_docker_daemon
name: Wait for Docker daemon to be ready
command: |
bash -c '
for i in {1..150}
do
docker info &> /dev/null && exit
sleep 2
done
echo "Could not reach Docker daemon from buildbot worker" >&2
exit 1'
haltOnFailure: true
stages:
pre-merge:
worker:
type: local
steps:
- TriggerStages:
name: Launch all workers
stage_names:
- docker-build
- linting-coverage
- file-ft-tests
- multiple-backend-test
- mongo-v0-ft-tests
- mongo-v1-ft-tests
- ceph-backend-tests
- kmip-ft-tests
- utapi-v2-tests
waitForFinish: true
haltOnFailure: true
linting-coverage:
worker:
type: docker
path: eve/workers/build
volumes: &default_volumes
- '/home/eve/workspace'
steps:
- Git: *clone
- ShellCommand: *yarn-install
- ShellCommand: *add-hostname
- ShellCommand: *credentials
- ShellCommand:
name: Unit Coverage mandatory file
command: |
set -ex
test -f .git/HEAD
- ShellCommand:
name: Linting
command: |
set -ex
yarn run --silent lint -- --max-warnings 0
yarn run --silent lint_md
flake8 $(git ls-files "*.py")
yamllint -c yamllint.yml $(git ls-files "*.yml")
- ShellCommand:
name: Unit Coverage
command: |
set -ex
unset HTTP_PROXY HTTPS_PROXY NO_PROXY
unset http_proxy https_proxy no_proxy
mkdir -p $CIRCLE_TEST_REPORTS/unit
yarn test
yarn run test_versionid_base62
yarn run test_legacy_location
env: &shared-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
CIRCLE_TEST_REPORTS: /tmp
CIRCLE_ARTIFACTS: /tmp
CI_REPORTS: /tmp
- ShellCommand:
name: Unit Coverage logs
command: find /tmp/unit -exec cat {} \;
- ShellCommand: *setup-junit-upload
- Upload: *upload-junits
multiple-backend-test:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2560Mi"
s3Mem: "2560Mi"
env:
<<: *multiple-backend-vars
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test
yarn run ft_awssdk_external_backends"
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
<<: *global-env
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- ShellCommand:
command: mvn test
workdir: build/tests/functional/jaws
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand:
command: rspec tests.rb
workdir: build/tests/functional/fog
<<: *follow-s3-log
env:
<<: *multiple-backend-vars
- ShellCommand: *check-s3-action-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
ceph-backend-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
ceph: eve/workers/ceph
vars:
aggressorMem: "2500Mi"
s3Mem: "2560Mi"
redis: enabled
env:
<<: *multiple-backend-vars
<<: *global-env
S3METADATA: mongodb
CI_CEPH: "true"
MPU_TESTING: "yes"
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash eve/workers/ceph/wait_for_ceph.sh
bash wait_for_local_port.bash 27018 40
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test"
env:
<<: *multiple-backend-vars
<<: *global-env
S3METADATA: mem
<<: *follow-s3-ceph-logs
- ShellCommand:
command: mvn test
workdir: build/tests/functional/jaws
<<: *follow-s3-ceph-logs
env:
<<: *multiple-backend-vars
- ShellCommand:
command: rspec tests.rb
workdir: build/tests/functional/fog
<<: *follow-s3-ceph-logs
env:
<<: *multiple-backend-vars
- ShellCommand:
command: |
yarn run ft_awssdk &&
yarn run ft_s3cmd
env:
<<: *file-mem-mpu
<<: *global-env
S3METADATA: mongodb
S3_LOCATION_FILE: "/kube_pod-prod-cloudserver-backend-0/\
build/tests/locationConfig/locationConfigCeph.json"
<<: *follow-s3-ceph-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
mongo-v0-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2Gi"
s3Mem: "1664Mi"
redis: enabled
env:
<<: *mongo-vars
<<: *global-env
DEFAULT_BUCKET_KEY_FORMAT: "v0"
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
yarn run ft_test
<<: *follow-s3-log
env:
<<: *mongo-vars
<<: *global-env
DEFAULT_BUCKET_KEY_FORMAT: "v0"
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
mongo-v1-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2Gi"
s3Mem: "1664Mi"
redis: enabled
env:
<<: *mongo-vars
<<: *global-env
DEFAULT_BUCKET_KEY_FORMAT: "v1"
METADATA_MAX_CACHED_BUCKETS: "1"
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
yarn run ft_test
yarn run ft_mixed_bucket_format_version
<<: *follow-s3-log
env:
<<: *mongo-vars
<<: *global-env
DEFAULT_BUCKET_KEY_FORMAT: "v1"
METADATA_MAX_CACHED_BUCKETS: "1"
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
file-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "3Gi"
s3Mem: "2560Mi"
redis: enabled
env:
<<: *file-mem-mpu
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
yarn run ft_test
<<: *follow-s3-log
env:
<<: *file-mem-mpu
<<: *global-env
- ShellCommand: *check-s3-action-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
kmip-ft-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
pykmip: eve/workers/pykmip
vars:
aggressorMem: "2Gi"
s3Mem: "1664Mi"
redis: enabled
pykmip: enabled
env:
<<: *file-mem-mpu
<<: *global-env
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
set -ex
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip
logfiles:
pykmip:
filename: /artifacts/pykmip.log
follow: true
s3:
filename: /artifacts/s3.log
follow: true
env:
<<: *file-mem-mpu
<<: *global-env
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
utapi-v2-tests:
worker:
type: kube_pod
path: eve/workers/pod.yaml
images:
aggressor: eve/workers/build
s3: "."
vars:
aggressorMem: "2Gi"
s3Mem: "2Gi"
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
steps:
- Git: *clone
- ShellCommand: *credentials
- ShellCommand: *yarn-install
- ShellCommand:
command: |
bash -c "
source /root/.aws/exports &> /dev/null
set -ex
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2"
<<: *follow-s3-log
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
- ShellCommand: *check-s3-action-logs
- ShellCommand: *setup-junit-upload
- Upload: *upload-artifacts
- Upload: *upload-junits
# The docker-build stage ensures that your images are built on every commit
# and also hosted on the registry to help you pull it up and
# test it in a real environment if needed.
# It also allows us to pull and rename it when performing a release.
docker-build:
worker: &docker_worker
type: kube_pod
path: eve/workers/docker/pod.yaml
images:
worker: eve/workers/docker
steps:
- Git: *clone
- ShellCommand: *wait_docker_daemon
- ShellCommand: *docker_login
- ShellCommand:
name: docker build
command: >-
docker build .
--tag=${DEVELOPMENT_DOCKER_IMAGE_NAME}:%(prop:commit_short_revision)s
env: *docker_env
haltOnFailure: true
- ShellCommand:
name: push docker image into the development namespace
command: docker push ${DEVELOPMENT_DOCKER_IMAGE_NAME}
haltOnFailure: true
env: *docker_env
- ShellCommand: &oras_login
name: Oras login
command:
oras login --username "${HARBOR_LOGIN}" --password "${HARBOR_PASSWORD}" ${REGISTRY}
env:
<<: *oras
HARBOR_LOGIN: '%(secret:harbor_login)s'
HARBOR_PASSWORD: '%(secret:harbor_password)s'
- ShellCommand:
name: push dashboards to the development namespace
command: |
for revision in %(prop:commit_short_revision)s latest ; do
oras push ${REGISTRY}/${PROJECT}-dev/${PROJECT}-dashboards:$revision ${LAYERS}
done
env: *oras
workdir: build/monitoring/
# This stage can be used to release your Docker image.
# To use this stage:
# 1. Tag the repository
# 2. Force a build using:
# * A branch that ideally matches the tag
# * The release stage
# * An extra property with the name tag and its value being the actual tag
release:
worker:
type: local
steps:
- TriggerStages:
stage_names:
- docker-release
haltOnFailure: true
docker-release:
worker: *docker_worker
steps:
- Git: *clone
- ShellCommand: *wait_docker_daemon
- ShellCommand: *docker_login
- ShellCommand:
name: Checkout tag
command: git checkout refs/tags/%(prop:tag)s
haltOnFailure: true
- ShellCommand:
name: docker build
command: >-
docker build .
--tag=${PRODUCTION_DOCKER_IMAGE_NAME}:%(prop:tag)s
env: *docker_env
- ShellCommand:
name: publish docker image to Scality Production OCI registry
command: docker push ${PRODUCTION_DOCKER_IMAGE_NAME}:%(prop:tag)s
env: *docker_env
- ShellCommand: *oras_login
- ShellCommand:
name: push dashboards to the production namespace
command: |
oras push ${REGISTRY}/${PROJECT}/${PROJECT}-dashboards:%(prop:tag)s ${LAYERS}
env: *oras
workdir: build/monitoring/


@ -1,62 +0,0 @@
FROM buildpack-deps:bionic-curl
#
# Install packages needed by the buildchain
#
ENV LANG C.UTF-8
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
&& apt-get update \
&& apt-get install -y yarn \
&& cat /tmp/*packages.list | xargs apt-get install -y \
&& update-ca-certificates \
&& git clone https://github.com/tj/n.git \
&& make -C ./n \
&& n 16.13.2 \
&& pip install pip==9.0.1 \
&& rm -rf ./n \
&& rm -rf /var/lib/apt/lists/* \
&& rm -f /tmp/packages.list
#
# Add user eve
#
RUN adduser -u 1042 --home /home/eve --disabled-password --gecos "" eve \
&& adduser eve sudo \
&& sed -ri 's/(%sudo.*)ALL$/\1NOPASSWD:ALL/' /etc/sudoers
#
# Install Dependencies
#
# Install RVM and gems
ENV RUBY_VERSION="2.5.0"
RUN gem update --system
RUN gpg2 --keyserver hkp://pgp.mit.edu --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB \
&& curl -sSL https://get.rvm.io | bash -s stable --ruby=$RUBY_VERSION \
&& usermod -a -G rvm eve
COPY ./gems.list /tmp/
RUN /bin/bash -l -c "\
source /usr/local/rvm/scripts/rvm \
&& cat /tmp/gems.list | xargs gem install \
&& rm /tmp/gems.list"
# Install Pip packages
COPY ./pip_packages.list /tmp/
RUN cat /tmp/pip_packages.list | xargs pip install \
&& rm -f /tmp/pip_packages.list \
&& mkdir /home/eve/.aws \
&& chown eve /home/eve/.aws
#
# Run buildbot-worker on startup
#
ARG BUILDBOT_VERSION
RUN pip install buildbot-worker==$BUILDBOT_VERSION
CMD ["/bin/bash", "-l", "-c", "buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS && buildbot-worker start --nodaemon"]


@ -1,14 +0,0 @@
ca-certificates
git
git-lfs
gnupg
libffi-dev
libssl-dev
python-pip
python2.7
python2.7-dev
software-properties-common
sudo
tcl
wget
procps


@ -1,5 +0,0 @@
nokogiri:1.12.5
fog-aws:1.3.0
json
mime-types:3.1
rspec:3.5


@ -1,3 +0,0 @@
flake8
s3cmd==1.6.1
yamllint


@ -1,15 +0,0 @@
build-essential
ca-certificates
curl
default-jdk
gnupg2
libdigest-hmac-perl
lsof
maven
netcat
redis-server
yarn
zlib1g-dev
jq
openssl
ruby-full


@ -1,33 +0,0 @@
FROM centos:7
ARG BUILDBOT_VERSION=0.9.12
VOLUME /home/eve/workspace
WORKDIR /home/eve/workspace
RUN yum install -y epel-release \
&& yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo \
&& yum install -y \
python-devel \
python-pip \
python36 \
python36-devel \
python36-pip \
git \
docker-ce-cli-18.09.6 \
which \
&& adduser -u 1042 --home /home/eve eve --groups docker \
&& chown -R eve:eve /home/eve \
&& pip3 install buildbot-worker==${BUILDBOT_VERSION}
ARG ORAS_VERSION=0.12.0
RUN curl -LO https://github.com/oras-project/oras/releases/download/v${ORAS_VERSION}/oras_${ORAS_VERSION}_linux_amd64.tar.gz && \
mkdir -p oras-install/ && \
tar -zxf oras_${ORAS_VERSION}_*.tar.gz -C /usr/local/bin oras && \
rm -rf oras_${ORAS_VERSION}_*.tar.gz oras-install/
CMD buildbot-worker create-worker . ${BUILDMASTER}:${BUILDMASTER_PORT} ${WORKERNAME} ${WORKERPASS} && buildbot-worker start --nodaemon


@ -1,43 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: worker
spec:
  containers:
    - name: build-worker
      image: "{{ images.worker }}"
      resources:
        requests:
          cpu: "250m"
          memory: 2Gi
        limits:
          cpu: "1"
          memory: 2Gi
      env:
        - name: DOCKER_HOST
          value: localhost:2375
      volumeMounts:
        - name: worker-workspace
          mountPath: /home/eve/workspace
    - name: dind-daemon
      image: docker:18.09.6-dind
      resources:
        requests:
          cpu: "500m"
          memory: 2Gi
        limits:
          cpu: "1"
          memory: 2Gi
      securityContext:
        privileged: true
      volumeMounts:
        - name: docker-storage
          mountPath: /var/lib/docker
        - name: worker-workspace
          mountPath: /home/eve/workspace
  volumes:
    - name: docker-storage
      emptyDir: {}
    - name: worker-workspace
      emptyDir: {}

View File

@ -1,233 +0,0 @@
---
apiVersion: v1
kind: Pod
metadata:
  name: "proxy-ci-test-pod"
spec:
  restartPolicy: Never
  terminationGracePeriodSeconds: 10
  hostAliases:
    - ip: "127.0.0.1"
      hostnames:
        - "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
        - "testrequestbucket.localhost"
        - "pykmip.local"
  {% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
  initContainers:
    - name: kmip-certs-installer
      image: {{ images.pykmip }}
      command: [ 'sh', '-c', 'cp /ssl/* /ssl-kmip/']
      volumeMounts:
        - name: kmip-certs
          readOnly: false
          mountPath: /ssl-kmip
  {%- endif %}
  containers:
    {% if vars.env.S3METADATA is defined and vars.env.S3METADATA == "mongodb" -%}
    - name: mongo
      image: scality/ci-mongo:3.6.8
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 500m
          memory: 1Gi
        limits:
          cpu: 500m
          memory: 1Gi
    {%- endif %}
    - name: aggressor
      image: {{ images.aggressor }}
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: "1"
          memory: {{ vars.aggressorMem }}
        limits:
          cpu: "1"
          memory: {{ vars.aggressorMem }}
      volumeMounts:
        - name: creds
          readOnly: false
          mountPath: /root/.aws
        - name: artifacts
          readOnly: true
          mountPath: /artifacts
      command:
        - bash
        - -lc
        - |
          buildbot-worker create-worker . $BUILDMASTER:$BUILDMASTER_PORT $WORKERNAME $WORKERPASS
          buildbot-worker start --nodaemon
      env:
        - name: CI
          value: "true"
        - name: ENABLE_LOCAL_CACHE
          value: "true"
        - name: REPORT_TOKEN
          value: "report-token-1"
        - name: REMOTE_MANAGEMENT_DISABLE
          value: "1"
        {% for key, value in vars.env.items() %}
        - name: {{ key }}
          value: "{{ value }}"
        {% endfor %}
    - name: s3
      image: {{ images.s3 }}
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: "1750m"
          memory: {{ vars.s3Mem }}
        limits:
          cpu: "1750m"
          memory: {{ vars.s3Mem }}
      volumeMounts:
        - name: creds
          readOnly: false
          mountPath: /root/.aws
        - name: certs
          readOnly: false
          mountPath: /tmp
        - name: artifacts
          readOnly: false
          mountPath: /artifacts
        - name: kmip-certs
          readOnly: false
          mountPath: /ssl-kmip
      command:
        - bash
        - -ec
        - |
          sleep 10 # wait for mongo
          /usr/src/app/docker-entrypoint.sh yarn start | tee -a /artifacts/s3.log
      env:
        {% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is not defined -%}
        - name: S3_LOCATION_FILE
          value: "/usr/src/app/tests/locationConfig/locationConfigTests.json"
        {%- endif %}
        {% if vars.env.S3DATA is defined and vars.env.S3DATA == "multiple" and vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
        - name: S3_LOCATION_FILE
          value: "/usr/src/app/tests/locationConfig/locationConfigCeph.json"
        {%- endif %}
        {% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
        - name: S3KMS
          value: kmip
        - name: S3KMIP_PORT
          value: "5696"
        - name: S3KMIP_HOSTS
          value: "pykmip.local"
        - name: S3KMIP_COMPOUND_CREATE
          value: "false"
        - name: S3KMIP_BUCKET_ATTRIBUTE_NAME
          value: ''
        - name: S3KMIP_PIPELINE_DEPTH
          value: "8"
        - name: S3KMIP_KEY
          value: /ssl-kmip/kmip-client-key.pem
        - name: S3KMIP_CERT
          value: /ssl-kmip/kmip-client-cert.pem
        - name: S3KMIP_CA
          value: /ssl-kmip/kmip-ca.pem
        {%- endif %}
        - name: CI
          value: "true"
        - name: ENABLE_LOCAL_CACHE
          value: "true"
        - name: MONGODB_HOSTS
          value: "localhost:27018"
        - name: MONGODB_RS
          value: "rs0"
        - name: REDIS_HOST
          value: "localhost"
        - name: REDIS_PORT
          value: "6379"
        - name: REPORT_TOKEN
          value: "report-token-1"
        - name: REMOTE_MANAGEMENT_DISABLE
          value: "1"
        - name: HEALTHCHECKS_ALLOWFROM
          value: "0.0.0.0/0"
        {% for key, value in vars.env.items() %}
        - name: {{ key }}
          value: "{{ value }}"
        {% endfor %}
    {% if vars.redis is defined and vars.redis == "enabled" -%}
    - name: redis
      image: redis:alpine
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 200m
          memory: 128Mi
        limits:
          cpu: 200m
          memory: 128Mi
    {%- endif %}
    {% if vars.env.CI_PROXY is defined and vars.env.CI_PROXY == "true" -%}
    - name: squid
      image: scality/ci-squid
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 250m
          memory: 128Mi
        limits:
          cpu: 250m
          memory: 128Mi
      volumeMounts:
        - name: certs
          readOnly: false
          mountPath: /ssl
      command:
        - sh
        - -exc
        - |
          mkdir -p /ssl
          openssl req -new -newkey rsa:2048 -sha256 -days 365 -nodes -x509 \
            -subj "/C=US/ST=Country/L=City/O=Organization/CN=CN=scality-proxy" \
            -keyout /ssl/myca.pem -out /ssl/myca.pem
          cp /ssl/myca.pem /ssl/CA.pem
          squid -f /etc/squid/squid.conf -N -z
          squid -f /etc/squid/squid.conf -NYCd 1
    {%- endif %}
    {% if vars.env.CI_CEPH is defined and vars.env.CI_CEPH == "true" -%}
    - name: ceph
      image: {{ images.ceph }}
      imagePullPolicy: IfNotPresent
      resources:
        requests:
          cpu: 500m
          memory: 1536Mi
        limits:
          cpu: 500m
          memory: 1536Mi
      volumeMounts:
        - name: artifacts
          readOnly: false
          mountPath: /artifacts
    {%- endif %}
    {% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
    - name: pykmip
      image: {{ images.pykmip }}
      imagePullPolicy: IfNotPresent
      volumeMounts:
        - name: artifacts
          readOnly: false
          mountPath: /artifacts
      resources:
        requests:
          cpu: 100m
          memory: 128Mi
        limits:
          cpu: 100m
          memory: 128Mi
    {%- endif %}
  volumes:
    - name: creds
      emptyDir: {}
    - name: certs
      emptyDir: {}
    - name: artifacts
      emptyDir: {}
    - name: kmip-certs
      emptyDir: {}

View File

@ -48,7 +48,7 @@ signed_headers = 'host;x-amz-content-sha256;x-amz-date'
 canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
     .format(method, canonical_uri, canonical_querystring, canonical_headers,
             signed_headers, payload_hash)
-print canonical_request
+print(canonical_request)
 credential_scope = '{0}/{1}/{2}/aws4_request' \
     .format(date_stamp, region, service)
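For reference, a hedged Node.js sketch of the same canonical-request step; all request pieces below are placeholders standing in for the values built earlier in the Python excerpt, not values taken from this repository:

const crypto = require('crypto');

// Placeholder request pieces for an unsigned-payload GET on the bucket root
const method = 'GET';
const canonicalUri = '/';
const canonicalQuerystring = '';
const payloadHash = crypto.createHash('sha256').update('').digest('hex');
const amzDate = '20240101T000000Z';
const canonicalHeaders = `host:s3.amazonaws.com\nx-amz-content-sha256:${payloadHash}\nx-amz-date:${amzDate}\n`;
const signedHeaders = 'host;x-amz-content-sha256;x-amz-date';

// Same six-field join as in the Python snippet, then the SigV4 hash of it
const canonicalRequest = [method, canonicalUri, canonicalQuerystring,
    canonicalHeaders, signedHeaders, payloadHash].join('\n');
console.log(canonicalRequest);
const hashedCanonicalRequest = crypto.createHash('sha256')
    .update(canonicalRequest, 'utf8').digest('hex');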

View File

@ -1,50 +1,28 @@
FROM registry.scality.com/federation/nodesvc-base:7.10.6.0 FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
WORKDIR /usr/src/app ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
# Keep the .git directory in order to properly report version COPY . ${HOME_DIR}/s3
COPY ./package.json yarn.lock ./ RUN chown -R ${USER} ${HOME_DIR}
RUN pip3 install redis===3.5.3 requests==2.27.1 && \
apt-get install -y git-lfs
ENV PYTHON=python3.9 USER ${USER}
ENV PY_VERSION=3.9.7 WORKDIR ${HOME_DIR}/s3
RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \
git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all && \
yarn global remove typescript
RUN apt-get update \ # run symlinking separately to avoid yarn installation errors
&& apt-get install -y --no-install-recommends \ # we might have to check if the symlinking is really needed!
jq \ RUN ln -sf /scality-kms node_modules
python \
git \
build-essential \
ssh \
ca-certificates \
wget \
libffi-dev \
zlib1g-dev \
&& apt-get clean \
&& mkdir -p /root/ssh \
&& ssh-keyscan -H github.com > /root/ssh/known_hosts
RUN cd /tmp \
&& wget https://www.python.org/ftp/python/$PY_VERSION/Python-$PY_VERSION.tgz \
&& tar -C /usr/local/bin -xzvf Python-$PY_VERSION.tgz \
&& cd /usr/local/bin/Python-$PY_VERSION \
&& ./configure --enable-optimizations \
&& make \
&& make altinstall \
&& rm -rf /tmp/Python-$PY_VERSION.tgz
RUN yarn cache clean \
&& yarn install --production --ignore-optional --ignore-engines --network-concurrency 1 \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-*
COPY ./ ./
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
EXPOSE 8000 EXPOSE 8000
CMD bash -c "source ${CONF_DIR}/env && export && supervisord -c ${CONF_DIR}/supervisord.conf"

View File

@ -1,3 +1,10 @@
 'use strict'; // eslint-disable-line strict
+require('werelogs').stderrUtils.catchAndTimestampStderr(
+    undefined,
+    // Do not exit as workers have their own listener that will exit
+    // But primary don't have another listener
+    require('cluster').isPrimary ? 1 : null,
+);
 require('./lib/server.js')();

View File

@ -8,16 +8,18 @@ const crypto = require('crypto');
const { v4: uuidv4 } = require('uuid'); const { v4: uuidv4 } = require('uuid');
const cronParser = require('cron-parser'); const cronParser = require('cron-parser');
const joi = require('@hapi/joi'); const joi = require('@hapi/joi');
const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
const { isValidBucketName } = require('arsenal').s3routes.routesUtils; const { isValidBucketName } = s3routes.routesUtils;
const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig; const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig;
const { buildAuthDataAccount } = require('./auth/in_memory/builder'); const { buildAuthDataAccount } = require('./auth/in_memory/builder');
const validExternalBackends = require('../constants').externalBackends; const validExternalBackends = require('../constants').externalBackends;
const { azureAccountNameRegex, base64Regex, const { azureAccountNameRegex, base64Regex,
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates, allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
} = require('../constants'); } = require('../constants');
const { utapiVersion } = require('utapi'); const { utapiVersion } = require('utapi');
const { scaleMsPerDay } = s3middleware.objectUtils;
const constants = require('../constants');
// config paths // config paths
const configSearchPaths = [ const configSearchPaths = [
@ -105,6 +107,47 @@ function parseSproxydConfig(configSproxyd) {
return joi.attempt(configSproxyd, joiSchema, 'bad config'); return joi.attempt(configSproxyd, joiSchema, 'bad config');
} }
function parseRedisConfig(redisConfig) {
const joiSchema = joi.object({
password: joi.string().allow(''),
host: joi.string(),
port: joi.number(),
retry: joi.object({
connectBackoff: joi.object({
min: joi.number().required(),
max: joi.number().required(),
jitter: joi.number().required(),
factor: joi.number().required(),
deadline: joi.number().required(),
}),
}),
// sentinel config
sentinels: joi.alternatives().try(
joi.string()
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
.custom(hosts => hosts.split(',').map(item => {
const [host, port] = item.split(':');
return { host, port: Number.parseInt(port, 10) };
})),
joi.array().items(
joi.object({
host: joi.string().required(),
port: joi.number().required(),
})
).min(1),
),
name: joi.string(),
sentinelPassword: joi.string().allow(''),
})
.and('host', 'port')
.and('sentinels', 'name')
.xor('host', 'sentinels')
.without('sentinels', ['host', 'port'])
.without('host', ['sentinels', 'sentinelPassword']);
return joi.attempt(redisConfig, joiSchema, 'bad config');
}
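// Hedged examples (not from the repository) of redis configs that the
// parseRedisConfig() schema above accepts: a standalone instance, and a
// sentinel setup given either as a "host:port,host:port" string or an array.
const standaloneRedis = parseRedisConfig({ host: 'localhost', port: 6379, password: '' });
const sentinelRedis = parseRedisConfig({
    name: 'mymaster',
    sentinels: 'sentinel-1:16379,sentinel-2:16379',
    sentinelPassword: '',
});
// host/port and sentinels are mutually exclusive (xor), sentinels always
// requires name, and the string form is normalized into [{ host, port }, ...].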
function restEndpointsAssert(restEndpoints, locationConstraints) { function restEndpointsAssert(restEndpoints, locationConstraints) {
assert(typeof restEndpoints === 'object', assert(typeof restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints'); 'bad config: restEndpoints must be an object of endpoints');
@ -135,26 +178,71 @@ function gcpLocationConstraintAssert(location, locationObj) {
}); });
} }
function azureLocationConstraintAssert(location, locationObj) { function azureGetStorageAccountName(location, locationDetails) {
const { const { azureStorageAccountName } = locationDetails;
azureStorageEndpoint,
azureStorageAccountName,
azureStorageAccessKey,
azureContainerName,
} = locationObj.details;
const storageEndpointFromEnv =
process.env[`${location}_AZURE_STORAGE_ENDPOINT`];
const storageAccountNameFromEnv = const storageAccountNameFromEnv =
process.env[`${location}_AZURE_STORAGE_ACCOUNT_NAME`]; process.env[`${location}_AZURE_STORAGE_ACCOUNT_NAME`];
const storageAccessKeyFromEnv = return storageAccountNameFromEnv || azureStorageAccountName;
process.env[`${location}_AZURE_STORAGE_ACCESS_KEY`]; }
const locationParams = {
azureStorageEndpoint: storageEndpointFromEnv || azureStorageEndpoint, function azureGetLocationCredentials(location, locationDetails) {
azureStorageAccountName: const storageAccessKey =
storageAccountNameFromEnv || azureStorageAccountName, process.env[`${location}_AZURE_STORAGE_ACCESS_KEY`] ||
azureStorageAccessKey: storageAccessKeyFromEnv || azureStorageAccessKey, locationDetails.azureStorageAccessKey;
azureContainerName, const sasToken =
process.env[`${location}_AZURE_SAS_TOKEN`] ||
locationDetails.sasToken;
const clientKey =
process.env[`${location}_AZURE_CLIENT_KEY`] ||
locationDetails.clientKey;
const authMethod =
process.env[`${location}_AZURE_AUTH_METHOD`] ||
locationDetails.authMethod ||
(storageAccessKey && 'shared-key') ||
(sasToken && 'shared-access-signature') ||
(clientKey && 'client-secret') ||
'shared-key';
switch (authMethod) {
case 'shared-key':
default:
return {
authMethod,
storageAccountName:
azureGetStorageAccountName(location, locationDetails),
storageAccessKey,
}; };
case 'shared-access-signature':
return {
authMethod,
sasToken,
};
case 'client-secret':
return {
authMethod,
tenantId:
process.env[`${location}_AZURE_TENANT_ID`] ||
locationDetails.tenantId,
clientId:
process.env[`${location}_AZURE_CLIENT_ID`] ||
locationDetails.clientId,
clientKey,
};
}
}
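// Hedged illustration of the three auth methods resolved by
// azureGetLocationCredentials() above; every value below is made up.
const sharedKeyDetails = {
    azureStorageAccountName: 'myaccount',
    azureStorageAccessKey: 'bXlzZWNyZXRrZXk=',
};
const sasDetails = { sasToken: 'sv=2021-08-06&ss=b&sig=abc' };
const servicePrincipalDetails = {
    tenantId: 'my-tenant', clientId: 'my-client', clientKey: 'my-secret',
};
// Each field may also come from the environment, e.g.
// <location>_AZURE_STORAGE_ACCESS_KEY, <location>_AZURE_SAS_TOKEN or
// <location>_AZURE_CLIENT_KEY, and the method can be forced with
// <location>_AZURE_AUTH_METHOD.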
function azureLocationConstraintAssert(location, locationObj) {
const locationParams = {
...azureGetLocationCredentials(location, locationObj.details),
azureStorageEndpoint:
process.env[`${location}_AZURE_STORAGE_ENDPOINT`] ||
locationObj.details.azureStorageEndpoint,
azureContainerName: locationObj.details.azureContainerName,
};
Object.keys(locationParams).forEach(param => { Object.keys(locationParams).forEach(param => {
const value = locationParams[param]; const value = locationParams[param];
assert.notEqual(value, undefined, assert.notEqual(value, undefined,
@ -164,13 +252,16 @@ function azureLocationConstraintAssert(location, locationObj) {
`bad location constraint: "${location}" ${param} ` + `bad location constraint: "${location}" ${param} ` +
`"${value}" must be a string`); `"${value}" must be a string`);
}); });
assert(azureAccountNameRegex.test(locationParams.azureStorageAccountName),
if (locationParams.authMethod === 'shared-key') {
assert(azureAccountNameRegex.test(locationParams.storageAccountName),
`bad location constraint: "${location}" azureStorageAccountName ` + `bad location constraint: "${location}" azureStorageAccountName ` +
`"${locationParams.storageAccountName}" is an invalid value`); `"${locationParams.storageAccountName}" is an invalid value`);
assert(base64Regex.test(locationParams.azureStorageAccessKey), assert(base64Regex.test(locationParams.storageAccessKey),
`bad location constraint: "${location}" ` + `bad location constraint: "${location}" ` +
'azureStorageAccessKey is not a valid base64 string'); 'azureStorageAccessKey is not a valid base64 string');
assert(isValidBucketName(azureContainerName, []), }
assert(isValidBucketName(locationParams.azureContainerName, []),
`bad location constraint: "${location}" ` + `bad location constraint: "${location}" ` +
'azureContainerName is an invalid container name'); 'azureContainerName is an invalid container name');
} }
@ -189,6 +280,60 @@ function hdClientLocationConstraintAssert(configHd) {
return hdclientFields; return hdclientFields;
} }
function azureArchiveLocationConstraintAssert(locationObj) {
const checkedFields = [
'azureContainerName',
'azureStorageEndpoint',
];
if (Object.keys(locationObj.details).length === 0 ||
!checkedFields.every(field => field in locationObj.details)) {
return;
}
const {
azureContainerName,
azureStorageEndpoint,
} = locationObj.details;
const stringFields = [
azureContainerName,
azureStorageEndpoint,
];
stringFields.forEach(field => {
assert(typeof field === 'string',
`bad config: ${field} must be a string`);
});
let hasAuthMethod = false;
if (locationObj.details.sasToken !== undefined) {
assert(typeof locationObj.details.sasToken === 'string',
`bad config: ${locationObj.details.sasToken} must be a string`);
hasAuthMethod = true;
}
if (locationObj.details.azureStorageAccountName !== undefined &&
locationObj.details.azureStorageAccessKey !== undefined) {
assert(typeof locationObj.details.azureStorageAccountName === 'string',
`bad config: ${locationObj.details.azureStorageAccountName} must be a string`);
assert(typeof locationObj.details.azureStorageAccessKey === 'string',
`bad config: ${locationObj.details.azureStorageAccessKey} must be a string`);
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
hasAuthMethod = true;
}
if (locationObj.details.tenantId !== undefined &&
locationObj.details.clientId !== undefined &&
locationObj.details.clientKey !== undefined) {
assert(typeof locationObj.details.tenantId === 'string',
`bad config: ${locationObj.details.tenantId} must be a string`);
assert(typeof locationObj.details.clientId === 'string',
`bad config: ${locationObj.details.clientId} must be a string`);
assert(typeof locationObj.details.clientKey === 'string',
`bad config: ${locationObj.details.clientKey} must be a string`);
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
hasAuthMethod = true;
}
assert(hasAuthMethod, 'Missing authentication method');
}
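// A hedged example (made-up names) of an "azure_archive" location entry that
// azureArchiveLocationConstraintAssert() accepts: the two checked fields plus
// exactly one authentication method inside details.
const azureArchiveLocation = {
    type: 'azure_archive',
    legacyAwsBehavior: false,
    details: {
        azureContainerName: 'archive-container',
        azureStorageEndpoint: 'https://myaccount.blob.core.windows.net/',
        azureStorageAccountName: 'myaccount',
        azureStorageAccessKey: 'bXlzZWNyZXRrZXk=',
        // alternatively: sasToken, or tenantId + clientId + clientKey
    },
};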
function dmfLocationConstraintAssert(locationObj) { function dmfLocationConstraintAssert(locationObj) {
const checkedFields = [ const checkedFields = [
'endpoint', 'endpoint',
@ -232,7 +377,7 @@ function dmfLocationConstraintAssert(locationObj) {
function locationConstraintAssert(locationConstraints) { function locationConstraintAssert(locationConstraints) {
const supportedBackends = const supportedBackends =
['mem', 'file', 'scality', ['mem', 'file', 'scality',
'mongodb', 'dmf'].concat(Object.keys(validExternalBackends)); 'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object', assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object'); 'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => { Object.keys(locationConstraints).forEach(l => {
@ -343,6 +488,9 @@ function locationConstraintAssert(locationConstraints) {
if (locationConstraints[l].type === 'dmf') { if (locationConstraints[l].type === 'dmf') {
dmfLocationConstraintAssert(locationConstraints[l]); dmfLocationConstraintAssert(locationConstraints[l]);
} }
if (locationConstraints[l].type === 'azure_archive') {
azureArchiveLocationConstraintAssert(locationConstraints[l]);
}
if (locationConstraints[l].type === 'pfs') { if (locationConstraints[l].type === 'pfs') {
assert(typeof details.pfsDaemonEndpoint === 'object', assert(typeof details.pfsDaemonEndpoint === 'object',
'bad config: pfsDaemonEndpoint is mandatory and must be an object'); 'bad config: pfsDaemonEndpoint is mandatory and must be an object');
@ -354,26 +502,33 @@ function locationConstraintAssert(locationConstraints) {
locationConstraints[l].details.connector.hdclient); locationConstraints[l].details.connector.hdclient);
} }
}); });
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
} }
function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) { function parseUtapiReindex(config) {
const {
enabled,
schedule,
redis,
bucketd,
onlyCountLatestWhenObjectLocked,
} = config;
assert(typeof enabled === 'boolean', assert(typeof enabled === 'boolean',
'bad config: utapi.reindex.enabled must be a boolean'); 'bad config: utapi.reindex.enabled must be a boolean');
assert(typeof sentinel === 'object',
'bad config: utapi.reindex.sentinel must be an object'); const parsedRedis = parseRedisConfig(redis);
assert(typeof sentinel.port === 'number', assert(Array.isArray(parsedRedis.sentinels),
'bad config: utapi.reindex.sentinel.port must be a number'); 'bad config: utapi reindex redis config requires a list of sentinels');
assert(typeof sentinel.name === 'string',
'bad config: utapi.reindex.sentinel.name must be a string');
assert(typeof bucketd === 'object', assert(typeof bucketd === 'object',
'bad config: utapi.reindex.bucketd must be an object'); 'bad config: utapi.reindex.bucketd must be an object');
assert(typeof bucketd.port === 'number', assert(typeof bucketd.port === 'number',
'bad config: utapi.reindex.bucketd.port must be a number'); 'bad config: utapi.reindex.bucketd.port must be a number');
assert(typeof schedule === 'string', assert(typeof schedule === 'string',
'bad config: utapi.reindex.schedule must be a string'); 'bad config: utapi.reindex.schedule must be a string');
if (onlyCountLatestWhenObjectLocked !== undefined) {
assert(typeof onlyCountLatestWhenObjectLocked === 'boolean',
'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean');
}
try { try {
cronParser.parseExpression(schedule); cronParser.parseExpression(schedule);
} catch (e) { } catch (e) {
@ -381,6 +536,13 @@ function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
'bad config: utapi.reindex.schedule must be a valid ' + 'bad config: utapi.reindex.schedule must be a valid ' +
`cron schedule. ${e.message}.`); `cron schedule. ${e.message}.`);
} }
return {
enabled,
schedule,
redis: parsedRedis,
bucketd,
onlyCountLatestWhenObjectLocked,
};
} }
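// Sketch of a utapi.reindex block accepted by the new parseUtapiReindex();
// hosts, ports and the schedule are illustrative only.
const reindexConfig = parseUtapiReindex({
    enabled: true,
    schedule: '0 2 * * *', // validated with cron-parser
    redis: {
        name: 'scality-s3',
        sentinels: [{ host: 'localhost', port: 16379 }],
    },
    bucketd: { host: 'localhost', port: 9000 },
    onlyCountLatestWhenObjectLocked: false,
});
// The redis block must describe sentinels (a plain host/port pair is rejected
// here), and the parsed result is what ends up in this.utapi.reindex.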
function requestsConfigAssert(requestsConfig) { function requestsConfigAssert(requestsConfig) {
@ -468,7 +630,6 @@ class Config extends EventEmitter {
// Read config automatically // Read config automatically
this._getLocationConfig(); this._getLocationConfig();
this._getConfig(); this._getConfig();
this._configureBackends();
} }
_getLocationConfig() { _getLocationConfig() {
@ -680,11 +841,11 @@ class Config extends EventEmitter {
this.websiteEndpoints = config.websiteEndpoints; this.websiteEndpoints = config.websiteEndpoints;
} }
this.clusters = false; this.workers = false;
if (config.clusters !== undefined) { if (config.workers !== undefined) {
assert(Number.isInteger(config.clusters) && config.clusters > 0, assert(Number.isInteger(config.workers) && config.workers > 0,
'bad config: clusters must be a positive integer'); 'bad config: workers must be a positive integer');
this.clusters = config.clusters; this.workers = config.workers;
} }
if (config.usEastBehavior !== undefined) { if (config.usEastBehavior !== undefined) {
@ -922,8 +1083,7 @@ class Config extends EventEmitter {
assert(typeof config.localCache.port === 'number', assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number'); 'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) { if (config.localCache.password !== undefined) {
assert( assert(typeof config.localCache.password === 'string',
this._verifyRedisPassword(config.localCache.password),
'config: vad password for localCache. password must' + 'config: vad password for localCache. password must' +
' be a string'); ' be a string');
} }
@ -949,56 +1109,46 @@ class Config extends EventEmitter {
} }
if (config.redis) { if (config.redis) {
if (config.redis.sentinels) { this.redis = parseRedisConfig(config.redis);
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');
if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.redis.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.redis.sentinels)) {
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
} }
if (config.scuba) {
if (config.redis.sentinelPassword !== undefined) { this.scuba = {};
assert( if (config.scuba.host) {
this._verifyRedisPassword(config.redis.sentinelPassword)); assert(typeof config.scuba.host === 'string',
this.redis.sentinelPassword = config.redis.sentinelPassword; 'bad config: scuba host must be a string');
this.scuba.host = config.scuba.host;
} }
} else { if (config.scuba.port) {
// check for standalone configuration assert(Number.isInteger(config.scuba.port)
this.redis = {}; && config.scuba.port > 0,
assert(typeof config.redis.host === 'string', 'bad config: scuba port must be a positive integer');
'bad config: redis.host must be a string'); this.scuba.port = config.scuba.port;
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
}
if (config.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.redis.password),
'bad config: invalid password for redis. password must ' +
'be a string');
this.redis.password = config.redis.password;
} }
} }
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
assert(typeof process.env.SCUBA_HOST === 'string',
'bad config: scuba host must be a string');
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
&& Number(process.env.SCUBA_PORT) > 0,
'bad config: scuba port must be a positive integer');
this.scuba = {
host: process.env.SCUBA_HOST,
port: Number(process.env.SCUBA_PORT),
};
}
if (this.scuba) {
this.quotaEnabled = true;
}
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
config.quota?.maxStatenessMS ||
24 * 60 * 60 * 1000;
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
config.quota?.enableInflights || false;
this.quota = {
maxStaleness,
enableInflights,
};
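// Hedged sketch of how the scuba/quota settings above can be supplied; the
// host and port values are placeholders:
//   in config.json:   "scuba": { "host": "scuba", "port": 8100 },
//                     "quota": { "maxStatenessMS": 3600000, "enableInflights": true }
//   or via environment: SCUBA_HOST=scuba SCUBA_PORT=8100 QUOTA_ENABLE_INFLIGHTS=true
// Defining scuba either way flips this.quotaEnabled to true; otherwise the
// quota block still gets its defaults (24h max staleness, inflights disabled).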
if (config.utapi) { if (config.utapi) {
this.utapi = { component: 's3' }; this.utapi = { component: 's3' };
if (config.utapi.host) { if (config.utapi.host) {
@ -1027,50 +1177,8 @@ class Config extends EventEmitter {
assert(config.redis, 'missing required property of utapi ' + assert(config.redis, 'missing required property of utapi ' +
'configuration: redis'); 'configuration: redis');
if (config.utapi.redis) { if (config.utapi.redis) {
if (config.utapi.redis.sentinels) { this.utapi.redis = parseRedisConfig(config.utapi.redis);
this.utapi.redis = { sentinels: [], name: null }; if (this.utapi.redis.retry === undefined) {
assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port });
});
} else {
// check for standalone configuration
this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port;
}
if (config.utapi.redis.retry !== undefined) {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}
this.utapi.redis.retry = config.utapi.redis.retry;
} else {
this.utapi.redis.retry = { this.utapi.redis.retry = {
connectBackoff: { connectBackoff: {
min: 10, min: 10,
@ -1081,22 +1189,6 @@ class Config extends EventEmitter {
}, },
}; };
} }
if (config.utapi.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.password = config.utapi.redis.password;
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(
config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
} }
if (config.utapi.metrics) { if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics; this.utapi.metrics = config.utapi.metrics;
@ -1166,8 +1258,7 @@ class Config extends EventEmitter {
} }
if (config.utapi && config.utapi.reindex) { if (config.utapi && config.utapi.reindex) {
parseUtapiReindex(config.utapi.reindex); this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
this.utapi.reindex = config.utapi.reindex;
} }
} }
@ -1212,6 +1303,8 @@ class Config extends EventEmitter {
} }
} }
this.authdata = config.authdata || 'authdata.json';
this.kms = {}; this.kms = {};
if (config.kms) { if (config.kms) {
assert(typeof config.kms.userName === 'string'); assert(typeof config.kms.userName === 'string');
@ -1431,25 +1524,6 @@ class Config extends EventEmitter {
this.outboundProxy.certs = certObj.certs; this.outboundProxy.certs = certObj.certs;
} }
this.managementAgent = {};
this.managementAgent.port = 8010;
this.managementAgent.host = 'localhost';
if (config.managementAgent !== undefined) {
if (config.managementAgent.port !== undefined) {
assert(Number.isInteger(config.managementAgent.port)
&& config.managementAgent.port > 0,
'bad config: managementAgent port must be a positive ' +
'integer');
this.managementAgent.port = config.managementAgent.port;
}
if (config.managementAgent.host !== undefined) {
assert.strictEqual(typeof config.managementAgent.host, 'string',
'bad config: management agent host must ' +
'be a string');
this.managementAgent.host = config.managementAgent.host;
}
}
// Ephemeral token to protect the reporting endpoint: // Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file, // try inherited from parent first, then hardcoded in conf file,
// then create a fresh one as last resort. // then create a fresh one as last resort.
@ -1502,6 +1576,10 @@ class Config extends EventEmitter {
requestsConfigAssert(config.requests); requestsConfigAssert(config.requests);
this.requests = config.requests; this.requests = config.requests;
} }
// CLDSRV-378: on 8.x branches, null version compatibility
// mode is enforced because null keys are not supported by the
// MongoDB backend.
this.nullVersionCompatMode = true;
if (config.bucketNotificationDestinations) { if (config.bucketNotificationDestinations) {
this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations); this.bucketNotificationDestinations = bucketNotifAssert(config.bucketNotificationDestinations);
} }
@ -1510,37 +1588,102 @@ class Config extends EventEmitter {
// Version of the configuration we're running under // Version of the configuration we're running under
this.overlayVersion = config.overlayVersion || 0; this.overlayVersion = config.overlayVersion || 0;
this._setTimeOptions();
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
this.multiObjectDeleteConcurrency = extractedNumber;
}
this.multiObjectDeleteEnableOptimizations = true;
if (config.multiObjectDeleteEnableOptimizations === false) {
this.multiObjectDeleteEnableOptimizations = false;
}
this.testingMode = config.testingMode || false;
this.maxScannedLifecycleListingEntries = constants.maxScannedLifecycleListingEntries;
if (config.maxScannedLifecycleListingEntries !== undefined) {
// maxScannedLifecycleListingEntries > 2 is required as a minimum because we must
// scan at least three entries to determine version eligibility.
// Two entries representing the master key and the following one representing the non-current version.
assert(Number.isInteger(config.maxScannedLifecycleListingEntries) &&
config.maxScannedLifecycleListingEntries > 2,
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
}
this._configureBackends(config);
}
_setTimeOptions() {
// NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
// TIME_PROGRESSION_FACTOR which decreases the weight attributed to a day in order to among other things
// expedite the lifecycle of objects.
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
// decreases the weight attributed to a day in order to expedite the lifecycle of objects.
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
'"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
// The scaledMsPerDay value is initially set to the number of milliseconds per day
// (24 * 60 * 60 * 1000) as the default value.
// However, during testing, if the timeProgressionFactor is defined and greater than 1,
// the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
// earlier in time.
const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
this.timeOptions = {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
};
}
getTimeOptions() {
return this.timeOptions;
} }
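// Illustrative use of the testing knob described above, with a made-up factor:
// setting TIME_PROGRESSION_FACTOR=24 before the config is (re)loaded should
// shrink a lifecycle "day" from 86400000 ms to roughly 3600000 ms.
process.env.TIME_PROGRESSION_FACTOR = '24';
config._setTimeOptions();
const { timeProgressionFactor, scaledMsPerDay } = config.getTimeOptions();
// timeProgressionFactor === 24, scaledMsPerDay ~= (24 * 60 * 60 * 1000) / 24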
_getAuthData() { _getAuthData() {
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json')); return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
} }
_configureBackends() { _configureBackends(config) {
const backends = config.backends || {};
/** /**
* Configure the backends for Authentication, Data and Metadata. * Configure the backends for Authentication, Data and Metadata.
*/ */
let auth = 'mem'; let auth = backends.auth || 'mem';
let data = 'multiple'; let data = backends.data || 'multiple';
let metadata = 'file'; let metadata = backends.metadata || 'file';
let kms = 'file'; let kms = backends.kms || 'file';
let quota = backends.quota || 'none';
if (process.env.S3BACKEND) { if (process.env.S3BACKEND) {
const validBackends = ['mem', 'file', 'scality', 'cdmi']; const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1, assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' + 'bad environment variable: S3BACKEND environment variable ' +
'should be one of mem/file/scality/cdmi' 'should be one of mem/file/scality/cdmi'
); );
auth = process.env.S3BACKEND; auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
data = process.env.S3BACKEND; data = process.env.S3BACKEND;
metadata = process.env.S3BACKEND; metadata = process.env.S3BACKEND;
kms = process.env.S3BACKEND; kms = process.env.S3BACKEND;
} }
if (process.env.S3VAULT) { if (process.env.S3VAULT) {
auth = process.env.S3VAULT; auth = process.env.S3VAULT;
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
} }
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') { if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
// Auth only checks for 'mem' since mem === file // Auth only checks for 'mem' since mem === file
auth = 'mem';
let authData; let authData;
if (process.env.SCALITY_ACCESS_KEY_ID && if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) { process.env.SCALITY_SECRET_ACCESS_KEY) {
@ -1569,10 +1712,10 @@ class Config extends EventEmitter {
'should be one of mem/file/scality/multiple' 'should be one of mem/file/scality/multiple'
); );
data = process.env.S3DATA; data = process.env.S3DATA;
}
if (data === 'scality' || data === 'multiple') { if (data === 'scality' || data === 'multiple') {
data = 'multiple'; data = 'multiple';
} }
}
assert(this.locationConstraints !== undefined && assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined, this.restEndpoints !== undefined,
'bad config: locationConstraints and restEndpoints must be set' 'bad config: locationConstraints and restEndpoints must be set'
@ -1584,18 +1727,18 @@ class Config extends EventEmitter {
if (process.env.S3KMS) { if (process.env.S3KMS) {
kms = process.env.S3KMS; kms = process.env.S3KMS;
} }
if (process.env.S3QUOTA) {
quota = process.env.S3QUOTA;
}
this.backends = { this.backends = {
auth, auth,
data, data,
metadata, metadata,
kms, kms,
quota,
}; };
} }
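// Hedged example of the new "backends" section of config.json read by
// _configureBackends(); the values shown are simply the defaults used above.
//   "backends": {
//       "auth": "mem", "data": "multiple", "metadata": "file",
//       "kms": "file", "quota": "none"
//   }
// The S3BACKEND, S3VAULT, S3DATA, S3KMS and S3QUOTA environment variables
// still take precedence over these values.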
_verifyRedisPassword(password) {
return typeof password === 'string';
}
setAuthDataAccounts(accounts) { setAuthDataAccounts(accounts) {
this.authData.accounts = accounts; this.authData.accounts = accounts;
this.emit('authdata-update'); this.emit('authdata-update');
@ -1643,8 +1786,7 @@ class Config extends EventEmitter {
getAzureEndpoint(locationConstraint) { getAzureEndpoint(locationConstraint) {
let azureStorageEndpoint = let azureStorageEndpoint =
process.env[`${locationConstraint}_AZURE_STORAGE_ENDPOINT`] || process.env[`${locationConstraint}_AZURE_STORAGE_ENDPOINT`] ||
this.locationConstraints[locationConstraint] this.locationConstraints[locationConstraint].details.azureStorageEndpoint;
.details.azureStorageEndpoint;
if (!azureStorageEndpoint.endsWith('/')) { if (!azureStorageEndpoint.endsWith('/')) {
// append the trailing slash // append the trailing slash
azureStorageEndpoint = `${azureStorageEndpoint}/`; azureStorageEndpoint = `${azureStorageEndpoint}/`;
@ -1653,23 +1795,40 @@ class Config extends EventEmitter {
} }
getAzureStorageAccountName(locationConstraint) { getAzureStorageAccountName(locationConstraint) {
const { azureStorageAccountName } = const accountName = azureGetStorageAccountName(
this.locationConstraints[locationConstraint].details; locationConstraint,
const storageAccountNameFromEnv = this.locationConstraints[locationConstraint].details
process.env[`${locationConstraint}_AZURE_STORAGE_ACCOUNT_NAME`]; );
return storageAccountNameFromEnv || azureStorageAccountName; if (accountName) {
return accountName;
}
// For SAS & ServicePrincipal, retrieve the accountName from the endpoint
const endpoint = this.getAzureEndpoint(locationConstraint);
const url = new URL(endpoint);
const fragments = url.hostname.split('.', 3);
if (fragments.length === 3 && fragments[1] === 'blob') {
return fragments[0];
}
// We should always reach here, though it may not be the case for "mock" servers,
// where the accoutName is in the path
const path = url.pathname.replace(/^\//, '').replace(/\/$/, '');
if (path) {
return path;
}
// We should not reach here; if that happens, use the endpoint itself, which
// should be close-enough since this function is used for detecting when two
// locations actually point to the same account
return endpoint;
} }
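// Quick sketch of the endpoint fallback above (endpoints are made up):
new URL('https://myaccount.blob.core.windows.net/').hostname.split('.', 3);
//   -> ['myaccount', 'blob', 'core'], so 'myaccount' is returned
new URL('http://localhost:10000/devstoreaccount1/').pathname.replace(/^\//, '').replace(/\/$/, '');
//   -> 'devstoreaccount1' (mock servers keep the account name in the path)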
getAzureStorageCredentials(locationConstraint) { getAzureStorageCredentials(locationConstraint) {
const { azureStorageAccessKey } = return azureGetLocationCredentials(
this.locationConstraints[locationConstraint].details; locationConstraint,
const storageAccessKeyFromEnv = this.locationConstraints[locationConstraint].details
process.env[`${locationConstraint}_AZURE_STORAGE_ACCESS_KEY`]; );
return {
storageAccountName:
this.getAzureStorageAccountName(locationConstraint),
storageAccessKey: storageAccessKeyFromEnv || azureStorageAccessKey,
};
} }
getPfsDaemonEndpoint(locationConstraint) { getPfsDaemonEndpoint(locationConstraint) {
@ -1702,13 +1861,25 @@ class Config extends EventEmitter {
.update(instanceId) .update(instanceId)
.digest('hex'); .digest('hex');
} }
isQuotaEnabled() {
return !!this.quotaEnabled;
}
isQuotaInflightEnabled() {
return this.quota.enableInflights;
}
} }
module.exports = { module.exports = {
parseSproxydConfig, parseSproxydConfig,
parseRedisConfig,
locationConstraintAssert, locationConstraintAssert,
ConfigObject: Config, ConfigObject: Config,
config: new Config(), config: new Config(),
requestsConfigAssert, requestsConfigAssert,
bucketNotifAssert, bucketNotifAssert,
azureGetStorageAccountName,
azureGetLocationCredentials,
azureArchiveLocationConstraintAssert,
}; };

View File

@ -7,6 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite'); const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle'); const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy'); const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet'); const { bucketGet } = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL'); const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors'); const bucketGetCors = require('./bucketGetCors');
@ -17,6 +18,7 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification'); const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock'); const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy'); const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption'); const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead'); const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut'); const { bucketPut } = require('./bucketPut');
@ -33,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption'); const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy'); const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock'); const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication'); const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication'); const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight'); const corsPreflight = require('./corsPreflight');
@ -44,7 +47,7 @@ const metadataSearch = require('./metadataSearch');
const { multiObjectDelete } = require('./multiObjectDelete'); const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete'); const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy'); const objectCopy = require('./objectCopy');
const objectDelete = require('./objectDelete'); const { objectDelete } = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging'); const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet'); const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL'); const objectGetACL = require('./objectGetACL');
@ -64,8 +67,7 @@ const prepareRequestContexts
= require('./apiUtils/authorization/prepareRequestContexts'); = require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet'); const serviceGet = require('./serviceGet');
const vault = require('../auth/vault'); const vault = require('../auth/vault');
const websiteGet = require('./websiteGet'); const website = require('./website');
const websiteHead = require('./websiteHead');
const writeContinue = require('../utilities/writeContinue'); const writeContinue = require('../utilities/writeContinue');
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders'); const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
const parseCopySource = require('./apiUtils/object/parseCopySource'); const parseCopySource = require('./apiUtils/object/parseCopySource');
@ -83,6 +85,10 @@ const api = {
// Attach the apiMethod method to the request, so it can used by monitoring in the server // Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod; request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod]; const actionLog = monitoringMap[apiMethod];
if (!actionLog && if (!actionLog &&
@ -117,6 +123,7 @@ const api = {
// no need to check auth on website or cors preflight requests // no need to check auth on website or cors preflight requests
if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' || if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' ||
apiMethod === 'corsPreflight') { apiMethod === 'corsPreflight') {
request.actionImplicitDenies = false;
return this[apiMethod](request, log, callback); return this[apiMethod](request, log, callback);
} }
@ -139,15 +146,25 @@ const api = {
const requestContexts = prepareRequestContexts(apiMethod, request, const requestContexts = prepareRequestContexts(apiMethod, request,
sourceBucket, sourceObject, sourceVersionId); sourceBucket, sourceObject, sourceVersionId);
// Extract all the _apiMethods and store them in an array
const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : [];
// Attach the names to the current request
// eslint-disable-next-line no-param-reassign
request.apiMethods = apiMethods;
function checkAuthResults(authResults) { function checkAuthResults(authResults) {
let returnTagCount = true; let returnTagCount = true;
const isImplicitDeny = {};
let isOnlyImplicitDeny = true;
if (apiMethod === 'objectGet') { if (apiMethod === 'objectGet') {
// first item checks s3:GetObject(Version) action // first item checks s3:GetObject(Version) action
if (!authResults[0].isAllowed) { if (!authResults[0].isAllowed && !authResults[0].isImplicit) {
log.trace('get object authorization denial from Vault'); log.trace('get object authorization denial from Vault');
return errors.AccessDenied; return errors.AccessDenied;
} }
// TODO add support for returnTagCount in the bucket policy
// checks
isImplicitDeny[authResults[0].action] = authResults[0].isImplicit;
// second item checks s3:GetObject(Version)Tagging action // second item checks s3:GetObject(Version)Tagging action
if (!authResults[1].isAllowed) { if (!authResults[1].isAllowed) {
log.trace('get tagging authorization denial ' + log.trace('get tagging authorization denial ' +
@ -156,25 +173,41 @@ const api = {
} }
} else { } else {
for (let i = 0; i < authResults.length; i++) { for (let i = 0; i < authResults.length; i++) {
if (!authResults[i].isAllowed) { isImplicitDeny[authResults[i].action] = true;
if (!authResults[i].isAllowed && !authResults[i].isImplicit) {
// Any explicit deny rejects the current API call
log.trace('authorization denial from Vault'); log.trace('authorization denial from Vault');
return errors.AccessDenied; return errors.AccessDenied;
} }
if (authResults[i].isAllowed) {
// If the action is allowed, the result is not implicit
// Deny.
isImplicitDeny[authResults[i].action] = false;
isOnlyImplicitDeny = false;
} }
} }
return returnTagCount; }
// These two APIs cannot use ACLs or Bucket Policies, hence, any
// implicit deny from vault must be treated as an explicit deny.
if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
return errors.AccessDenied;
}
return { returnTagCount, isImplicitDeny };
} }
return async.waterfall([ return async.waterfall([
next => auth.server.doAuth( next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params) => { request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) { if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err }); log.trace('authentication error', { error: err });
return next(err); return next(arsenalError);
} }
return next(null, userInfo, authorizationResults, streamingV4Params); return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}, 's3', requestContexts), }, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, next) => { (userInfo, authorizationResults, streamingV4Params, infos, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() }; const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) { if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName(); authNames.userName = userInfo.getIAMdisplayName();
@ -184,7 +217,7 @@ const api = {
} }
log.addDefaultFields(authNames); log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params); return next(null, userInfo, authorizationResults, streamingV4Params, infos);
} }
// issue 100 Continue to the client // issue 100 Continue to the client
writeContinue(request, response); writeContinue(request, response);
@ -215,12 +248,12 @@ const api = {
} }
// Convert array of post buffers into one string // Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString(); request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params); return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}); });
return undefined; return undefined;
}, },
// Tag condition keys require information from CloudServer for evaluation // Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth( (userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
authorizationResults, authorizationResults,
request, request,
requestContexts, requestContexts,
@ -231,33 +264,47 @@ const api = {
log.trace('tag authentication error', { error: err }); log.trace('tag authentication error', { error: err });
return next(err); return next(err);
} }
return next(null, userInfo, authResultsWithTags, streamingV4Params); return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
}, },
), ),
], (err, userInfo, authorizationResults, streamingV4Params) => { ], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) { if (err) {
return callback(err); return callback(err);
} }
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) { if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults); const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) { if (checkedResults instanceof Error) {
return callback(checkedResults); return callback(checkedResults);
} }
returnTagCount = checkedResults; returnTagCount = checkedResults.returnTagCount;
request.actionImplicitDenies = checkedResults.isImplicitDeny;
} else {
// create an object of keys apiMethods with all values to false:
// for backward compatibility, all apiMethods are allowed by default
// thus it is explicitly allowed, so implicit deny is false
request.actionImplicitDenies = apiMethods.reduce((acc, curr) => {
acc[curr] = false;
return acc;
}, {});
} }
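// Shape sketch with hypothetical values: after this step,
// request.actionImplicitDenies might look like
//   { objectDelete: false, objectDeleteVersion: true }
// where true means Vault answered with an implicit deny for that action (so
// ACLs and bucket policies are still consulted downstream) and false means it
// was explicitly allowed; an explicit deny never reaches this map because
// checkAuthResults() already returned AccessDenied.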
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
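// Hypothetical illustration of the finalizer mechanism: an API handler can
// push a hook that runs (up to 5 hooks in parallel) once the call finishes,
// for example to clean up state when the call failed. The hook body is made up.
request.finalizerHooks.push((apiErr, done) => {
    if (apiErr) {
        log.trace('api call failed, running cleanup hook');
    }
    return done();
});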
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response; request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params, return this[apiMethod](userInfo, request, streamingV4Params,
log, callback, authorizationResults); log, methodCallback, authorizationResults);
} }
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket, return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback); sourceObject, sourceVersionId, log, methodCallback);
} }
if (apiMethod === 'objectGet') { if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback); return this[apiMethod](userInfo, request, returnTagCount, log, callback);
} }
return this[apiMethod](userInfo, request, log, callback); return this[apiMethod](userInfo, request, log, methodCallback);
}); });
}, },
bucketDelete, bucketDelete,
@ -284,11 +331,14 @@ const api = {
bucketPutReplication, bucketPutReplication,
bucketGetReplication, bucketGetReplication,
bucketDeleteReplication, bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle, bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle, bucketGetLifecycle,
bucketDeleteLifecycle, bucketDeleteLifecycle,
bucketPutPolicy, bucketPutPolicy,
bucketGetPolicy, bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy, bucketDeletePolicy,
bucketPutObjectLock, bucketPutObjectLock,
bucketPutNotification, bucketPutNotification,
@ -320,8 +370,8 @@ const api = {
objectPutRetention, objectPutRetention,
objectRestore, objectRestore,
serviceGet, serviceGet,
websiteGet, websiteGet: website,
websiteHead, websiteHead: website,
}; };
module.exports = api; module.exports = api;

View File

@ -1,11 +1,23 @@
const { evaluators, actionMaps, RequestContext } = require('arsenal').policies; const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
const { errors } = require('arsenal');
const { parseCIDR, isValid } = require('ipaddr.js');
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { config } = require('../../../Config');
const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants; const {
allAuthedUsersId,
bucketOwnerActions,
logId,
publicId,
arrayOfAllowed,
assumedRoleArnResourceType,
backbeatLifecycleSessionName,
actionsToConsiderAsObjectPut,
} = constants;
// whitelist buckets to allow public read on objects // whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ? const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : []; ? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
function getServiceAccountProperties(canonicalID) { function getServiceAccountProperties(canonicalID) {
const canonicalIDArray = canonicalID.split('/'); const canonicalIDArray = canonicalID.split('/');
@ -26,13 +38,41 @@ function isRequesterNonAccountUser(authInfo) {
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo); return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
} }
function checkBucketAcls(bucket, requestType, canonicalID) { /**
* Checks the access control for a given bucket based on the request type and user's canonical ID.
*
* @param {Bucket} bucket - The bucket to check access control for.
* @param {string} requestType - The s3 action to check within the API call.
* @param {string} canonicalID - The canonical ID of the user making the request.
* @param {string} mainApiCall - The main API call (first item of the requestType).
*
* @returns {boolean} - Returns true if the user has the necessary access rights, otherwise false.
*/
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
// Same logic applies on the Versioned APIs, so let's simplify it.
let requestTypeParsed = requestType.endsWith('Version') ?
requestType.slice(0, 'Version'.length * -1) : requestType;
requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
'objectPut' : requestTypeParsed;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
if (bucket.getOwner() === canonicalID) { if (bucket.getOwner() === canonicalID) {
return true; return true;
} }
if (parsedMainApiCall === 'objectGet') {
if (requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (parsedMainApiCall === 'objectPut') {
if (arrayOfAllowed.includes(requestTypeParsed)) {
return true;
}
}
const bucketAcl = bucket.getAcl(); const bucketAcl = bucket.getAcl();
if (requestType === 'bucketGet' || requestType === 'bucketHead') { if (requestTypeParsed === 'bucketGet' || requestTypeParsed === 'bucketHead') {
if (bucketAcl.Canned === 'public-read' if (bucketAcl.Canned === 'public-read'
|| bucketAcl.Canned === 'public-read-write' || bucketAcl.Canned === 'public-read-write'
|| (bucketAcl.Canned === 'authenticated-read' || (bucketAcl.Canned === 'authenticated-read'
@ -50,7 +90,7 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
return true; return true;
} }
} }
if (requestType === 'bucketGetACL') { if (requestTypeParsed === 'bucketGetACL') {
if ((bucketAcl.Canned === 'log-delivery-write' if ((bucketAcl.Canned === 'log-delivery-write'
&& canonicalID === logId) && canonicalID === logId)
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -66,7 +106,7 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
} }
} }
if (requestType === 'bucketPutACL') { if (requestTypeParsed === 'bucketPutACL') {
if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 if (bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) { || bucketAcl.WRITE_ACP.indexOf(canonicalID) > -1) {
return true; return true;
@ -80,11 +120,7 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
} }
} }
if (requestType === 'bucketDelete' && bucket.getOwner() === canonicalID) { if (requestTypeParsed === 'objectDelete' || requestTypeParsed === 'objectPut') {
return true;
}
if (requestType === 'objectDelete' || requestType === 'objectPut') {
if (bucketAcl.Canned === 'public-read-write' if (bucketAcl.Canned === 'public-read-write'
|| bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1 || bucketAcl.FULL_CONTROL.indexOf(canonicalID) > -1
|| bucketAcl.WRITE.indexOf(canonicalID) > -1) { || bucketAcl.WRITE.indexOf(canonicalID) > -1) {
@ -104,25 +140,39 @@ function checkBucketAcls(bucket, requestType, canonicalID) {
// objectPutACL, objectGetACL, objectHead or objectGet, the bucket // objectPutACL, objectGetACL, objectHead or objectGet, the bucket
// authorization check should just return true so can move on to check // authorization check should just return true so can move on to check
// rights at the object level. // rights at the object level.
return (requestType === 'objectPutACL' || requestType === 'objectGetACL' || return (requestTypeParsed === 'objectPutACL' || requestTypeParsed === 'objectGetACL'
requestType === 'objectGet' || requestType === 'objectHead'); || requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
} }
function checkObjectAcls(bucket, objectMD, requestType, canonicalID) { function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
isUserUnauthenticated, mainApiCall) {
const bucketOwner = bucket.getOwner(); const bucketOwner = bucket.getOwner();
const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
'objectPut' : requestType;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
// acls don't distinguish between users and accounts, so both should be allowed // acls don't distinguish between users and accounts, so both should be allowed
if (bucketOwnerActions.includes(requestType) if (bucketOwnerActions.includes(requestTypeParsed)
&& (bucketOwner === canonicalID)) { && (bucketOwner === canonicalID)) {
return true; return true;
} }
if (objectMD['owner-id'] === canonicalID) { if (objectMD['owner-id'] === canonicalID) {
return true; return true;
} }
// Backward compatibility
if (parsedMainApiCall === 'objectGet') {
if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
&& requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (!objectMD.acl) { if (!objectMD.acl) {
return false; return false;
} }
if (requestType === 'objectGet' || requestType === 'objectHead') { if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
if (objectMD.acl.Canned === 'public-read' if (objectMD.acl.Canned === 'public-read'
|| objectMD.acl.Canned === 'public-read-write' || objectMD.acl.Canned === 'public-read-write'
|| (objectMD.acl.Canned === 'authenticated-read' || (objectMD.acl.Canned === 'authenticated-read'
@ -148,11 +198,11 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
// User is already authorized on the bucket for FULL_CONTROL or WRITE or // User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write // bucket has canned ACL public-read-write
if (requestType === 'objectPut' || requestType === 'objectDelete') { if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
return true; return true;
} }
if (requestType === 'objectPutACL') { if (requestTypeParsed === 'objectPutACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control' if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID) && bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -168,7 +218,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
} }
} }
if (requestType === 'objectGetACL') { if (requestTypeParsed === 'objectGetACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control' if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID) && bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1 || objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -187,9 +237,9 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID) {
// allow public reads on buckets that are whitelisted for anonymous reads // allow public reads on buckets that are whitelisted for anonymous reads
// TODO: remove this after bucket policies are implemented // TODO: remove this after bucket policies are implemented
const bucketAcl = bucket.getAcl(); const bucketAcl = bucket.getAcl();
const allowPublicReads = publicReadBuckets.includes(bucket.getName()) && const allowPublicReads = publicReadBuckets.includes(bucket.getName())
bucketAcl.Canned === 'public-read' && && bucketAcl.Canned === 'public-read'
(requestType === 'objectGet' || requestType === 'objectHead'); && (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
if (allowPublicReads) { if (allowPublicReads) {
return true; return true;
} }
@ -216,6 +266,20 @@ function _checkBucketPolicyResources(request, resource, log) {
return evaluators.isResourceApplicable(requestContext, resource, log); return evaluators.isResourceApplicable(requestContext, resource, log);
} }
function _checkBucketPolicyConditions(request, conditions, log) {
const ip = request ? requestUtils.getClientIp(request, config) : undefined;
if (!conditions) {
return true;
}
// build request context from the request!
const requestContext = new RequestContext(request.headers, request.query,
request.bucketName, request.objectKey, ip,
request.connection.encrypted, request.resourceType, 's3', null, null,
null, null, null, null, null, null, null, null, null,
request.objectLockRetentionDays);
return evaluators.meetConditions(requestContext, conditions, log);
}
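_checkBucketPolicyConditions builds an Arsenal RequestContext carrying the client IP (resolved through requestUtils.getClientIp with the server config) and the object-lock retention days, then defers to evaluators.meetConditions. A sketch of the kind of statement it now evaluates; the bucket name and CIDR are illustrative:

const statement = {
    Effect: 'Deny',
    Principal: '*',
    Action: 's3:GetObject',
    Resource: 'arn:aws:s3:::example-bucket/*',
    // the Deny only applies when the resolved client IP is outside this range
    Condition: { NotIpAddress: { 'aws:SourceIp': '192.0.2.0/24' } },
};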
function _getAccountId(arn) { function _getAccountId(arn) {
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc... // account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
return arn.substr(13, 12); return arn.substr(13, 12);
@ -260,11 +324,11 @@ function _checkPrincipals(canonicalID, arn, principal) {
return false; return false;
} }
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request) { function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
let permission = 'defaultDeny'; let permission = 'defaultDeny';
// if requester is user within bucket owner account, actions should be // if requester is user within bucket owner account, actions should be
// allowed unless explicitly denied (assumes allowed by IAM policy) // allowed unless explicitly denied (assumes allowed by IAM policy)
if (bucketOwner === canonicalID) { if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
permission = 'allow'; permission = 'allow';
} }
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement)); let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
@ -273,12 +337,13 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal); const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log); const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log); const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Deny') { if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
// explicit deny trumps any allows, so return immediately // explicit deny trumps any allows, so return immediately
return 'explicitDeny'; return 'explicitDeny';
} }
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Allow') { if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
permission = 'allow'; permission = 'allow';
} }
copiedStatement = copiedStatement.splice(1); copiedStatement = copiedStatement.splice(1);
@ -286,7 +351,37 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
return permission; return permission;
} }
function isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request) { function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner, log,
request, aclPermission, results, actionImplicitDenies) {
const bucketPolicy = bucket.getBucketPolicy();
let processedResult = results[requestType];
if (!bucketPolicy) {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
} else {
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
bucketOwner, log, request, actionImplicitDenies);
if (bucketPolicyPermission === 'explicitDeny') {
processedResult = false;
} else if (bucketPolicyPermission === 'allow') {
processedResult = true;
} else {
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
}
}
return processedResult;
}
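processBucketPolicy merges three inputs per action: the bucket-policy verdict, the ACL result, and the per-action IAM flag in actionImplicitDenies. An explicit policy deny always wins, an explicit allow wins over the ACL, and otherwise the action falls back to the ACL only when IAM did not implicitly deny it. A sketch of a single-action call, assuming aclPermission is the boolean returned by checkBucketAcls or checkObjectAcls:

const allowed = processBucketPolicy('objectGet', bucket, canonicalID, arn,
    bucket.getOwner(), log, request, aclPermission, {}, { objectGet: false });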
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const mainApiCall = requestTypes[0];
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
// Check to see if user is authorized to perform a // Check to see if user is authorized to perform a
// particular action on bucket based on ACLs. // particular action on bucket based on ACLs.
// TODO: Add IAM checks // TODO: Add IAM checks
@ -297,69 +392,100 @@ function isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, req
arn = authInfo.getArn(); arn = authInfo.getArn();
} }
// if the bucket owner is an account, users should not have default access // if the bucket owner is an account, users should not have default access
if (((bucket.getOwner() === canonicalID) && requesterIsNotUser) if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
|| isServiceAccount(canonicalID)) { results[_requestType] = actionImplicitDenies[_requestType] === false;
return true; return results[_requestType];
} }
const aclPermission = checkBucketAcls(bucket, requestType, canonicalID); const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
const bucketPolicy = bucket.getBucketPolicy(); // In case of error bucket access is checked with bucketGet
if (!bucketPolicy) { // For website, bucket policy only uses objectGet and ignores bucketGet
return aclPermission; // https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
// bucketGet is used for the ACL check, but switched to objectGet for the bucket policy check
if (isWebsite && _requestType === 'bucketGet') {
// eslint-disable-next-line no-param-reassign
_requestType = 'objectGet';
actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
} }
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
canonicalID, arn, bucket.getOwner(), log, request); request, aclPermission, results, actionImplicitDenies);
if (bucketPolicyPermission === 'explicitDeny') { });
return false;
}
return (aclPermission || (bucketPolicyPermission === 'allow'));
} }
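isBucketAuthorized now accepts either a single action or an array of actions, and the request is authorized only if every action passes; actions missing from actionImplicitDenies are treated as not implicitly denied, for backward compatibility. A sketch call under those assumptions (authInfo, log and request come from the API layer):

const ok = isBucketAuthorized(bucket, ['bucketGet', 'bucketGetACL'], canonicalID,
    authInfo, log, request, { bucketGet: false });
// true only if both bucketGet and bucketGetACL are authorized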
function isObjAuthorized(bucket, objectMD, requestType, canonicalID, authInfo, log, request) { function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, authInfo, actionImplicitDeniesInput = {},
log, request) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
let arn = null;
if (authInfo) {
arn = authInfo.getArn();
}
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
request, true, results, actionImplicitDenies);
});
}
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
const mainApiCall = requestTypes[0];
return requestTypes.every(_requestType => {
// By default, all missing actions are defined as allowed from IAM, to be
// backward compatible
actionImplicitDenies[_requestType] = actionImplicitDenies[_requestType] || false;
const parsedMethodName = _requestType.endsWith('Version')
? _requestType.slice(0, -7) : _requestType;
const bucketOwner = bucket.getOwner(); const bucketOwner = bucket.getOwner();
if (!objectMD) { if (!objectMD) {
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if (requestType === 'objectPut' || requestType === 'objectDelete') {
return true;
}
// check bucket has read access // check bucket has read access
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions // 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
return isBucketAuthorized(bucket, 'bucketGet', canonicalID, authInfo, log, request); let permission = 'bucketGet';
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
permission = 'objectPut';
}
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
actionImplicitDenies, isWebsite);
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
&& results[_requestType] === false) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
}
return results[_requestType];
} }
let requesterIsNotUser = true; let requesterIsNotUser = true;
let arn = null; let arn = null;
let isUserUnauthenticated = false;
if (authInfo) { if (authInfo) {
requesterIsNotUser = !isRequesterNonAccountUser(authInfo); requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn(); arn = authInfo.getArn();
isUserUnauthenticated = arn === undefined;
} }
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser) { if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
return true; results[_requestType] = actionImplicitDenies[_requestType] === false;
} return results[_requestType];
if (isServiceAccount(canonicalID)) {
return true;
} }
// account is authorized if: // account is authorized if:
// - requesttype is included in bucketOwnerActions and // - requesttype is included in bucketOwnerActions and
// - account is the bucket owner // - account is the bucket owner
// - requester is account, not user // - requester is account, not user
if (bucketOwnerActions.includes(requestType) if (bucketOwnerActions.includes(parsedMethodName)
&& (bucketOwner === canonicalID) && (bucketOwner === canonicalID)
&& requesterIsNotUser) { && requesterIsNotUser) {
return true; results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
} }
const aclPermission = checkObjectAcls(bucket, objectMD, requestType, const aclPermission = checkObjectAcls(bucket, objectMD, parsedMethodName,
canonicalID); canonicalID, requesterIsNotUser, isUserUnauthenticated, mainApiCall);
const bucketPolicy = bucket.getBucketPolicy(); return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucketOwner,
if (!bucketPolicy) { log, request, aclPermission, results, actionImplicitDenies);
return aclPermission; });
}
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType,
canonicalID, arn, bucket.getOwner(), log, request);
if (bucketPolicyPermission === 'explicitDeny') {
return false;
}
return (aclPermission || (bucketPolicyPermission === 'allow'));
} }
function _checkResource(resource, bucketArn) { function _checkResource(resource, bucketArn) {
@ -388,6 +514,117 @@ function validatePolicyResource(bucketName, policy) {
}); });
} }
function checkIp(value) {
const errString = 'Invalid IP address in Conditions';
const values = Array.isArray(value) ? value : [value];
for (let i = 0; i < values.length; i++) {
// these preliminary checks are validating the provided
// ip address against ipaddr.js, the library we use when
// evaluating IP condition keys. It ensures compatibility,
// but additional checks are required to enforce the right
// notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
// we would accept different ip formats, which is not
// standard in an AWS use case.
try {
try {
parseCIDR(values[i]);
} catch (err) {
isValid(values[i]);
}
} catch (err) {
return errString;
}
// Apply the existing IP validation logic to each element
const validateIpRegex = ip => {
if (constants.ipv4Regex.test(ip)) {
return ip.split('.').every(part => parseInt(part, 10) <= 255);
}
if (constants.ipv6Regex.test(ip)) {
return ip.split(':').every(part => part.length <= 4);
}
return false;
};
if (validateIpRegex(values[i]) !== true) {
return errString;
}
}
// If the function hasn't returned by now, all elements are valid
return null;
}
// This function checks all bucket policy conditions if the values provided
// are valid for the condition type. If not it returns a relevant Malformed policy error string
function validatePolicyConditions(policy) {
const validConditions = [
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
{ conditionKey: 's3:object-lock-remaining-retention-days' },
];
// keys where value type does not seem to be checked by AWS:
// - s3:object-lock-remaining-retention-days
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
return null;
}
// there can be multiple statements in the policy, each with a Condition enclosure
for (let i = 0; i < policy.Statement.length; i++) {
const s = policy.Statement[i];
if (s.Condition) {
const conditionOperators = Object.keys(s.Condition);
// there can be multiple condition operations in the Condition enclosure
// eslint-disable-next-line no-restricted-syntax
for (const conditionOperator of conditionOperators) {
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
const conditionValue = s.Condition[conditionOperator][conditionKey];
const validCondition = validConditions.find(validCondition =>
validCondition.conditionKey === conditionKey
);
// AWS does not return an error if the condition key starts with 'aws:'
// so we reproduce this behaviour
if (!validCondition && !conditionKey.startsWith('aws:')) {
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
}
if (validCondition && validCondition.conditionValueTypeChecker) {
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
if (conditionValueTypeError) {
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
}
}
}
}
}
return null;
}
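validatePolicyConditions only type-checks the condition keys it knows about (aws:SourceIp through checkIp) and, mirroring AWS, silently accepts any other key prefixed with aws:; everything else is rejected as a malformed policy. Illustrative calls (the policies are made up):

// expected to be accepted: aws:SourceIp in CIDR notation parseable by ipaddr.js
validatePolicyConditions({ Statement: [
    { Condition: { IpAddress: { 'aws:SourceIp': '10.0.0.0/16' } } },
] });
// rejected: unknown condition key that does not start with 'aws:'
validatePolicyConditions({ Statement: [
    { Condition: { StringEquals: { 's3:prefix': 'logs/' } } },
] }); // -> MalformedPolicy: 'Policy has an invalid condition key'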
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts'
&& resourceType === assumedRoleArnResourceType
&& sessionName === backbeatLifecycleSessionName);
}
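A quick example based on the ARN given in the docstring, assuming the constants resolve to 'assumed-role' and 'backbeat-lifecycle' as that example suggests:

isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle');
// -> true; any other service, resource type or session name returns false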
module.exports = { module.exports = {
isBucketAuthorized, isBucketAuthorized,
isObjAuthorized, isObjAuthorized,
@ -398,4 +635,7 @@ module.exports = {
checkBucketAcls, checkBucketAcls,
checkObjectAcls, checkObjectAcls,
validatePolicyResource, validatePolicyResource,
validatePolicyConditions,
isLifecycleSession,
evaluateBucketPolicyWithIAM,
}; };


@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3'); apiMethod, 's3');
} }
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') { if (apiMethod === 'bucketPut') {
return null; return null;
} }
@ -65,7 +65,17 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = []; const requestContexts = [];
if (apiMethodAfterVersionCheck === 'objectCopy' if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') { || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' : const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet'; 'objectGet';


@ -24,7 +24,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
}); });
} }
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) { function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) {
async.mapLimit(mpus, 1, (mpu, next) => { async.mapLimit(mpus, 1, (mpu, next) => {
const splitterChar = mpu.key.includes(oldSplitter) ? const splitterChar = mpu.key.includes(oldSplitter) ?
oldSplitter : splitter; oldSplitter : splitter;
@ -40,7 +40,7 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
byteLength: partSizeSum, byteLength: partSizeSum,
}); });
next(err); next(err);
}); }, request);
}, cb); }, cb);
} }
/** /**
@ -49,11 +49,13 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
* @param {object} bucketMD - bucket attributes/metadata * @param {object} bucketMD - bucket attributes/metadata
* @param {string} bucketName - bucket in which objectMetadata is stored * @param {string} bucketName - bucket in which objectMetadata is stored
* @param {string} canonicalID - account canonicalID of requester * @param {string} canonicalID - account canonicalID of requester
* @param {object} request - request object given by router
* including normalized headers
* @param {object} log - Werelogs logger * @param {object} log - Werelogs logger
* @param {function} cb - callback from async.waterfall in bucketDelete * @param {function} cb - callback from async.waterfall in bucketDelete
* @return {undefined} * @return {undefined}
*/ */
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) { function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) {
log.trace('deleting bucket from metadata'); log.trace('deleting bucket from metadata');
assert.strictEqual(typeof bucketName, 'string'); assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof canonicalID, 'string'); assert.strictEqual(typeof canonicalID, 'string');
@ -100,7 +102,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
} }
if (objectsListRes.Contents.length) { if (objectsListRes.Contents.length) {
return _deleteOngoingMPUs(authInfo, bucketName, return _deleteOngoingMPUs(authInfo, bucketName,
bucketMD, objectsListRes.Contents, log, err => { bucketMD, objectsListRes.Contents, request, log, err => {
if (err) { if (err) {
return next(err); return next(err);
} }


@ -30,6 +30,9 @@ function bucketShield(bucket, requestType) {
// Otherwise return an error to the client // Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) && if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' && (requestType !== 'objectPut' &&
requestType !== 'initiateMultipartUpload' &&
requestType !== 'objectPutPart' &&
requestType !== 'completeMultipartUpload' &&
requestType !== 'bucketPutACL' && requestType !== 'bucketPutACL' &&
requestType !== 'bucketDelete')) { requestType !== 'bucketDelete')) {
return true; return true;


@ -3,7 +3,7 @@ const async = require('async');
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper'); const { data } = require('../../../data/wrapper');
const locationConstraintCheck = require('../object/locationConstraintCheck'); const locationConstraintCheck = require('../object/locationConstraintCheck');
const { metadataValidateBucketAndObj } = const { standardMetadataValidateBucketAndObj } =
require('../../../metadata/metadataUtils'); require('../../../metadata/metadataUtils');
const services = require('../../../services'); const services = require('../../../services');
@ -14,7 +14,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
bucketName, bucketName,
objectKey, objectKey,
uploadId, uploadId,
preciseRequestType: 'multipartDelete', preciseRequestType: request.apiMethods || 'multipartDelete',
request, request,
}; };
// For validating the request at the destinationBucket level // For validating the request at the destinationBucket level
@ -22,10 +22,11 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
// but the requestType is the more general 'objectDelete' // but the requestType is the more general 'objectDelete'
const metadataValParams = Object.assign({}, metadataValMPUparams); const metadataValParams = Object.assign({}, metadataValMPUparams);
metadataValParams.requestType = 'objectPut'; metadataValParams.requestType = 'objectPut';
const authzIdentityResult = request ? request.actionImplicitDenies : false;
async.waterfall([ async.waterfall([
function checkDestBucketVal(next) { function checkDestBucketVal(next) {
metadataValidateBucketAndObj(metadataValParams, log, standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
(err, destinationBucket) => { (err, destinationBucket) => {
if (err) { if (err) {
return next(err, destinationBucket); return next(err, destinationBucket);
@ -56,9 +57,14 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket, function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
next) { next) {
const location = mpuOverviewObj.controllingLocationConstraint; const location = mpuOverviewObj.controllingLocationConstraint;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.abortMPU(objectKey, uploadId, location, bucketName, return data.abortMPU(objectKey, uploadId, location, bucketName,
request, destBucket, locationConstraintCheck, log, request, destBucket, locationConstraintCheck, log,
(err, skipDataDelete) => { (err, skipDataDelete) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityAuthzResults;
if (err) { if (err) {
return next(err, destBucket); return next(err, destBucket);
} }


@ -2,11 +2,13 @@
* Code based on Yutaka Oishi (Fujifilm) contributions * Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020 * Date: 11 Sep 2020
*/ */
const ObjectMDArchive = require('arsenal').models.ObjectMDArchive; const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors; const errors = require('arsenal').errors;
const { config } = require('../../../Config'); const { config } = require('../../../Config');
const { locationConstraints } = config; const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/** /**
* Get response header "x-amz-restore" * Get response header "x-amz-restore"
* Be called by objectHead.js * Be called by objectHead.js
@ -32,7 +34,6 @@ function getAmzRestoreResHeader(objMD) {
return undefined; return undefined;
} }
/** /**
* Check if restore can be done. * Check if restore can be done.
* *
@ -41,6 +42,23 @@ function getAmzRestoreResHeader(objMD) {
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled * @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/ */
function _validateStartRestore(objectMD, log) { function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold; const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) { if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage, // return InvalidObjectState error if the object is not in cold storage,
@ -52,18 +70,7 @@ function _validateStartRestore(objectMD, log) {
}); });
return errors.InvalidObjectState; return errors.InvalidObjectState;
} }
if (objectMD.archive?.restoreCompletedAt if (objectMD.archive?.restoreRequestedAt) {
&& new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt && !objectMD.archive?.restoreCompletedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored // return RestoreAlreadyInProgress error if the object is currently being restored
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists // check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
log.debug('The object is currently being restored.', log.debug('The object is currently being restored.',
@ -120,22 +127,36 @@ function validatePutVersionId(objMD, versionId, log) {
} }
/** /**
* Check if the object is already restored * Check if the object is already restored, and update the expiration date accordingly:
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
* *
* @param {ObjectMD} objectMD - object metadata * @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger * @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored * @return {boolean} - true if the object is already restored
*/ */
function isObjectAlreadyRestored(objectMD, log) { function _updateObjectExpirationDate(objectMD, log) {
// check if restoreCompletedAt field exists // Check if restoreCompletedAt field exists
// and archive.restoreWillExpireAt > current time // Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
const isObjectAlreadyRestored = objectMD.archive?.restoreCompletedAt // checked earlier in the process, so checking again here would create weird states
&& new Date(objectMD.archive?.restoreWillExpireAt) >= new Date(Date.now()); const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
log.debug('The restore status of the object.', log.debug('The restore status of the object.', {
{
isObjectAlreadyRestored, isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored' method: 'isObjectAlreadyRestored'
}); });
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored; return isObjectAlreadyRestored;
} }
@ -159,7 +180,7 @@ function _updateRestoreInfo(objectMD, restoreParam, log) {
/* eslint-disable no-param-reassign */ /* eslint-disable no-param-reassign */
objectMD.archive.restoreRequestedAt = new Date(); objectMD.archive.restoreRequestedAt = new Date();
objectMD.archive.restoreRequestedDays = restoreParam.days; objectMD.archive.restoreRequestedDays = restoreParam.days;
objectMD.originOp = 's3:ObjectRestore'; objectMD.originOp = 's3:ObjectRestore:Post';
/* eslint-enable no-param-reassign */ /* eslint-enable no-param-reassign */
if (!ObjectMDArchive.isValid(objectMD.archive)) { if (!ObjectMDArchive.isValid(objectMD.archive)) {
log.debug('archive is not valid', { log.debug('archive is not valid', {
@ -195,12 +216,32 @@ function startRestore(objectMD, restoreParam, log, cb) {
if (updateResultError) { if (updateResultError) {
return cb(updateResultError); return cb(updateResultError);
} }
return cb(null, isObjectAlreadyRestored(objectMD, log)); const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
return cb(null, isObjectAlreadyRestored);
} }
/**
* checks if object data is available or if it's in cold storage
* @param {ObjectMD} objMD Object metadata
* @returns {ArsenalError|null} error if object data is not available
*/
function verifyColdObjectAvailable(objMD) {
// return error when object is cold
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
const err = errors.InvalidObjectState
.customizeDescription('The operation is not valid for the object\'s storage class');
return err;
}
return null;
}
module.exports = { module.exports = {
startRestore, startRestore,
getAmzRestoreResHeader, getAmzRestoreResHeader,
validatePutVersionId, validatePutVersionId,
verifyColdObjectAvailable,
}; };

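When a restore is re-issued on an already restored object, _updateObjectExpirationDate recomputes restoreWillExpireAt relative to the new request, as the quoted AWS behaviour requires. A worked sketch of the arithmetic, assuming scaledMsPerDay equals a real day (86,400,000 ms) when no time-scaling option is configured:

// restoreRequestedAt  = 2024-01-10T00:00:00Z, restoreRequestedDays = 3
// restoreWillExpireAt = restoreRequestedAt + 3 * 86400000 ms
//                     = 2024-01-13T00:00:00Z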

@ -5,7 +5,6 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
const constants = require('../../../../constants'); const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper'); const { data } = require('../../../data/wrapper');
const services = require('../../../services'); const services = require('../../../services');
const logger = require('../../../utilities/logger');
const { dataStore } = require('./storeObject'); const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck'); const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning'); const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
@ -21,7 +20,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure or GCP.'; 'a versioned object to a location-constraint of type Azure or GCP.';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle, function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) { metadataStoreParams, dataToDelete, log, requestMethod, callback) {
services.metadataStoreObject(bucketName, dataGetInfo, services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, result) => { cipherBundle, metadataStoreParams, (err, result) => {
if (err) { if (err) {
@ -31,7 +30,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
const newDataStoreName = Array.isArray(dataGetInfo) ? const newDataStoreName = Array.isArray(dataGetInfo) ?
dataGetInfo[0].dataStoreName : null; dataGetInfo[0].dataStoreName : null;
return data.batchDelete(dataToDelete, requestMethod, return data.batchDelete(dataToDelete, requestMethod,
newDataStoreName, deleteLog, err => callback(err, result)); newDataStoreName, log, err => callback(err, result));
} }
return callback(null, result); return callback(null, result);
}); });
@ -51,7 +50,9 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* @param {(object|null)} streamingV4Params - if v4 auth, object containing * @param {(object|null)} streamingV4Params - if v4 auth, object containing
* accessKey, signatureFromRequest, region, scopeDate, timestamp, and * accessKey, signatureFromRequest, region, scopeDate, timestamp, and
* credentialScope (to be used for streaming v4 auth if applicable) * credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance * @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function * @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) - * @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version * result.contentMD5 - content md5 of new object or version
@ -59,7 +60,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/ */
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo, function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params, canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
log, callback) { overheadField, log, originOp, callback) {
const putVersionId = request.headers['x-scal-s3-version-id']; const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === ''; const isPutVersion = putVersionId || putVersionId === '';
@ -115,6 +116,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
isDeleteMarker, isDeleteMarker,
replicationInfo: getReplicationInfo( replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo), objectKey, bucketMD, false, size, null, null, authInfo),
overheadField,
log, log,
}; };
@ -141,7 +143,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
removeAWSChunked(request.headers['content-encoding']); removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires; metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging']; metadataStoreParams.tagging = request.headers['x-amz-tagging'];
metadataStoreParams.originOp = 's3:ObjectCreated:Put'; metadataStoreParams.originOp = originOp;
const defaultObjectLockConfiguration const defaultObjectLockConfiguration
= bucketMD.getObjectLockConfiguration(); = bucketMD.getObjectLockConfiguration();
if (defaultObjectLockConfiguration) { if (defaultObjectLockConfiguration) {
@ -156,7 +158,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
request.headers[constants.objectLocationConstraintHeader] = request.headers[constants.objectLocationConstraintHeader] =
objMD[constants.objectLocationConstraintHeader]; objMD[constants.objectLocationConstraintHeader];
metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated'; metadataStoreParams.originOp = originOp;
} }
const backendInfoObj = const backendInfoObj =
@ -187,14 +189,17 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
} }
} }
if (objMD && objMD.uploadId) {
metadataStoreParams.oldReplayId = objMD.uploadId;
}
/* eslint-disable camelcase */ /* eslint-disable camelcase */
const dontSkipBackend = externalBackends; const dontSkipBackend = externalBackends;
/* eslint-enable camelcase */ /* eslint-enable camelcase */
const requestLogger =
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
const mdOnlyHeader = request.headers['x-amz-meta-mdonly']; const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
const mdOnlySize = request.headers['x-amz-meta-size']; const mdOnlySize = request.headers['x-amz-meta-size'];
return async.waterfall([ return async.waterfall([
function storeData(next) { function storeData(next) {
if (size === 0) { if (size === 0) {
@ -283,11 +288,13 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
metadataStoreParams.versionId = options.versionId; metadataStoreParams.versionId = options.versionId;
metadataStoreParams.versioning = options.versioning; metadataStoreParams.versioning = options.versioning;
metadataStoreParams.isNull = options.isNull; metadataStoreParams.isNull = options.isNull;
metadataStoreParams.nullVersionId = options.nullVersionId; metadataStoreParams.deleteNullKey = options.deleteNullKey;
metadataStoreParams.nullUploadId = options.nullUploadId; if (options.extraMD) {
Object.assign(metadataStoreParams, options.extraMD);
}
return _storeInMDandDeleteData(bucketName, infoArr, return _storeInMDandDeleteData(bucketName, infoArr,
cipherBundle, metadataStoreParams, cipherBundle, metadataStoreParams,
options.dataToDelete, requestLogger, requestMethod, next); options.dataToDelete, log, requestMethod, next);
}, },
], callback); ], callback);
} }


@ -0,0 +1,18 @@
/**
* _bucketRequiresOplogUpdate - whether deleting objects from this bucket requires an oplog update
* @param {BucketInfo} bucket - bucket object
* @return {boolean} whether objects require oplog updates on deletion, or not
*/
function _bucketRequiresOplogUpdate(bucket) {
// Default behavior is to require an oplog update
if (!bucket || !bucket.getLifecycleConfiguration || !bucket.getNotificationConfiguration) {
return true;
}
// If the bucket has lifecycle configuration or notification configuration
// set, we also require an oplog update
return bucket.getLifecycleConfiguration() || bucket.getNotificationConfiguration();
}
module.exports = {
_bucketRequiresOplogUpdate,
};

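A sketch of how a delete path might consult the helper; the surrounding variables are assumptions and the exact metadata-layer flag is not shown in this diff:

const skipOplogUpdate = !_bucketRequiresOplogUpdate(bucketMD);
// the delete path would forward this boolean to the metadata layer so buckets
// without lifecycle or notification configuration can skip the oplog write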

@ -4,23 +4,25 @@ const {
LifecycleDateTime, LifecycleDateTime,
LifecycleUtils, LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers; } = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
// moves lifecycle transition deadlines 1 day earlier, mostly for testing const {
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true'; expireOneDayEarlier,
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing transitionOneDayEarlier,
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true'; timeProgressionFactor,
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({ const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier, transitionOneDayEarlier,
expireOneDayEarlier, expireOneDayEarlier,
timeProgressionFactor,
}); });
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime); const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
function calculateDate(objDate, expDays, datetime) { function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + expDays * oneDay); return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
} }
function formatExpirationHeader(date, id) { function formatExpirationHeader(date, id) {
@ -37,8 +39,10 @@ const AMZ_ABORT_ID_HEADER = 'x-amz-abort-rule-id';
function _generateExpHeadersObjects(rules, params, datetime) { function _generateExpHeadersObjects(rules, params, datetime) {
const tags = { const tags = {
TagSet: Object.keys(params.tags) TagSet: params.tags
.map(key => ({ Key: key, Value: params.tags[key] })), ? Object.keys(params.tags)
.map(key => ({ Key: key, Value: params.tags[key] }))
: [],
}; };
const objectInfo = { Key: params.key }; const objectInfo = { Key: params.key };

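With the change above, lifecycle deadlines are computed from scaledMsPerDay instead of a hard-coded day, so time-scaled test configurations shorten them consistently. A sketch, where the 7-day expiration and the 'last-modified' field are illustrative:

// expiration 7 days after the object's timestamp
const expiration = calculateDate(objMD['last-modified'], 7, lifecycleDateTime);
// equals new Date(lifecycleDateTime.getTimestamp(objMD['last-modified']) + 7 * scaledMsPerDay)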

@ -0,0 +1,190 @@
const { versioning } = require('arsenal');
const versionIdUtils = versioning.VersionID;
const { lifecycleListing } = require('../../../../constants');
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = lifecycleListing;
function _makeTags(tags) {
const res = [];
Object.entries(tags).forEach(([key, value]) =>
res.push(
{
Key: key,
Value: value,
}
));
return res;
}
function processCurrents(bucketName, listParams, isBucketVersioned, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
IsLatest: true, // for compatibility with AWS ListObjectVersions.
DataStoreName: v.dataStoreName,
ListType: CURRENT_TYPE,
};
// NOTE: The current versions listed to be lifecycle should include version id
// if the bucket is versioned.
if (isBucketVersioned) {
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
content.VersionId = versionId;
}
data.Contents.push(content);
});
return data;
}
function _encodeVersionId(vid) {
let versionId = vid;
if (versionId && versionId !== 'null') {
versionId = versionIdUtils.encode(versionId);
}
return versionId;
}
function processNonCurrents(bucketName, listParams, list) {
const nextVersionIdMarker = _encodeVersionId(list.NextVersionIdMarker);
const versionIdMarker = _encodeVersionId(listParams.versionIdMarker);
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
KeyMarker: listParams.keyMarker,
VersionIdMarker: versionIdMarker,
BeforeDate: listParams.beforeDate,
NextKeyMarker: list.NextKeyMarker,
NextVersionIdMarker: nextVersionIdMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
const content = {
Key: item.key,
LastModified: v.LastModified,
ETag: `"${v.ETag}"`,
Size: v.Size,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
StorageClass: v.StorageClass,
TagSet: _makeTags(v.tags),
staleDate: v.staleDate, // lowerCamelCase to be compatible with existing lifecycle.
VersionId: versionId,
DataStoreName: v.dataStoreName,
ListType: NON_CURRENT_TYPE,
};
data.Contents.push(content);
});
return data;
}
function processOrphans(bucketName, listParams, list) {
const data = {
Name: bucketName,
Prefix: listParams.prefix,
MaxKeys: listParams.maxKeys,
MaxScannedLifecycleListingEntries: listParams.maxScannedLifecycleListingEntries,
IsTruncated: !!list.IsTruncated,
Marker: listParams.marker,
BeforeDate: listParams.beforeDate,
NextMarker: list.NextMarker,
Contents: [],
};
list.Contents.forEach(item => {
const v = item.value;
const versionId = (v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId);
data.Contents.push({
Key: item.key,
LastModified: v.LastModified,
Owner: {
ID: v.Owner.ID,
DisplayName: v.Owner.DisplayName,
},
VersionId: versionId,
IsLatest: true, // for compatibility with AWS ListObjectVersions.
ListType: ORPHAN_DM_TYPE,
});
});
return data;
}
function getLocationConstraintErrorMessage(locationName) {
return 'value of the location you are attempting to set ' +
`- ${locationName} - is not listed in the locationConstraint config`;
}
/**
* validateMaxScannedEntries - Validates and returns the maximum scanned entries value.
*
* @param {object} params - Query parameters
* @param {object} config - CloudServer configuration
* @param {number} min - Minimum number of entries to be scanned
* @returns {Object} - An object indicating the validation result:
* - isValid (boolean): Whether the validation is successful.
* - maxScannedLifecycleListingEntries (number): The validated maximum scanned entries value if isValid is true.
*/
function validateMaxScannedEntries(params, config, min) {
let maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
if (params['max-scanned-lifecycle-listing-entries']) {
const maxEntriesParams = Number.parseInt(params['max-scanned-lifecycle-listing-entries'], 10);
if (Number.isNaN(maxEntriesParams) || maxEntriesParams < min ||
maxEntriesParams > maxScannedLifecycleListingEntries) {
return { isValid: false };
}
maxScannedLifecycleListingEntries = maxEntriesParams;
}
return { isValid: true, maxScannedLifecycleListingEntries };
}
module.exports = {
processCurrents,
processNonCurrents,
processOrphans,
getLocationConstraintErrorMessage,
validateMaxScannedEntries,
};

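validateMaxScannedEntries clamps the client-supplied max-scanned-lifecycle-listing-entries between the caller-provided minimum and the configured maximum. Illustrative calls (the config value and minimum are made up):

validateMaxScannedEntries({ 'max-scanned-lifecycle-listing-entries': '500' },
    { maxScannedLifecycleListingEntries: 10000 }, 3);
// -> { isValid: true, maxScannedLifecycleListingEntries: 500 }
validateMaxScannedEntries({ 'max-scanned-lifecycle-listing-entries': '2' },
    { maxScannedLifecycleListingEntries: 10000 }, 3);
// -> { isValid: false }  (below the minimum of 3)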

@ -8,12 +8,13 @@
* *
* @param {array|string|null} prev - list of keys from the object being * @param {array|string|null} prev - list of keys from the object being
* overwritten * overwritten
* @param {array} curr - list of keys to be used in composing current object * @param {array|null} curr - list of keys to be used in composing
* current object
* @returns {boolean} true if no key in `curr` is present in `prev`, * @returns {boolean} true if no key in `curr` is present in `prev`,
* false otherwise * false otherwise
*/ */
function locationKeysHaveChanged(prev, curr) { function locationKeysHaveChanged(prev, curr) {
if (!prev || prev.length === 0) { if (!prev || prev.length === 0 || !curr) {
return true; return true;
} }
// backwards compatibility check if object is of model version 2 // backwards compatibility check if object is of model version 2


@ -1,5 +1,11 @@
const { errors } = require('arsenal'); const { errors, auth, policies } = require('arsenal');
const moment = require('moment'); const moment = require('moment');
const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/** /**
* Calculates retain until date for the locked object version * Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period * @param {object} retention - includes days or years retention period
@ -15,8 +21,9 @@ function calculateRetainUntilDate(retention) {
const date = moment(); const date = moment();
// Calculate the number of days to retain the lock on the object // Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365; const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate const retainUntilDate
= date.add(retainUntilDays, 'days'); = date.add(retainUntilDaysInMs, 'ms');
return retainUntilDate.toISOString(); return retainUntilDate.toISOString();
} }
/** /**
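calculateRetainUntilDate now adds the retention period in milliseconds via scaledMsPerDay, so time-scaled test setups shorten object-lock retention as well. A sketch (the retention period is illustrative):

calculateRetainUntilDate({ years: 2 });
// -> ISO date 730 * scaledMsPerDay milliseconds from now,
//    i.e. "now + 730 days" when scaledMsPerDay is the real 86400000 ms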
@ -43,7 +50,7 @@ function validateHeaders(bucket, headers, log) {
!(objectLockMode && objectLockDate)) { !(objectLockMode && objectLockDate)) {
return errors.InvalidArgument.customizeDescription( return errors.InvalidArgument.customizeDescription(
'x-amz-object-lock-retain-until-date and ' + 'x-amz-object-lock-retain-until-date and ' +
'x-amz-object-lock-mode must both be supplied' 'x-amz-object-lock-mode must both be supplied',
); );
} }
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']); const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
@ -126,101 +133,216 @@ function setObjectLockInformation(headers, md, defaultRetention) {
} }
/** /**
* isObjectLocked - checks whether object is locked or not * Helper class for object lock state checks
* @param {object} bucket - bucket metadata
* @param {object} objectMD - object metadata
* @param {array} headers - request headers
* @return {boolean} - indicates whether object is locked or not
*/ */
function isObjectLocked(bucket, objectMD, headers) { class ObjectLockInfo {
if (bucket.isObjectLockEnabled()) { /**
const objectLegalHold = objectMD.legalHold; *
if (objectLegalHold) { * @param {object} retentionInfo - The object lock retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | null} retentionInfo.mode - Retention policy mode.
* @param {string} retentionInfo.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} retentionInfo.legalHold - Whether a legal hold is enable for the object
*/
constructor(retentionInfo) {
this.mode = retentionInfo.mode || null;
this.date = retentionInfo.date || null;
this.legalHold = retentionInfo.legalHold || false;
}
/**
* ObjectLockInfo.isLocked
* @returns {bool} - Whether the retention policy is active and protecting the object
*/
isLocked() {
if (this.legalHold) {
            return true;
        }
-        const retentionMode = objectMD.retentionMode;
-        const retentionDate = objectMD.retentionDate;
-        if (!retentionMode || !retentionDate) {
        if (!this.mode || !this.date) {
            return false;
        }
-        if (retentionMode === 'GOVERNANCE' &&
-            headers['x-amz-bypass-governance-retention']) {
-            return false;
-        }
-        const objectDate = moment(retentionDate);
        return !this.isExpired();
    }
/**
* ObjectLockInfo.isGovernanceMode
* @returns {bool} - true if retention mode is GOVERNANCE
*/
isGovernanceMode() {
return this.mode === 'GOVERNANCE';
}
/**
* ObjectLockInfo.isComplianceMode
* @returns {bool} - True if retention mode is COMPLIANCE
*/
isComplianceMode() {
return this.mode === 'COMPLIANCE';
}
/**
* ObjectLockInfo.isExpired
* @returns {bool} - True if the retention policy has expired
*/
isExpired() {
        const now = moment();
-        // indicates retain until date has expired
        return this.date === null || now.isSameOrAfter(this.date);
    }
-        if (now.isSameOrAfter(objectDate)) {
-            return false;
-        }
-        return true;
-    }
-    return false;
-}
-
-/* objectLockRequiresBypass will return true if the retention info change
- * would require a bypass governance flag to be true.
- * In order for this to be true the action must be valid as well, so going from
- * COMPLIANCE to GOVERNANCE would return false unless it expired.
- */
-function objectLockRequiresBypass(objectMD, retentionInfo) {
-    const { retentionMode: existingMode, retentionDate: existingDateISO } = objectMD;
-    if (!existingMode) {
-        return false;
-    }
-    const existingDate = new Date(existingDateISO);
-    const isExpired = existingDate < Date.now();
-    const isExtended = new Date(retentionInfo.date) > existingDate;
-    if (existingMode === 'GOVERNANCE' && !isExpired) {
-        if (retentionInfo.mode === 'GOVERNANCE' && isExtended) {
-            return false;
-        }
-        return true;
-    }
    /**
     * ObjectLockInfo.isExtended
     * @param {string} timestamp - Timestamp in ISO-8601 format
     * @returns {bool} - True if the given timestamp is after the policy expiration date or if no expiration date is set
     */
    isExtended(timestamp) {
        return timestamp !== undefined && (this.date === null || moment(timestamp).isSameOrAfter(this.date));
    }
    /**
     * ObjectLockInfo.canModifyObject
     * @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
     * @returns {bool} - True if the retention policy allows the objects data to be modified (overwritten/deleted)
     */
    canModifyObject(hasGovernanceBypass) {
        // can modify object if object is not locked
        // cannot modify object in any cases if legal hold is enabled
        // if no legal hold, can only modify object if bypassing governance when locked
        if (!this.isLocked()) {
            return true;
        }
        return !this.legalHold && this.isGovernanceMode() && !!hasGovernanceBypass;
    }
/**
* ObjectLockInfo.canModifyPolicy
* @param {object} policyChanges - Proposed changes to the retention policy
* @param {"GOVERNANCE" | "COMPLIANCE" | undefined} policyChanges.mode - Retention policy mode.
* @param {string} policyChanges.date - Expiration date of retention policy. A string in ISO-8601 format
* @param {bool} hasGovernanceBypass - Whether to bypass governance retention policies
* @returns {bool} - True if the changes are allowed to be applied to the retention policy
*/
canModifyPolicy(policyChanges, hasGovernanceBypass) {
// If an object does not have a retention policy or it is expired then all changes are allowed
if (!this.isLocked()) {
            return true;
        }
-    // an invalid retention change or unrelated to bypass
        // The only allowed change in compliance mode is extending the retention period
if (this.isComplianceMode()) {
if (policyChanges.mode === 'COMPLIANCE' && this.isExtended(policyChanges.date)) {
return true;
}
}
if (this.isGovernanceMode()) {
// Extensions are always allowed in governance mode
if (policyChanges.mode === 'GOVERNANCE' && this.isExtended(policyChanges.date)) {
return true;
}
// All other changes in governance mode require a bypass
if (hasGovernanceBypass) {
return true;
}
}
        return false;
    }
}
-function validateObjectLockUpdate(objectMD, retentionInfo, bypassGovernance) {
-    const { retentionMode: existingMode, retentionDate: existingDateISO } = objectMD;
-    if (!existingMode) {
-        return null;
-    }
-    const existingDate = new Date(existingDateISO);
-    const isExpired = existingDate < Date.now();
-    const isExtended = new Date(retentionInfo.date) > existingDate;
-    if (existingMode === 'GOVERNANCE' && !isExpired && !bypassGovernance) {
-        if (retentionInfo.mode === 'GOVERNANCE' && isExtended) {
-            return null;
-        }
-        return errors.AccessDenied;
-    }
-    if (existingMode === 'COMPLIANCE') {
-        if (retentionInfo.mode === 'GOVERNANCE' && !isExpired) {
-            return errors.AccessDenied;
-        }
-        if (!isExtended) {
-            return errors.AccessDenied;
-        }
-    }
/**
 *
 * @param {object} headers - s3 request headers
 * @returns {bool} - True if the headers is present and === "true"
 */
function hasGovernanceBypassHeader(headers) {
    const bypassHeader = headers['x-amz-bypass-governance-retention'] || '';
    return bypassHeader.toLowerCase() === 'true';
}
/**
 * checkUserGovernanceBypass
 *
 * Checks for the presence of the s3:BypassGovernanceRetention permission for a given user
*
* @param {object} request - Incoming s3 request
* @param {object} authInfo - s3 authentication info
* @param {object} bucketMD - bucket metadata
* @param {string} objectKey - object key
* @param {object} log - Werelogs logger
* @param {function} cb - callback returns errors.AccessDenied if the authorization fails
* @returns {undefined} -
*/
function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log, cb) {
log.trace(
'object in GOVERNANCE mode and is user, checking for attached policies',
{ method: 'checkUserPolicyGovernanceBypass' },
);
-    return null;
    const authParams = auth.server.extractParams(request, log, 's3', request.query);
const ip = policies.requestUtils.getClientIp(request, config);
const requestContextParams = {
constantParams: {
headers: request.headers,
query: request.query,
generalResource: bucketMD.getName(),
specificResource: { key: objectKey },
requesterIp: ip,
sslEnabled: request.connection.encrypted,
apiMethod: 'bypassGovernanceRetention',
awsService: 's3',
locationConstraint: bucketMD.getLocationConstraint(),
requesterInfo: authInfo,
signatureVersion: authParams.params.data.signatureVersion,
authType: authParams.params.data.authType,
signatureAge: authParams.params.data.signatureAge,
},
};
return vault.checkPolicies(requestContextParams,
authInfo.getArn(), log, (err, authorizationResults) => {
if (err) {
return cb(err);
}
const explicitDenyExists = authorizationResults.some(
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
if (explicitDenyExists) {
log.trace('authorization check failed for user',
{
'method': 'checkUserPolicyGovernanceBypass',
's3:BypassGovernanceRetention': false,
});
return cb(errors.AccessDenied);
}
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
authInfo.getCanonicalID(),
authInfo,
actionImplicitDenies,
log,
request);
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
});
}
module.exports = {
    calculateRetainUntilDate,
    compareObjectLockInformation,
    setObjectLockInformation,
-    isObjectLocked,
    validateHeaders,
-    validateObjectLockUpdate,
-    objectLockRequiresBypass,
    hasGovernanceBypassHeader,
    checkUserGovernanceBypass,
    ObjectLockInfo,
};
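A hedged usage sketch of the new helpers (the `request`, `callback` and `newRetainUntilDate` values are illustrative; real callers build the retention info from object metadata):

const lockInfo = new ObjectLockInfo({
    mode: 'GOVERNANCE',
    date: '2030-01-01T00:00:00.000Z',
    legalHold: false,
});
const bypass = hasGovernanceBypassHeader(request.headers);
// Overwrites and deletes are only allowed when the lock is inactive or governance is bypassed.
if (!lockInfo.canModifyObject(bypass)) {
    return callback(errors.AccessDenied);
}
// Retention changes follow the same rules; compliance mode only allows extending the date.
const allowed = lockInfo.canModifyPolicy({ mode: 'GOVERNANCE', date: newRetainUntilDate }, bypass);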


@@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/**
 * Check if tier is supported
@@ -58,13 +58,22 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
        bucketName,
        objectKey,
        versionId: decodedVidResult,
-        requestType: 'restoreObject',
        requestType: request.apiMethods || 'restoreObject',
        /**
         * Restoring an object might not cause any impact on
         * the storage, if the object is already restored: in
         * this case, the duration is extended. We disable the
         * quota evaluation and trigger it manually.
         */
        checkQuota: false,
        request,
    };
    return async.waterfall([
        // get metadata of bucket and object
        function validateBucketAndObject(next) {
-            return mdUtils.metadataValidateBucketAndObj(mdValueParams, log, (err, bucketMD, objectMD) => {
            return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
                log, (err, bucketMD, objectMD) => {
                if (err) {
                    log.trace('request authorization failed', { method: METHOD, error: err });
                    return next(err);
@@ -115,6 +124,16 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
                return next(err, bucketMD, objectMD);
            });
        },
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
        function updateObjectMD(bucketMD, objectMD, next) {
            const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
            metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,


@ -0,0 +1,32 @@
const { errors } = require('arsenal');
const { unsupportedSignatureChecksums, supportedSignatureChecksums } = require('../../../../constants');
function validateChecksumHeaders(headers) {
// If the x-amz-trailer header is present the request is using one of the
// trailing checksum algorithms, which are not supported.
if (headers['x-amz-trailer'] !== undefined) {
return errors.BadRequest.customizeDescription('trailing checksum is not supported');
}
const signatureChecksum = headers['x-amz-content-sha256'];
if (signatureChecksum === undefined) {
return null;
}
if (supportedSignatureChecksums.has(signatureChecksum)) {
return null;
}
// If the value is not one of the possible checksum algorithms
// the only other valid value is the actual sha256 checksum of the payload.
// Do a simple sanity check of the length to guard against future algos.
// If the value is an unknown algo, then it will fail checksum validation.
if (!unsupportedSignatureChecksums.has(signatureChecksum) && signatureChecksum.length === 64) {
return null;
}
return errors.BadRequest.customizeDescription('unsupported checksum algorithm');
}
module.exports = validateChecksumHeaders;
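A short usage sketch (the `request` and `callback` objects are assumed to come from the usual API handler signature):

const validateChecksumHeaders = require('./validateChecksumHeaders');

const checksumError = validateChecksumHeaders(request.headers);
if (checksumError) {
    // x-amz-trailer present, or an unsupported x-amz-content-sha256 value
    return callback(checksumError);
}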


@@ -4,7 +4,7 @@ const async = require('async');
const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config');
-const oneDay = 24 * 60 * 60 * 1000;
const { scaledMsPerDay } = config.getTimeOptions();
const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata
@@ -58,7 +58,7 @@
 */
function getVersionIdResHeader(verCfg, objectMD) {
    if (verCfg) {
-        if (objectMD.isNull || (objectMD && !objectMD.versionId)) {
        if (objectMD.isNull || !objectMD.versionId) {
            return 'null';
        }
        return versionIdUtils.encode(objectMD.versionId);
@@ -79,17 +79,34 @@ function checkQueryVersionId(query) {
    return undefined;
}
-function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
-    metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
function _storeNullVersionMD(bucketName, objKey, nullVersionId, objMD, log, cb) {
    // In compatibility mode, create null versioned keys instead of null keys
    let versionId;
    let nullVersionMD;
    if (config.nullVersionCompatMode) {
        versionId = nullVersionId;
        nullVersionMD = Object.assign({}, objMD, {
            versionId: nullVersionId,
            isNull: true,
        });
    } else {
        versionId = 'null';
        nullVersionMD = Object.assign({}, objMD, {
            versionId: nullVersionId,
            isNull: true,
            isNull2: true,
        });
    }
    metadata.putObjectMD(bucketName, objKey, nullVersionMD, { versionId }, log, err => {
        if (err) {
            log.debug('error from metadata storing null version as new version',
                { error: err });
        }
-        cb(err, options);
        cb(err);
    });
}
-/** get location of null version data for deletion
/** check existence and get location of null version data for deletion
 * @param {string} bucketName - name of bucket
 * @param {string} objKey - name of object key
 * @param {object} options - metadata options for getting object MD
@@ -100,49 +117,55 @@ function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
 * @param {function} cb - callback
 * @return {undefined} - and call callback with (err, dataToDelete)
 */
-function _getNullVersionsToDelete(bucketName, objKey, options, mst, log, cb) {
-    if (options.versionId === mst.versionId) {
-        // no need to get delete location, we already have the master's metadata
-        const dataToDelete = mst.objLocation;
-        return process.nextTick(cb, null, dataToDelete);
-    }
-    return metadata.getObjectMD(bucketName, objKey, options, log,
-        (err, versionMD) => {
-            if (err) {
-                log.debug('err from metadata getting specified version', {
-                    error: err,
-                    method: '_getNullVersionsToDelete',
-                });
-                return cb(err);
-            }
-            if (!versionMD.location) {
-                return cb();
-            }
-            const dataToDelete = Array.isArray(versionMD.location) ?
-                versionMD.location : [versionMD.location];
-            return cb(null, dataToDelete);
-        });
-}
-function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
-    return _getNullVersionsToDelete(bucketName, objKey, options, mst, log,
-        (err, nullDataToDelete) => {
-            if (err) {
-                log.warn('could not find null version metadata', {
-                    error: err,
-                    method: '_deleteNullVersionMD',
-                });
-                return cb(err);
-            }
-            return metadata.deleteObjectMD(bucketName, objKey, options, log,
-                err => {
-                    if (err) {
-                        log.warn('metadata error deleting null version',
-                            { error: err, method: '_deleteNullVersionMD' });
-                        return cb(err);
-                    }
-                    return cb(null, nullDataToDelete);
-                });
-        });
-}
function _prepareNullVersionDeletion(bucketName, objKey, options, mst, log, cb) {
    const nullOptions = {};
    if (!options.deleteData) {
        return process.nextTick(cb, null, nullOptions);
    }
    if (options.versionId === mst.versionId) {
        // no need to get another key as the master is the target
        nullOptions.dataToDelete = mst.objLocation;
        return process.nextTick(cb, null, nullOptions);
    }
    if (options.versionId === 'null') {
        // deletion of the null key will be done by the main metadata
        // PUT via this option
        nullOptions.deleteNullKey = true;
    }
    return metadata.getObjectMD(bucketName, objKey, options, log,
        (err, versionMD) => {
            if (err) {
                // the null key may not exist, hence it's a normal
                // situation to have a NoSuchKey error, in which case
                // there is nothing to delete
                if (err.is.NoSuchKey) {
                    log.debug('null version does not exist', {
                        method: '_prepareNullVersionDeletion',
                    });
                } else {
                    log.warn('could not get null version metadata', {
                        error: err,
                        method: '_prepareNullVersionDeletion',
                    });
                }
                return cb(err);
            }
            if (versionMD.location) {
                const dataToDelete = Array.isArray(versionMD.location) ?
                    versionMD.location : [versionMD.location];
                nullOptions.dataToDelete = dataToDelete;
            }
            return cb(null, nullOptions);
        });
}
function _deleteNullVersionMD(bucketName, objKey, options, log, cb) {
    return metadata.deleteObjectMD(bucketName, objKey, options, log, err => {
        if (err) {
            log.warn('metadata error deleting null versioned key',
                { bucketName, objKey, error: err, method: '_deleteNullVersionMD' });
        }
        return cb(err);
    });
}
@@ -153,73 +176,103 @@ function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
 * @param {object} mst - state of master version, as returned by
 *        getMasterState()
 * @param {string} vstat - bucket versioning status: 'Enabled' or 'Suspended'
 * @param {boolean} nullVersionCompatMode - if true, behaves in null
 *        version compatibility mode and return appropriate values: this mode
 *        does not attempt to create null keys but create null versioned keys
 *        instead
 *
 * @return {object} result object with the following attributes:
 * - {object} options: versioning-related options to pass to the
 *            services.metadataStoreObject() call
- * - {object} [storeOptions]: options for metadata to create a new
- *            null version key, if needed
 * - {object} [options.extraMD]: extra attributes to set in object metadata
 * - {string} [nullVersionId]: null version key to create, if needed
 * - {object} [delOptions]: options for metadata to delete the null
 *            version key, if needed
 */
-function processVersioningState(mst, vstat) {
-    const options = {};
-    const storeOptions = {};
-    const delOptions = {};
-    // object does not exist or is not versioned (before versioning)
-    if (mst.versionId === undefined || mst.isNull) {
-        // versioning is suspended, overwrite existing master version
-        if (vstat === 'Suspended') {
-            options.versionId = '';
-            options.isNull = true;
-            options.dataToDelete = mst.objLocation;
-            // if null version exists, clean it up prior to put
-            if (mst.isNull) {
-                delOptions.versionId = mst.versionId;
-                if (mst.uploadId) {
-                    delOptions.replayId = mst.uploadId;
-                }
-                return { options, delOptions };
-            }
-            return { options };
-        }
-        // versioning is enabled, create a new version
-        options.versioning = true;
-        if (mst.exists) {
-            // store master version in a new key
-            const versionId = mst.isNull ? mst.versionId : nonVersionedObjId;
-            storeOptions.versionId = versionId;
-            storeOptions.isNull = true;
-            options.nullVersionId = versionId;
-            // non-versioned (non-null) MPU objects don't have a
-            // replay ID, so don't reference their uploadId
-            if (mst.isNull && mst.uploadId) {
-                options.nullUploadId = mst.uploadId;
-            }
-            return { options, storeOptions };
-        }
-        return { options };
-    }
-    // master is versioned and is not a null version
-    const nullVersionId = mst.nullVersionId;
-    if (vstat === 'Suspended') {
-        // versioning is suspended, overwrite the existing master version
-        options.versionId = '';
-        options.isNull = true;
-        if (nullVersionId === undefined) {
-            return { options };
-        }
-        delOptions.versionId = nullVersionId;
-        if (mst.nullUploadId) {
-            delOptions.replayId = mst.nullUploadId;
-        }
-        return { options, delOptions };
-    }
-    // versioning is enabled, put the new version
-    options.versioning = true;
-    options.nullVersionId = nullVersionId;
-    if (mst.nullUploadId) {
-        options.nullUploadId = mst.nullUploadId;
-    }
-    return { options };
-}
function processVersioningState(mst, vstat, nullVersionCompatMode) {
    const versioningSuspended = (vstat === 'Suspended');
    const masterIsNull = mst.exists && (mst.isNull || !mst.versionId);
    if (versioningSuspended) {
        // versioning is suspended: overwrite the existing null version
        const options = { versionId: '', isNull: true };
        if (masterIsNull) {
            // if the null version exists, clean it up prior to put
            if (mst.objLocation) {
                options.dataToDelete = mst.objLocation;
            }
            // backward-compat: a null version key may exist even with
            // a null master (due to S3C-7526), if so, delete it (its
            // data will be deleted as part of the master cleanup, so
            // no "deleteData" param is needed)
            //
            // "isNull2" attribute is set in master metadata when
            // null keys are used, which is used as an optimization to
            // avoid having to check the versioned key since there can
            // be no more versioned key to clean up
            if (mst.isNull && mst.versionId && !mst.isNull2) {
                const delOptions = { versionId: mst.versionId };
                return { options, delOptions };
            }
            return { options };
        }
        if (mst.nullVersionId) {
            // backward-compat: delete the null versioned key and data
            const delOptions = { versionId: mst.nullVersionId, deleteData: true };
            if (mst.nullUploadId) {
                delOptions.replayId = mst.nullUploadId;
            }
            return { options, delOptions };
        }
        // clean up the eventual null key's location data prior to put
        // NOTE: due to metadata v1 internal format, we cannot guess
        // from the master key whether there is an associated null
        // key, because the master key may be removed whenever the
        // latest version becomes a delete marker. Hence we need to
        // pessimistically try to get the null key metadata and delete
        // it if it exists.
        const delOptions = { versionId: 'null', deleteData: true };
        return { options, delOptions };
    }
    // versioning is enabled: create a new version
    const options = { versioning: true };
    if (masterIsNull) {
        // if master is a null version or a non-versioned key,
        // copy it to a new null key
        const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId;
        if (nullVersionCompatMode) {
            options.extraMD = {
                nullVersionId,
            };
            if (mst.uploadId) {
                options.extraMD.nullUploadId = mst.uploadId;
            }
            return { options, nullVersionId };
        }
        if (mst.isNull && !mst.isNull2) {
            // if master null version was put with an older
            // Cloudserver (or in compat mode), there is a
            // possibility that it also has a null versioned key
            // associated, so we need to delete it as we write the
            // null key
            const delOptions = {
                versionId: nullVersionId,
            };
            return { options, nullVersionId, delOptions };
        }
        return { options, nullVersionId };
    }
    // backward-compat: keep a reference to the existing null
    // versioned key
    if (mst.nullVersionId) {
        options.extraMD = {
            nullVersionId: mst.nullVersionId,
        };
        if (mst.nullUploadId) {
            options.extraMD.nullUploadId = mst.nullUploadId;
        }
    }
    return { options };
}
@@ -246,6 +299,7 @@ function getMasterState(objMD) {
        versionId: objMD.versionId,
        uploadId: objMD.uploadId,
        isNull: objMD.isNull,
        isNull2: objMD.isNull2,
        nullVersionId: objMD.nullVersionId,
        nullUploadId: objMD.nullUploadId,
    };
@@ -269,9 +323,6 @@
 *                  ('' overwrites the master version)
 * options.versioning - (true/undefined) metadata instruction to create new ver
 * options.isNull - (true/undefined) whether new version is null or not
- * options.nullVersionId - if storing a null version in version history, the
- *                         version id of the null version
- * options.deleteNullVersionData - whether to delete the data of the null ver
 */
function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
    log, callback) {
@@ -283,42 +334,102 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
        return process.nextTick(callback, null, options);
    }
    // bucket is versioning configured
-    const { options, storeOptions, delOptions } =
-        processVersioningState(mst, vCfg.Status);
-    return async.series([
-        function storeVersion(next) {
-            if (!storeOptions) {
-                return process.nextTick(next);
-            }
-            const versionMD = Object.assign({}, objMD, storeOptions);
-            const params = { versionId: storeOptions.versionId };
-            return _storeNullVersionMD(bucketName, objectKey, versionMD,
-                params, log, next);
-        },
-        function deleteNullVersion(next) {
-            if (!delOptions) {
-                return process.nextTick(next);
-            }
-            return _deleteNullVersionMD(bucketName, objectKey, delOptions, mst,
-                log, (err, nullDataToDelete) => {
-                    if (err) {
-                        log.warn('unexpected error deleting null version md', {
-                            error: err,
-                            method: 'versioningPreprocessing',
-                        });
-                        // it's possible there was a concurrent request to
-                        // delete the null version, so proceed with putting a
-                        // new version
-                        if (err.is.NoSuchKey) {
-                            return next(null, options);
-                        }
-                        return next(errors.InternalError);
-                    }
-                    Object.assign(options, { dataToDelete: nullDataToDelete });
-                    return next();
-                });
-        },
-    ], err => callback(err, options));
-}
    const { options, nullVersionId, delOptions } =
        processVersioningState(mst, vCfg.Status, config.nullVersionCompatMode);
    return async.series([
        function storeNullVersionMD(next) {
            if (!nullVersionId) {
                return process.nextTick(next);
            }
            return _storeNullVersionMD(bucketName, objectKey, nullVersionId, objMD, log, next);
        },
        function prepareNullVersionDeletion(next) {
            if (!delOptions) {
                return process.nextTick(next);
            }
            return _prepareNullVersionDeletion(
                bucketName, objectKey, delOptions, mst, log,
                (err, nullOptions) => {
                    if (err) {
                        return next(err);
                    }
                    Object.assign(options, nullOptions);
                    return next();
                });
        },
        function deleteNullVersionMD(next) {
            if (delOptions &&
                delOptions.versionId &&
                delOptions.versionId !== 'null') {
                // backward-compat: delete old null versioned key
                return _deleteNullVersionMD(
                    bucketName, objectKey, { versionId: delOptions.versionId }, log, next);
            }
            return process.nextTick(next);
        },
    ], err => {
        // it's possible there was a prior request that deleted the
        // null version, so proceed with putting a new version
        if (err && err.is.NoSuchKey) {
            return callback(null, options);
        }
        return callback(err, options);
    });
}
/** Return options to pass to Metadata layer for version-specific
* operations with the given requested version ID
*
* @param {object} objectMD - object metadata
* @param {boolean} nullVersionCompatMode - if true, behaves in null
* version compatibility mode
* @return {object} options object with params:
* {string} [options.versionId] - specific versionId to update
* {boolean} [options.isNull=true|false|undefined] - if set, tells the
* Metadata backend if we're updating or deleting a new-style null
* version (stored in master or null key), or not a null version.
*/
function getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode) {
// Use the internal versionId if it is a "real" null version (not
// non-versioned)
//
// If the target object is non-versioned: do not specify a
// "versionId" attribute nor "isNull"
//
// If the target version is a null version, i.e. has the "isNull"
// attribute:
//
// - send the "isNull=true" param to Metadata if the version is
// already a null key put by a non-compat mode Cloudserver, to
// let Metadata know that the null key is to be updated or
// deleted. This is the case if the "isNull2" metadata attribute
// exists
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that it is a legacy null version
//
// If the target version is not a null version and is versioned:
//
// - send the "isNull=false" param to Metadata in non-compat
// mode (mandatory for v1 format)
//
// - otherwise, do not send the "isNull" parameter to hint
// Metadata that an existing null version may not be stored in a
// null key
//
//
if (objectMD.versionId === undefined) {
return {};
}
const options = { versionId: objectMD.versionId };
if (objectMD.isNull) {
if (objectMD.isNull2) {
options.isNull = true;
}
} else if (!nullVersionCompatMode) {
options.isNull = false;
}
return options;
} }
/** preprocessingVersioningDelete - return versioning information for S3 to /** preprocessingVersioningDelete - return versioning information for S3 to
@@ -327,64 +438,67 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
 * @param {object} bucketMD - bucket metadata
 * @param {object} objectMD - obj metadata
 * @param {string} [reqVersionId] - specific version ID sent as part of request
- * @param {RequestLogger} log - logger instance
- * @param {function} callback - callback
- * @return {undefined} and call callback with params (err, options):
- * options.deleteData - (true/undefined) whether to delete data (if undefined
- *                      means creating a delete marker instead)
- * options.versionId - specific versionId to delete
 * @param {boolean} nullVersionCompatMode - if true, behaves in null version compatibility mode
 * @return {object} options object with params:
 * {boolean} [options.deleteData=true|undefined] - whether to delete data (if undefined
 *           means creating a delete marker instead)
 * {string} [options.versionId] - specific versionId to delete
 * {boolean} [options.isNull=true|false|undefined] - if set, tells the
 *           Metadata backend if we're deleting a new-style null version (stored
 *           in master or null key), or not a null version.
 */
-function preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
-    reqVersionId, log, callback) {
-    const options = {};
-    // bucket is not versioning enabled
-    if (!bucketMD.getVersioningConfiguration()) {
-        options.deleteData = true;
-        return callback(null, options);
-    }
-    // bucket is versioning enabled
-    if (reqVersionId && reqVersionId !== 'null') {
-        // deleting a specific version
-        options.deleteData = true;
-        options.versionId = reqVersionId;
-        if (objectMD.uploadId) {
-            options.replayId = objectMD.uploadId;
-        }
-        return callback(null, options);
-    }
-    if (reqVersionId) {
-        // deleting the 'null' version if it exists
-        if (objectMD.versionId === undefined) {
-            // object is not versioned, deleting it
-            options.deleteData = true;
-            // non-versioned (non-null) MPU objects don't have a
-            // replay ID, so don't reference their uploadId
-            return callback(null, options);
-        }
-        if (objectMD.isNull) {
-            // master is the null version
-            options.deleteData = true;
-            options.versionId = objectMD.versionId;
-            if (objectMD.uploadId) {
-                options.replayId = objectMD.uploadId;
-            }
-            return callback(null, options);
-        }
-        if (objectMD.nullVersionId) {
-            // null version exists, deleting it
-            options.deleteData = true;
-            options.versionId = objectMD.nullVersionId;
-            if (objectMD.nullUploadId) {
-                options.replayId = objectMD.nullUploadId;
-            }
-            return callback(null, options);
-        }
-        // null version does not exist, no deletion
-        // TODO check AWS behaviour for no deletion (seems having no error)
-        return callback(errors.NoSuchKey);
-    }
-    // not deleting any specific version, making a delete marker instead
-    return callback(null, options);
-}
function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersionId, nullVersionCompatMode) {
    let options = {};
    if (bucketMD.getVersioningConfiguration() && reqVersionId) {
        options = getVersionSpecificMetadataOptions(objectMD, nullVersionCompatMode);
    }
    if (!bucketMD.getVersioningConfiguration() || reqVersionId) {
        // delete data if bucket is non-versioned or the request
        // deletes a specific version
        options.deleteData = true;
    }
    return options;
}
/**
 * Keep metadatas when the object is restored from cold storage
 * but remove the specific ones we don't want to keep
 * @param {object} objMD - obj metadata
 * @param {object} metadataStoreParams - custom built object containing resource details.
 * @return {undefined}
 */
function restoreMetadata(objMD, metadataStoreParams) {
    /* eslint-disable no-param-reassign */
    const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
    // We need to keep user metadata and tags
    Object.keys(objMD).forEach(key => {
        if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
            metadataStoreParams.metaHeaders[key] = objMD[key];
        }
    });
    if (objMD['x-amz-website-redirect-location']) {
        if (!metadataStoreParams.headers) {
            metadataStoreParams.headers = {};
        }
        metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
    }
    if (objMD.replicationInfo) {
        metadataStoreParams.replicationInfo = objMD.replicationInfo;
    }
    if (objMD.legalHold) {
        metadataStoreParams.legalHold = objMD.legalHold;
    }
    if (objMD.acl) {
        metadataStoreParams.acl = objMD.acl;
    }
    metadataStoreParams.creationTime = objMD['creation-time'];
    metadataStoreParams.lastModifiedDate = objMD['last-modified'];
    metadataStoreParams.taggingCopy = objMD.tags;
}
/** overwritingVersioning - return versioning information for S3 to handle /** overwritingVersioning - return versioning information for S3 to handle
@@ -398,10 +512,11 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD,
 * version id of the null version
 */
function overwritingVersioning(objMD, metadataStoreParams) {
-    /* eslint-disable no-param-reassign */
-    metadataStoreParams.creationTime = objMD['creation-time'];
-    metadataStoreParams.lastModifiedDate = objMD['last-modified'];
    metadataStoreParams.updateMicroVersionId = true;
    metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
    // set correct originOp
    metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
    // update restore
    const days = objMD.archive?.restoreRequestedDays;
@@ -411,7 +526,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
        restoreRequestedAt: objMD.archive?.restoreRequestedAt,
        restoreRequestedDays: objMD.archive?.restoreRequestedDays,
        restoreCompletedAt: new Date(now),
-        restoreWillExpireAt: new Date(now + (days * oneDay)),
        restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
    };
    /* eslint-enable no-param-reassign */
@@ -420,8 +535,14 @@ function overwritingVersioning(objMD, metadataStoreParams) {
    const options = {
        versionId,
        isNull: objMD.isNull,
-        nullVersionId: objMD.nullVersionId,
    };
    if (objMD.nullVersionId) {
        options.extraMD = {
            nullVersionId: objMD.nullVersionId,
        };
    }
    restoreMetadata(objMD, metadataStoreParams);
    return options;
}
@@ -433,6 +554,7 @@ module.exports = {
    processVersioningState,
    getMasterState,
    versioningPreprocessing,
    getVersionSpecificMetadataOptions,
    preprocessingVersioningDelete,
    overwritingVersioning,
    decodeVID,
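To make the new helper concrete, a sketch of the options it returns for a few illustrative objectMD shapes (nullVersionCompatMode is false in all three calls):

// New-style null key (isNull2 set): tell Metadata explicitly that this is a null key.
getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true, isNull2: true }, false);
// => { versionId: 'v1', isNull: true }

// Legacy null version: no isNull hint is sent.
getVersionSpecificMetadataOptions({ versionId: 'v1', isNull: true }, false);
// => { versionId: 'v1' }

// Regular version in non-compat mode: isNull=false is mandatory for the v1 format.
getVersionSpecificMetadataOptions({ versionId: 'v2' }, false);
// => { versionId: 'v2', isNull: false }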


@@ -101,8 +101,33 @@ function validateWebsiteHeader(header) {
        header.startsWith('http://') || header.startsWith('https://'));
}
/**
* appendWebsiteIndexDocument - append index to objectKey if necessary
* @param {object} request - normalized request object
* @param {string} indexDocumentSuffix - index document from website config
* @param {boolean} force - flag to force append index
* @return {undefined}
*/
function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
const reqObjectKey = request.objectKey ? request.objectKey : '';
/* eslint-disable no-param-reassign */
// find index document if "directory" sent in request
if (reqObjectKey.endsWith('/')) {
request.objectKey += indexDocumentSuffix;
// find index document if no key provided
} else if (reqObjectKey === '') {
request.objectKey = indexDocumentSuffix;
// force for redirect 302 on folder without trailing / that has an index
} else if (force) {
request.objectKey += `/${indexDocumentSuffix}`;
}
/* eslint-enable no-param-reassign */
}
module.exports = {
    findRoutingRule,
    extractRedirectInfo,
    validateWebsiteHeader,
    appendWebsiteIndexDocument,
};
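A hedged sketch of the helper's effect (the request shape is assumed from the website handlers):

const request = { objectKey: 'docs/' };
appendWebsiteIndexDocument(request, 'index.html');
// request.objectKey === 'docs/index.html'

const folder = { objectKey: 'docs' };
appendWebsiteIndexDocument(folder, 'index.html', true); // force: used for the 302 redirect case
// folder.objectKey === 'docs/index.html'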


@ -0,0 +1,314 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
/**
* Evaluates quotas for a bucket and an account and update inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
/**
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
* the incoming bytes, are under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {boolean} - true if the quota is valid, false otherwise
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
// is checked second. This logic handles these APIs calls by
// ensuring the bytes are positives (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes are compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// the sum of the current utilization metrics only are compared with the
// quota. They include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};
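To illustrate the byte accounting above (a sketch; the bucket objects are hypothetical and only isVersioningEnabled() matters here):

const existingMD = { 'content-length': '100' };

// Replacing a 100-byte object with a 40-byte PUT in a non-versioned bucket
// counts only the 60-byte decrease against the quota.
processBytesToWrite('objectPut', nonVersionedBucket, undefined, 40, existingMD); // => -60

// Restoring an archived object counts its full size again.
processBytesToWrite('objectRestore', someBucket, undefined, 0, existingMD); // => 100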


@ -0,0 +1,117 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, isBucketVersioned, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processCurrents(bucketName, listParams, isBucketVersioned, list);
pushMetric('listLifecycleCurrents', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleCurrents');
return callback(null, res);
}
/**
* listLifecycleCurrents - Return list of current versions/masters in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
 * @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleCurrents(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleCurrents' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
const minEntriesToBeScanned = 1;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleCurrents',
request,
};
const listParams = {
listingType: 'DelimiterCurrent',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
marker: params.marker,
excludedDataStoreName,
maxScannedLifecycleListingEntries,
};
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleCurrents');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, isBucketVersioned, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleCurrents');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, isBucketVersioned, log, callback);
});
});
}
module.exports = {
listLifecycleCurrents,
};
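For reference, the query parameters this route consumes (taken from listParams above; defaults are hedged from the code):

// max-keys                                default 1000, capped by the listing hard limit
// prefix, marker, before-date             listing window controls
// excluded-data-store-name                must be a configured location constraint
// max-scanned-lifecycle-listing-entries   must be between 1 and the configured maximum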


@ -0,0 +1,127 @@
const { errors, versioning } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const versionIdUtils = versioning.VersionID;
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processNonCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processNonCurrents(bucketName, listParams, list);
pushMetric('listLifecycleNonCurrents', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleNonCurrents');
return callback(null, res);
}
/**
* listLifecycleNonCurrents - Return list of non-current versions in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
 * @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleNonCurrents' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is required as a minimum because we must scan at least three entries to determine version eligibility.
// Two entries representing the master key and the following one representing the non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleNonCurrents');
return callback(errors.InvalidArgument);
}
const excludedDataStoreName = params['excluded-data-store-name'];
if (excludedDataStoreName && !locationConstraints[excludedDataStoreName]) {
const errMsg = getLocationConstraintErrorMessage(excludedDataStoreName);
log.error(`locationConstraint is invalid - ${errMsg}`, { locationConstraint: excludedDataStoreName });
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleCurrents');
return callback(errors.InvalidLocationConstraint.customizeDescription(errMsg));
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleNonCurrents',
request,
};
const listParams = {
listingType: 'DelimiterNonCurrent',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
keyMarker: params['key-marker'],
excludedDataStoreName,
maxScannedLifecycleListingEntries,
};
listParams.versionIdMarker = params['version-id-marker'] ?
versionIdUtils.decode(params['version-id-marker']) : undefined;
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!isBucketVersioned) {
log.debug('bucket is not versioned');
return callback(errors.InvalidRequest.customizeDescription(
'bucket is not versioned'), null);
}
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { method: 'services.getLifecycleListing', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleNonCurrents');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback);
});
});
}
module.exports = {
listLifecycleNonCurrents,
};
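As a hedged illustration of how a caller might assemble the query for this listing (the route that dispatches to listLifecycleNonCurrents is not part of this diff, so only the parameter names read by the handler above are used; everything else is an assumption):

// Illustrative sketch only: build a query string from the parameters consumed by
// listLifecycleNonCurrents above. How the request reaches the handler is not shown here.
const querystring = require('querystring');
function buildNonCurrentListingQuery(opts) {
    const params = {
        'max-keys': opts.maxKeys,
        prefix: opts.prefix,
        'before-date': opts.beforeDate,
        'key-marker': opts.keyMarker,
        'version-id-marker': opts.versionIdMarker,
        'excluded-data-store-name': opts.excludedDataStoreName,
    };
    // Drop unset entries so only explicitly chosen parameters appear in the query.
    Object.keys(params).forEach(k => {
        if (params[k] === undefined) {
            delete params[k];
        }
    });
    return querystring.stringify(params);
}
// e.g. list up to 500 non-current versions last modified before 2024-01-01:
// buildNonCurrentListingQuery({ maxKeys: 500, beforeDate: '2024-01-01T00:00:00.000Z' });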


@ -0,0 +1,112 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');
function handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback) {
// eslint-disable-next-line no-param-reassign
listParams.maxKeys = requestMaxKeys;
const res = processOrphans(bucketName, listParams, list);
pushMetric('listLifecycleOrphanDeleteMarkers', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('GET', bucketName, '200', 'listLifecycleOrphanDeleteMarkers');
return callback(null, res);
}
/**
* listLifecycleOrphanDeleteMarkers - Return list of expired object delete markers in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
* requester's info
* @param {array} locationConstraints - array of location constraints
* @param {object} request - http request object
* @param {function} log - Werelogs request logger
* @param {function} callback - callback to respond to http request
* with either error code or xml response body
* @return {undefined}
*/
function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request, log, callback) {
const params = request.query;
const bucketName = request.bucketName;
log.debug('processing request', { method: 'listLifecycleOrphanDeleteMarkers' });
const requestMaxKeys = params['max-keys'] ?
Number.parseInt(params['max-keys'], 10) : 1000;
if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
return callback(errors.InvalidArgument);
}
const actualMaxKeys = Math.min(constants.listingHardLimit, requestMaxKeys);
// 3 is required as a minimum because we must scan at least three entries to determine version eligibility.
// Two entries representing the master key and the following one representing the non-current version.
const minEntriesToBeScanned = 3;
const { isValid, maxScannedLifecycleListingEntries } =
validateMaxScannedEntries(params, config, minEntriesToBeScanned);
if (!isValid) {
monitoring.promMetrics('GET', bucketName, 400, 'listLifecycleOrphanDeleteMarkers');
return callback(errors.InvalidArgument);
}
const metadataValParams = {
authInfo,
bucketName,
requestType: 'listLifecycleOrphanDeleteMarkers',
request,
};
const listParams = {
listingType: 'DelimiterOrphanDeleteMarker',
maxKeys: actualMaxKeys,
prefix: params.prefix,
beforeDate: params['before-date'],
marker: params.marker,
maxScannedLifecycleListingEntries,
};
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
return callback(err, null);
}
const vcfg = bucket.getVersioningConfiguration();
const isBucketVersioned = vcfg && (vcfg.Status === 'Enabled' || vcfg.Status === 'Suspended');
if (!isBucketVersioned) {
log.debug('bucket is not versioned');
return callback(errors.InvalidRequest.customizeDescription(
'bucket is not versioned'), null);
}
if (!requestMaxKeys) {
const emptyList = {
Contents: [],
IsTruncated: false,
};
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, emptyList, log, callback);
}
return services.getLifecycleListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'listLifecycleOrphanDeleteMarkers');
return callback(err, null);
}
return handleResult(listParams, requestMaxKeys, authInfo,
bucketName, list, log, callback);
});
});
}
module.exports = {
listLifecycleOrphanDeleteMarkers,
};
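Both listings delegate the max-scanned check to validateMaxScannedEntries, which is required above from '../apiUtils/object/lifecycle' and is not shown in this diff. Purely as an assumption about its contract (a caller-supplied value must be at least minEntriesToBeScanned and is otherwise capped by the configured default), a sketch might look like this; the query-parameter and config field names here are guesses:

// Assumed behaviour only -- the real helper is not part of this diff.
function validateMaxScannedEntriesSketch(params, config, minEntriesToBeScanned) {
    const configured = config.maxScannedLifecycleListingEntries;        // assumed config field
    const requested = params['max-scanned-lifecycle-listing-entries'];  // assumed query name
    if (requested === undefined) {
        return { isValid: true, maxScannedLifecycleListingEntries: configured };
    }
    const value = Number.parseInt(requested, 10);
    if (Number.isNaN(value) || value < minEntriesToBeScanned || value > configured) {
        return { isValid: false };
    }
    return { isValid: true, maxScannedLifecycleListingEntries: value };
}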


@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -34,7 +34,7 @@ function bucketDelete(authInfo, request, log, cb) {
request,
};
-return metadataValidateBucket(metadataValParams, log,
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucketMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucketMD);
@ -48,7 +48,7 @@ function bucketDelete(authInfo, request, log, cb) {
log.trace('passed checks',
{ method: 'metadataValidateBucket' });
return deleteBucket(authInfo, bucketMD, bucketName,
-authInfo.getCanonicalID(), log, err => {
+authInfo.getCanonicalID(), request, log, err => {
if (err) {
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucket');
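This diff and most of the bucket diffs that follow apply one recurring change: metadataValidateBucket(params, log, cb) becomes standardMetadataValidateBucket(params, request.actionImplicitDenies, log, cb), and requestType widens to request.apiMethods || '<action>'. Condensed before/after, taking bucketDeleteLifecycle as the example (illustrative call sites only, not the helper's implementation):

// before
return metadataValidateBucket({ authInfo, bucketName,
    requestType: 'bucketDeleteLifecycle', request }, log, cb);
// after: request.actionImplicitDenies is now threaded into the validation helper
return standardMetadataValidateBucket({ authInfo, bucketName,
    requestType: request.apiMethods || 'bucketDeleteLifecycle', request },
    request.actionImplicitDenies, log, cb);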


@ -38,7 +38,8 @@ function bucketDeleteCors(authInfo, request, log, callback) {
}
log.trace('found bucket in metadata');
-if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
+if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
+authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketDeleteCors',


@ -1,7 +1,7 @@
const async = require('async');
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
@ -21,12 +21,12 @@ function bucketDeleteEncryption(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketDeleteEncryption',
+requestType: request.apiMethods || 'bucketDeleteEncryption',
request,
};
return async.waterfall([
-next => metadataValidateBucket(metadataValParams, log, next),
+next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
(bucket, next) => {
const sseConfig = bucket.getServerSideEncryption();


@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketDeleteLifecycle',
+requestType: request.apiMethods || 'bucketDeleteLifecycle',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {


@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
@ -16,10 +16,10 @@ function bucketDeletePolicy(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketDeletePolicy',
+requestType: request.apiMethods || 'bucketDeletePolicy',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {


@ -0,0 +1,58 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
* Bucket Update Quota - Update bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketDeleteQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketDeleteQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
request,
};
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket)),
(bucket, next) => {
bucket.setQuota(0);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketDeleteQuota'
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}
module.exports = bucketDeleteQuota;
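A note on semantics grounded in the two quota handlers of this diff: deleting the quota is implemented as setQuota(0), and a falsy quota is treated as "no quota configured" (bucketGetQuota further below answers NoSuchQuota in that case). A tiny self-contained illustration of that convention; the class is a stand-in for the bucket metadata object, not Arsenal's BucketInfo:

// Stand-in model for illustration only.
class QuotaModel {
    constructor() { this.quota = 0; }      // 0 means "no quota configured"
    setQuota(q) { this.quota = q; }
    getQuota() { return this.quota; }
}
const bucket = new QuotaModel();
bucket.setQuota(10 * 1024 * 1024 * 1024);  // e.g. a quota of 10 GiB
bucket.setQuota(0);                        // what bucketDeleteQuota does
console.log(bucket.getQuota() ? 'quota set' : 'NoSuchQuota'); // -> 'NoSuchQuota'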


@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketDeleteReplication',
+requestType: request.apiMethods || 'bucketDeleteReplication',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {


@ -1,6 +1,6 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const metadata = require('../metadata/wrapper');
@ -20,16 +20,20 @@ function bucketDeleteTagging(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketDeleteTagging',
+requestType: request.apiMethods || 'bucketDeleteTagging',
+request,
};
let bucket = null;
return waterfall([
-next => metadataValidateBucket(metadataValParams, log,
+next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
+if (err) {
+return next(err);
+}
bucket = b;
bucket.setTags([]);
-return next(err);
+return next();
}),
next => metadata.updateBucket(bucket.getName(), bucket, log, next),
], err => {


@ -30,7 +30,8 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
}
log.trace('found bucket in metadata');
-if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
+if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
+authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketDeleteWebsite',


@ -2,7 +2,7 @@ const querystring = require('querystring');
const { errors, versioning, s3middleware } = require('arsenal');
const constants = require('../../constants');
const services = require('../services');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities');
@ -210,7 +210,7 @@ function processMasterVersions(bucketName, listParams, list) {
xmlParams.forEach(p => {
if (p.value && skipUrlEncoding.has(p.tag)) {
xml.push(`<${p.tag}>${p.value}</${p.tag}>`);
-} else if (p.value || p.tag === 'KeyCount') {
+} else if (p.value || p.tag === 'KeyCount' || p.tag === 'MaxKeys') {
xml.push(`<${p.tag}>${escapeXmlFn(p.value)}</${p.tag}>`);
} else if (p.tag !== 'NextMarker' &&
p.tag !== 'EncodingType' &&
@ -322,7 +322,7 @@ function bucketGet(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGet',
+requestType: request.apiMethods || 'bucketGet',
request,
};
const listParams = {
@ -345,7 +345,7 @@ function bucketGet(authInfo, request, log, callback) {
listParams.marker = params.marker;
}
-metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {


@ -1,5 +1,5 @@
const aclUtils = require('../utilities/aclUtils');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
@ -44,7 +44,7 @@ function bucketGetACL(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetACL',
+requestType: request.apiMethods || 'bucketGetACL',
request,
};
const grantInfo = {
@ -55,7 +55,7 @@ function bucketGetACL(authInfo, request, log, callback) {
},
};
-metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {


@ -39,7 +39,8 @@ function bucketGetCors(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
-if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
+if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
+authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketGetCors',


@ -4,7 +4,7 @@ const async = require('async');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml;
/**
@ -22,12 +22,12 @@ function bucketGetEncryption(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetEncryption',
+requestType: request.apiMethods || 'bucketGetEncryption',
request,
};
return async.waterfall([
-next => metadataValidateBucket(metadataValParams, log, next),
+next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
(bucket, next) => {
// If sseInfo is present but the `mandatory` flag is not set


@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const LifecycleConfiguration =
require('arsenal').models.LifecycleConfiguration;
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -21,10 +21,10 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetLifecycle',
+requestType: request.apiMethods || 'bucketGetLifecycle',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {


@ -41,7 +41,8 @@ function bucketGetLocation(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
-if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
+if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
+authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for account on bucket', {
requestType,
method: 'bucketGetLocation',


@ -1,4 +1,4 @@
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { NotificationConfiguration } = require('arsenal').models;
@ -37,11 +37,11 @@ function bucketGetNotification(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetNotification',
+requestType: request.apiMethods || 'bucketGetNotification',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {


@ -1,5 +1,5 @@
const { errors } = require('arsenal');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const ObjectLockConfiguration =
@ -33,10 +33,10 @@ function bucketGetObjectLock(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetObjectLock',
+requestType: request.apiMethods || 'bucketGetObjectLock',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {


@ -1,6 +1,6 @@
const { errors } = require('arsenal');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
@ -17,11 +17,11 @@ function bucketGetPolicy(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetPolicy',
+requestType: request.apiMethods || 'bucketGetPolicy',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

lib/api/bucketGetQuota.js

@ -0,0 +1,58 @@
const { errors } = require('arsenal');
const { pushMetric } = require('../utapi/utilities');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
* bucketGetQuota - Get the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketGetQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGetQuota' });
const { bucketName, headers, method } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketGetQuota',
request,
};
const xml = [];
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketGetQuota',
});
return callback(err, null, corsHeaders);
}
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<GetBucketQuota>',
'<Name>', bucket.getName(), '</Name>',
);
const bucketQuota = bucket.getQuota();
if (!bucketQuota) {
log.debug('bucket has no quota', {
method: 'bucketGetQuota',
});
return callback(errors.NoSuchQuota, null,
corsHeaders);
}
xml.push('<Quota>', bucketQuota, '</Quota>',
'</GetBucketQuota>');
pushMetric('getBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, xml.join(''), corsHeaders);
});
}
module.exports = bucketGetQuota;
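For reference, a successful response assembled by the handler above has this shape (bucket name and byte value are illustrative):

<?xml version="1.0" encoding="UTF-8"?>
<GetBucketQuota><Name>example-bucket</Name><Quota>10737418240</Quota></GetBucketQuota>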


@ -1,6 +1,6 @@
const { errors } = require('arsenal');
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const { getReplicationConfigurationXML } =
require('./apiUtils/bucket/getReplicationConfiguration');
@ -21,10 +21,10 @@ function bucketGetReplication(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetReplication',
+requestType: request.apiMethods || 'bucketGetReplication',
request,
};
-return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {


@ -1,4 +1,4 @@
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { pushMetric } = require('../utapi/utilities');
@ -67,7 +67,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetTagging',
+requestType: request.apiMethods || 'bucketGetTagging',
request,
};
let bucket = null;
@ -75,7 +75,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
let tags = null;
return waterfall([
-next => metadataValidateBucket(metadataValParams, log,
+next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
bucket = b;
return next(err);


@ -1,4 +1,4 @@
-const { metadataValidateBucket } = require('../metadata/metadataUtils');
+const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -54,11 +54,11 @@ function bucketGetVersioning(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
-requestType: 'bucketGetVersioning',
+requestType: request.apiMethods || 'bucketGetVersioning',
request,
};
-metadataValidateBucket(metadataValParams, log, (err, bucket) => {
+standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {

Some files were not shown because too many files have changed in this diff.