Compare commits

438 Commits

Author SHA1 Message Date
Vitaliy Filippov b5711e9cbf Use fs.readFileSync to read config file instead of require 2024-08-13 11:19:38 +03:00
Vitaliy Filippov 36dc6298d2 Use webpack to pack 2024-08-13 02:20:08 +03:00
Vitaliy Filippov bc2d637578 Add installation instructions for Vitastor backend 2024-08-12 01:36:42 +03:00
Vitaliy Filippov b543695048 Add example Vitastor backend configs 2024-08-11 17:24:05 +03:00
Vitaliy Filippov 90024d044d Configure "legacy" werelogs because otherwise MultipleBackendGateway was skipping messages 2024-08-04 01:22:48 +03:00
Vitaliy Filippov 451ab33f68 Use config.workers instead of config.clusters 2024-08-03 14:10:39 +03:00
Vitaliy Filippov c86107e912 Add authdata config file reference to config.json 2024-08-03 01:36:01 +03:00
Vitaliy Filippov 0a5962f256 Require scality kms only if kms backend is scality 2024-08-03 01:29:04 +03:00
Vitaliy Filippov 0e292791c6 Setup backends in config.json 2024-08-02 01:45:38 +03:00
Vitaliy Filippov fc07729bd0 Use ^versions 2024-08-02 01:44:13 +03:00
Vitaliy Filippov 4527dd6795 Do not store actual configs in git 2024-08-01 15:52:02 +03:00
Vitaliy Filippov 05fb581023 Use x-amz-storage-class instead of x-amz-meta-scal-location-constraint
FIXME: Ideally, both locations and storage classes should be supported
2024-07-28 02:00:38 +03:00
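
A minimal client-side sketch of the two ways of targeting a backend that this commit switches between, assuming the AWS SDK v3 and a hypothetical location/storage-class name (not actual values from this repository):

```ts
import { S3Client, PutObjectCommand } from '@aws-sdk/client-s3';

const s3 = new S3Client({ endpoint: 'http://127.0.0.1:8000', region: 'us-east-1', forcePathStyle: true });

async function putWithLocationHints() {
    // Old scheme: a custom metadata key, sent on the wire as x-amz-meta-scal-location-constraint.
    await s3.send(new PutObjectCommand({
        Bucket: 'demo-bucket',
        Key: 'demo-key',
        Body: 'hello',
        Metadata: { 'scal-location-constraint': 'vitastor-location' }, // hypothetical location name
    }));

    // New scheme in this fork: the standard x-amz-storage-class header.
    await s3.send(new PutObjectCommand({
        Bucket: 'demo-bucket',
        Key: 'demo-key',
        Body: 'hello',
        StorageClass: 'vitastor-location' as any, // hypothetical class; cast around the SDK's enum typing
    }));
}

putWithLocationHints().catch(console.error);
```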
Vitaliy Filippov 956739a04e Use internal vaultclient for utapi server 2024-07-23 16:32:48 +03:00
Vitaliy Filippov 7ad0888a66 Change git dependency URLs 2024-07-21 17:36:47 +03:00
Vitaliy Filippov bf01ba4ed1 Change git dependency URLs 2024-07-21 15:26:06 +03:00
Vitaliy Filippov ab019e7e50 Make vaultclient dependency optional 2024-07-21 14:19:54 +03:00
Vitaliy Filippov 3797695e74 Make bucketclient dependency optional 2024-07-18 11:17:05 +03:00
Vitaliy Filippov c8084196c4 Remove remote management 2024-07-16 20:34:11 +03:00
bert-e b72e918ff9 Merge branch 'w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.8/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 22887f47d8 Merge branch 'w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 0cd10a73f3 Merge branch 'w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
bert-e e139406612 Merge branch 'bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
Maha Benzekri d91853a38b
processBucketPolicy fixup for objectDelete
Introduced by https://github.com/scality/cloudserver/pull/5580:
we now send a requestContext with no specific resource instead
of "null", which results in a policy evaluation error.
We get an implicit deny for the requestType "objectDelete",
which causes the processed result to be false, thus sending an empty
array of objects to Vault and resulting in a deny even when the policy
allows the action on specific objects.

Linked Issue : https://scality.atlassian.net/browse/CLDSRV-555
2024-07-15 14:20:08 +02:00
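
A hypothetical, heavily simplified sketch of the behaviour the fix restores (the real evaluation goes through Arsenal and Vault; the names below are illustrative only): each object in a multi-object delete gets its own request context with a concrete resource, so the policy is not collapsed into a single implicit deny.

```ts
type Verdict = 'Allow' | 'Deny' | 'Implicit';

// Stand-in for the real policy engine: allow deletes only under allowed/.
function evaluate(action: string, resourceArn: string): Verdict {
    return action === 'objectDelete' && resourceArn.includes('/allowed/') ? 'Allow' : 'Implicit';
}

// One context per key: a single context with no resource would yield
// 'Implicit' and an empty object list would be sent to the authorization service.
function keysAllowedForDelete(bucket: string, keys: string[]): string[] {
    return keys.filter(key => evaluate('objectDelete', `arn:aws:s3:::${bucket}/${key}`) === 'Allow');
}

console.log(keysAllowedForDelete('my-bucket', ['allowed/a.txt', 'private/b.txt'])); // [ 'allowed/a.txt' ]
```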
Mickael Bourgois a7e798f909
CLDSRV-544: bump version 8.8.27 2024-07-03 19:08:02 +02:00
Mickael Bourgois 3a1ba29869
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-544-stderr' into w/8.8/improvement/CLDSRV-544-stderr 2024-07-03 19:07:41 +02:00
Mickael Bourgois dbb9b6d787
CLDSRV-544: bump version 8.7.48 2024-07-03 18:52:35 +02:00
Mickael Bourgois fce76f0934
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-544-stderr' into w/8.7/improvement/CLDSRV-544-stderr 2024-07-03 18:52:20 +02:00
Mickael Bourgois 0e39aaac09
CLDSRV: bump version 8.6.27 2024-07-03 18:48:28 +02:00
Mickael Bourgois 0b14c93fac
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-544-stderr' into w/8.6/improvement/CLDSRV-544-stderr 2024-07-03 18:48:12 +02:00
Mickael Bourgois ab2960bbf4
CLDSRV-544: bump version 2024-07-01 12:28:23 +02:00
Mickael Bourgois 7305b112e2
Merge remote-tracking branch 'origin/improvement/CLDSRV-544-stderr' into w/7.70/improvement/CLDSRV-544-stderr 2024-07-01 12:28:07 +02:00
Mickael Bourgois cd9e2e757b
CLDSRV-544: bump version 2024-06-30 21:15:52 +02:00
Mickael Bourgois ca0904f584
CLDSRV-544 Add timestamp on stderr utapi v1 2024-06-30 21:15:52 +02:00
Mickael Bourgois 0dd3dd35e6
CLDSRV-544: Add timestamp on stderr
The previous version would not exit the master of the cluster;
now it exits as it should.
2024-06-30 21:15:52 +02:00
bert-e bf7e4b7e23 Merge branch 'w/8.7/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:30 +00:00
bert-e 92f4794727 Merge branch 'w/8.6/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:29 +00:00
Jonathan Gramain c6ef85e3a1 Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-fixup-version' into w/8.6/bugfix/CLDSRV-547-fixup-version 2024-06-27 14:05:27 -07:00
Jonathan Gramain c0fe0cfbcf CLDSRV-547 [fixup] bump version to 7.70.49
Fixup the version, as 7.70.48 was already tagged
2024-06-27 11:42:37 -07:00
bert-e 9c936f2b83 Merge branch 'w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
bert-e d26bac2ebc Merge branch 'w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
Jonathan Gramain cfb9db5178 Merge branch 'w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:53:41 -07:00
Jonathan Gramain 2ce004751a Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:32:45 -07:00
Jonathan Gramain 539219e046 CLDSRV-547 bump cloudserver version 2024-06-27 10:27:45 -07:00
Jonathan Gramain be49e55db5 bf: CLDSRV-547 update redis config for utapi reindex
Update the redis configuration of utapi reindex to include a list of
sentinels, rather than a single sentinel (previously set to
"localhost" in Federation).

I took this opportunity to cleanup tech debt related to parsing redis
configuration, using "joi" for validation instead and making it common
across the three different places where redis config is parsed. Not
doing so would have required yet another copy-paste of dumb and
error-prone validation code. Added unit tests for the new validation.
2024-06-27 10:25:10 -07:00
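
An illustrative joi schema for such a shared redis configuration (the shape is an assumption for the sketch, not the actual cloudserver schema): either a sentinel list or a standalone host, validated in one place.

```ts
import Joi from 'joi';

const sentinelSchema = Joi.object({
    host: Joi.string().required(),
    port: Joi.number().port().required(),
});

const redisConfigSchema = Joi.object({
    name: Joi.string(),                          // sentinel group name
    password: Joi.string().allow(''),
    sentinels: Joi.array().items(sentinelSchema).min(1),
    host: Joi.string(),
    port: Joi.number().port(),
}).xor('sentinels', 'host');                     // sentinel list or single host, not both

const { error } = redisConfigSchema.validate({
    name: 'scality-s3',
    sentinels: [
        { host: 'sentinel-1.local', port: 16379 },
        { host: 'sentinel-2.local', port: 16379 },
    ],
});
if (error) {
    throw error;
}
```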
bert-e e6b240421b Merge branch 'w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.8/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
bert-e 81739e3ecf Merge branch 'w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
Jonathan Gramain c475503248 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-25 18:40:18 -07:00
bert-e 7acbd5d2fb Merge branch 'bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:39:02 +00:00
Jonathan Gramain 8d726322e5 CLDSRV-549 restore 'git.commit-sha' and 'git.repository' labels
Add back the 'git.commit-sha' and 'git.repository' labels to pushed
images, which were not attached anymore after the change of registry.
2024-06-25 18:26:54 -07:00
williamlardier 4f7aa54886 CLDSRV-541: bump project version 2024-06-13 13:58:54 +02:00
williamlardier 0117a5b0b4 CLDSRV-541: add unit test for deleteobjects authz 2024-06-13 13:58:54 +02:00
williamlardier f679831ba2 CLDSRV-541: update unit tests 2024-06-13 13:56:18 +02:00
williamlardier bb162ca7d3 CLDSRV-541: send request context in deleteobjects to get quota information 2024-06-13 11:58:33 +02:00
williamlardier 0c6dfc7b6e CLDSRV-537: bump project version 2024-05-31 13:47:26 +02:00
williamlardier d608d849df CLDSRV-537: bump checkout version for alerts 2024-05-31 13:47:26 +02:00
williamlardier 2cb63f58d4 CLDSRV-537: bump action-prom-render-test version 2024-05-31 13:44:05 +02:00
williamlardier 51585712f4 CLDSRV-537: do not raise quota error if no quota is defined
This ensures fresh installs, or buckets that get emptied, do
not trigger the alert by mistake
2024-05-31 13:44:05 +02:00
bert-e 61eb24e46f Merge branch 'w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a34b162782 Merge branch 'w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.8/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a9e50fe046 Merge branch 'w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
bert-e 4150a8432e Merge branch 'bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
Taylor McKinnon 7e70ff9cbc Disable git clone protection to work around git bug affecting git-lfs 2024-05-22 10:05:17 -07:00
bert-e 09dc45289c Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:31 +00:00
bert-e 47c628e0e1 Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:30 +00:00
Nicolas Humbert a1f4d3fe8a CLDSRV-529 use shorthand utapi dependency format 2024-05-17 15:10:40 +02:00
williamlardier 926242b077 CLDSRV-553: bump project version 2024-05-17 12:35:59 +02:00
williamlardier aa2aac5db3 CLDSRV-553: functional restore test to simulate cold backend calls 2024-05-17 12:35:59 +02:00
williamlardier f2e2d82e51 CLDSRV-553: unit test the onlyCheckQuota flag 2024-05-17 12:35:59 +02:00
williamlardier 88ad86b0c6 CLDSRV-553: adapt calls to quota evaluation
When the API is being called by a cold backend, the
x-scal-s3-version-id header is set. In this case, the quotas must
be evaluated with a 0 inflight.
2024-05-17 12:35:59 +02:00
bert-e 8f25892247 Merge branch 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:32 +00:00
bert-e 9ac207187b Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:31 +00:00
Anurag Mittal 624a04805f
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-529/bump_utapi' into w/8.6/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:40:00 +02:00
Anurag Mittal ba99933765
Merge remote-tracking branch 'origin/bugfix/CLDSRV-529/bump_utapi' into w/7.70/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:36:36 +02:00
williamlardier 38d1ac1d2c CLDSRV-553: conditionally force evaluating quotas with 0 inflight
A corner case was found, where any PUT from the cold backend would
fail if the quota is already exceeded, as the storage was reserved
for the restore, but the restore itself requires some more bytes
as inflights when evaluating quotas. By passing a flag in the quota
evaluation function, we ensure that we can, in these cases,
evaluate the quotas with 0 inflight.
2024-05-17 08:06:35 +02:00
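
A simplified sketch of that rule (the header name follows the commit text; the helper itself is illustrative, not the actual cloudserver code): a call coming back from the cold backend is detected via the header and the quota is re-checked with 0 inflight bytes.

```ts
// Illustrative only: when the cold backend calls back with the
// x-scal-s3-version-id header, the bytes were already reserved for the
// restore, so the quota is evaluated with 0 inflight.
function quotaInflight(headers: Record<string, string | undefined>, requestedBytes: number): number {
    const calledByColdBackend = headers['x-scal-s3-version-id'] !== undefined;
    return calledByColdBackend ? 0 : requestedBytes;
}

console.log(quotaInflight({ 'x-scal-s3-version-id': '12345' }, 1024)); // 0
console.log(quotaInflight({}, 1024));                                  // 1024
```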
Taylor McKinnon 4f34a34a11 bf(CLDSRV-529): Bump version 2024-05-16 12:19:45 -07:00
Taylor McKinnon 53f2a159fa bf(CLDSRV-529): Bump utapi 2024-05-16 12:18:24 -07:00
Maha Benzekri 63f6a75a86
CLDSRV-530: bump project version 2024-05-10 18:36:01 +02:00
Maha Benzekri 41acc7968e
CLDSRV-530: from accountwithQuota to accountWithQuotaCount 2024-05-10 18:32:07 +02:00
williamlardier c98c5207fc CLDSRV-520: bump project version 2024-05-10 09:51:02 +02:00
williamlardier 615ee393a4 CLDSRV-520: fix federation image with tsc 2024-05-10 09:51:02 +02:00
williamlardier 97dfc699aa CLDSRV-520: bump cloudserver version 2024-05-10 08:12:35 +02:00
williamlardier 76786282d1 CLDSRV-520: deduplicate quota logic 2024-05-10 08:12:35 +02:00
williamlardier a19d6524be CLDSRV-520: generic quota retrieval latency dashboard 2024-05-10 08:12:35 +02:00
williamlardier bbf6dfba22 CLDSRV-520: monitor quota cleanup
The finalization of the quota logic will always be executed. Some tests
are added to ensure the inflights are only cleaned when they are
enabled and an error happens in the API.
In any case, this ensures we monitor quotas in a single place,
for each executed action, and correctly compute the total
duration of the quota impact on the API.
2024-05-10 08:11:27 +02:00
williamlardier f0663fd507 CLDSRV-520: add dashboards 2024-05-10 08:11:27 +02:00
williamlardier d4decbbd6c CLDSRV-520: add alerts 2024-05-10 08:11:27 +02:00
williamlardier 288b2b7b87 CLDSRV-520: observe number of buckets and accounts with quota 2024-05-10 08:11:27 +02:00
williamlardier ccf9b62e59 CLDSRV-520: observe metrics during quota evaluations 2024-05-10 08:11:27 +02:00
williamlardier 9fc2d552ae CLDSRV-520: add metrics for quota 2024-05-07 17:56:24 +02:00
williamlardier d7cc4cf7d5 CLDSRV-515: adapt dockerfile for scubaclient 2024-05-07 16:24:25 +02:00
williamlardier 334d33ef44 CLDSRV-515: unit testing 2024-05-07 16:24:25 +02:00
williamlardier 989b0214d9 CLDSRV-515: functional testing 2024-05-07 16:21:13 +02:00
williamlardier 04d0730f97 CLDSRV-515: clear inflights in case of quota exceeded
- If the quotas are evaluated successfully and inflights are
  enabled, the quota service will store the information
  and persist it until the next update of the utilization metrics.
  In this case, any API that fails after authorization would
  still count the bytes, even if nothing was
  written. To overcome that, we call a function from the quota
  evaluation logic to erase anything that was written during the
  authorization.
2024-05-07 16:21:13 +02:00
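
A hypothetical sketch of that cleanup path, with placeholder callbacks standing in for the real API and quota-service calls:

```ts
// Release the reserved inflight bytes when the API fails after quota
// authorization, so nothing stays counted until the next metrics refresh.
async function runApiWithQuota(
    api: () => Promise<void>,
    releaseInflights: () => Promise<void>,
    inflightsEnabled: boolean,
): Promise<void> {
    try {
        await api();
    } catch (err) {
        if (inflightsEnabled) {
            // nothing was actually written, so give the reserved bytes back
            await releaseInflights();
        }
        throw err;
    }
}
```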
williamlardier fbc642c022 CLDSRV-515: evaluate quotas
Quotas are evaluated:
- As part of the authorization process, after both the bucket and
  the object are authorized. The checks are skipped if the API does
  not need any quota evaluation or if the inflight bytes are 0 (i.e.,
  no data added, so no need to check the quota).
- The Copy APIs will evaluate the quotas when the source object is
  checked. In this particular case, the action is objectGet, so a
  flag is passed to force the quota evaluation logic. A subsequent
  check is done in the logic.
- The restoreObject API has a special case where the extension of
  the restoration duration would still cause the evaluation of the
  quotas, causing a potential increase in the inflights stored. We
  detect this case and remove any added inflight.
2024-05-07 16:21:13 +02:00
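
A sketch of the skip/force rules listed above (parameter names are assumptions, not the actual function signature):

```ts
// Illustrative decision helper: skip when the API needs no quota check or
// adds no bytes, but force the check for copy sources even though the
// evaluated action is objectGet.
function shouldEvaluateQuota(apiNeedsQuota: boolean, inflightBytes: number, forceForCopySource = false): boolean {
    if (forceForCopySource) {
        return true;
    }
    return apiNeedsQuota && inflightBytes > 0;
}
```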
williamlardier 104435f0b6 CLDSRV-515: implement the quota logic as an helper file 2024-05-07 16:21:13 +02:00
williamlardier a362ac202e CLDSRV-515: bootstrap scuba on startup 2024-05-07 16:21:13 +02:00
williamlardier 1277e58150 CLDSRV-515: create a wrapper for scubaclient and quota service 2024-05-07 16:21:13 +02:00
williamlardier 7727ccf5f0 CLDSRV-515: add configuration for quotas
- Quota service is generic. We only support scuba backend now,
  but we can add others later, if needed, as long as they share
  the same implementation as the scuba client.
- Scuba configuration is passed for the scubaclient tool.
- Ability to disable the inflights is provided. This changes the
  behavior of the quota checks, so that the inflights won't be
  part of the request to the utilization metrics services. This
  reduces the complexity of the quota evaluation logic in case
  of error, as no cleanup will be needed in this case. This,
  however, requires a backend that can provide up-to-date metrics
  (i.e., <2s).
2024-05-05 15:31:34 +02:00
williamlardier 71860fc90c CLDSRV-515: do not recreate variable at every authz 2024-05-05 15:31:04 +02:00
williamlardier e504b52de7 CLDSRV-515: bump arsenal and vaultclient, introduce scubaclient 2024-05-02 15:09:23 +02:00
Maha Benzekri b369a47c4d CLDSRV-516: add tests 2024-05-02 14:44:31 +02:00
Maha Benzekri b4fa81e832 CLDSRV-516: implement BucketDeleteQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 1e03d53879 CLDSRV-516: implement BucketGetQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 63e502d419 CLDSRV-516: implement UpdateBucketQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri d2a31dc20a CLDSRV-516: specify the signature version of old auth tests
This is unrelated to the quotas, but blocks the CI.
2024-05-02 14:44:28 +02:00
Maha Benzekri f24411875f CLDSRV-516: introduce quota APIs in router 2024-05-02 14:28:56 +02:00
Maha Benzekri 4fd7faa6a3 CLDSRV-516: bump arsenal version 2024-05-02 14:27:44 +02:00
Francois Ferrand 118aaba702
Use sproxyd from ghcr
Issue: CLDSRV-524
2024-04-18 20:38:37 +02:00
Francois Ferrand e4442fdc52
Merge branch 'w/8.7/improvement/CLDSRV-524' into w/8.8/improvement/CLDSRV-524 2024-04-16 18:36:03 +02:00
Francois Ferrand 7fa199741f
Merge branch 'w/8.6/improvement/CLDSRV-524' into w/8.7/improvement/CLDSRV-524 2024-04-16 18:35:32 +02:00
Francois Ferrand f7f95af78f
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 18:34:49 +02:00
Francois Ferrand 2dc053a784
Merge branch 'w/7.70/improvement/CLDSRV-524' into w/8.6/improvement/CLDSRV-524 2024-04-16 17:57:54 +02:00
Francois Ferrand cc9bb9047e
Merge branch 'improvement/CLDSRV-524' into w/7.70/improvement/CLDSRV-524 2024-04-16 16:58:57 +02:00
Francois Ferrand b824fc0828
Use official docker build steps
The docker-build step from `scality/workflows/` fails to log in to
ghcr, as it picks up the old registry creds.

Issue: CLDSRV-524
2024-04-16 16:54:51 +02:00
Francois Ferrand a2e6d91cf2
Build pykmip image
Issue: CLDSRV-524
2024-04-16 16:54:41 +02:00
Francois Ferrand c1060853dd
Upgrade actions
- artifacts@v4
- cache@v4
- checkout@v4
- codeql@v3
- dependency-review@v4
- login@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5

Issue: CLDSRV-524
2024-04-16 16:54:23 +02:00
Francois Ferrand 227d6edd09
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 16:54:02 +02:00
bert-e b4754c68ea Merge branches 'w/8.8/bugfix/CLDSRV-518/duplication' and 'q/5548/8.7/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.8 2024-03-25 12:56:17 +00:00
bert-e 11aea5d93b Merge branches 'w/8.7/bugfix/CLDSRV-518/duplication' and 'q/5548/8.6/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.7 2024-03-25 12:56:17 +00:00
bert-e 0c50a5952f Merge branches 'w/8.6/bugfix/CLDSRV-518/duplication' and 'q/5548/7.70/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.6 2024-03-25 12:56:16 +00:00
bert-e 4a32e05855 Merge branches 'w/7.70/bugfix/CLDSRV-518/duplication' and 'q/5548/7.10/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/7.70 2024-03-25 12:56:16 +00:00
bert-e 402ed21b14 Merge branch 'bugfix/CLDSRV-518/duplication' into q/7.10 2024-03-25 12:56:16 +00:00
Nicolas Humbert a22719ed47 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-518/duplication' into w/8.8/bugfix/CLDSRV-518/duplication 2024-03-20 08:48:00 +01:00
Nicolas Humbert 41975d539d Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-518/duplication' into w/8.7/bugfix/CLDSRV-518/duplication 2024-03-19 18:12:42 +01:00
Nicolas Humbert c6724eb811 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-518/duplication' into w/8.6/bugfix/CLDSRV-518/duplication 2024-03-19 05:54:35 +01:00
Nicolas Humbert d027006938 Merge remote-tracking branch 'origin/bugfix/CLDSRV-518/duplication' into w/7.70/bugfix/CLDSRV-518/duplication 2024-03-14 20:50:08 +01:00
Nicolas Humbert 92cfd47572 CLDSRV-518 Duplication of version ID in metadata 2024-03-14 16:33:25 +01:00
bert-e 8796bf0f44 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
bert-e 735fcd04ef Merge branch 'w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
Jonathan Gramain c5522685b2 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 14:04:21 -07:00
Jonathan Gramain 48df7df271 Merge remote-tracking branch 'origin/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 14:02:52 -07:00
Jonathan Gramain e028eb227f CLDSRV-513 bump cloudserver 2024-03-13 14:00:55 -07:00
Nicolas Humbert caf3146662 CLDSRV-518 fix Ruby dependency: excon
(cherry picked from commit cc1607eaaecb97ab5c48da15f1b1449fe7a4680f)
2024-03-13 13:58:41 -07:00
bert-e 1dee707eb8 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 17:36:39 +00:00
Jonathan Gramain 2c8d69c20a Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 10:18:39 -07:00
Jonathan Gramain 0b2b6ceeb5 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 09:46:43 -07:00
Jonathan Gramain f4b3f39dc6 Merge remote-tracking branch 'origin/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 09:39:49 -07:00
Jonathan Gramain 84260340d0 CLDSRV-513 bump arsenal dependency 2024-03-13 09:34:30 -07:00
Jonathan Gramain e531abc346 bf: CLDSRV-513 fix request logger for batchDelete
Arsenal's `DataWrapper.batchDelete()` now already creates a request
logger on which it calls `end()` to get the elapsed time. Since
there's no longer a need to create one before the call, remove the
corresponding code.

Note that the main fix is the arsenal version bump which, by creating
a request logger, naturally fixes the forgotten case in
`checkHashMatchMD5`.
2024-03-13 09:31:10 -07:00
Jonathan Gramain 20f6e3089b CLDSRV-513 bump werelogs dependency 2024-03-13 09:31:10 -07:00
bert-e 9dc34f2155 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:14 +00:00
bert-e 08a4c3ade3 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:13 +00:00
Nicolas Humbert d5c731856b Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:51:36 +01:00
Nicolas Humbert 584c94692b Merge remote-tracking branch 'origin/bugfix/CLDSRV-501/putmetadata' into w/7.70/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:16:03 +01:00
Nicolas Humbert a0e5257c75 CLDSRV-501 bump arsenal 2024-03-07 10:09:28 +01:00
bert-e 5435c14116 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:40 +00:00
bert-e 38c44ea874 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:39 +00:00
Nicolas Humbert 4200346dd2 CLDSRV-501 skip tests related to Backbeat routes for replication 2024-03-01 17:16:36 +01:00
bert-e 5472d0da59 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
bert-e cdc0bb1128 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
Nicolas Humbert 795f8bcf1c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:44:42 +01:00
Nicolas Humbert 9371d8d734 Merge remote-tracking branch 'origin/bugfix/CLDSRV-501/putmetadata' into w/7.70/bugfix/CLDSRV-501/putmetadata 2024-02-29 08:56:30 +01:00
Nicolas Humbert 3f31c7f3a1 CLDSRV-501 PutMetadata should write metadata on top of a null version 2024-02-27 14:29:35 +01:00
KillianG 39cba3ee6c
Merge remote-tracking branch 'origin/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust' into w/8.8/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust 2024-02-27 11:31:55 +01:00
KillianG a00952712f
Bump 8.7.47
Issue: CLDSRV-512
2024-02-27 10:41:34 +01:00
KillianG a246e18e17
Update test for startRestore
Issue: CLDSRV-512
2024-02-27 10:26:19 +01:00
KillianG 3bb3a4d161
Use scaledMsPerDay when restore-adjust
Use scaledMsPerDay when restoring an object that has already been restored, to be able to make time go faster for testing purposes

Issue: CLDSRV-512
2024-02-27 10:26:11 +01:00
bert-e c6ba7f981e Merge branches 'w/8.8/bugfix/CLDSRV-498/null' and 'q/5526/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.8 2024-02-21 13:57:14 +00:00
bert-e 69c82da878 Merge branches 'w/8.6/bugfix/CLDSRV-498/null' and 'q/5526/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.6 2024-02-21 13:57:13 +00:00
bert-e 762ae5a0ff Merge branches 'w/8.7/bugfix/CLDSRV-498/null' and 'q/5526/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.7 2024-02-21 13:57:13 +00:00
bert-e 89dfc794a6 Merge branch 'w/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/q/7.70 2024-02-21 13:57:12 +00:00
bert-e 3205d117f5 Merge branches 'w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.8 2024-02-20 13:05:07 +00:00
bert-e 4eafae44d8 Merge branches 'w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/7.70/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.6 2024-02-20 13:05:06 +00:00
bert-e 4cab3c84f3 Merge branches 'w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.7 2024-02-20 13:05:06 +00:00
bert-e e3301a2db9 Merge branch 'bugfix/CLDSRV-508-fix-bucket-tagging' into q/7.70 2024-02-20 13:05:05 +00:00
williamlardier 0dcc93cdbe Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:49:56 +01:00
williamlardier 2f2f91d6e8 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:48:05 +01:00
williamlardier a28b141dfb Merge remote-tracking branch 'origin/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:43:22 +01:00
williamlardier 46fe061895 CLDSRV-508: bump project version 2024-02-20 12:44:23 +01:00
williamlardier 34202eaa62 CLDSRV-508: add tests for bucket tagging APIs 2024-02-20 12:44:07 +01:00
williamlardier 4d343fe468 CLDSRV-508: standardize XML with object tagging API 2024-02-20 12:42:34 +01:00
williamlardier 229e641f88 CLDSRV-508: add missing parameters in bucket tagging APIs 2024-02-20 12:42:18 +01:00
bert-e 1433973e5c Merge branch 'w/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e 201170b1ed Merge branch 'w/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e f13985094e Merge branch 'w/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.6/bugfix/CLDSRV-498/null 2024-02-20 11:24:07 +00:00
Nicolas Humbert 395033acd2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-498/null' into w/7.70/bugfix/CLDSRV-498/null 2024-02-20 12:13:38 +01:00
Nicolas Humbert 632ef26826 CLDSRV-498 Handling isNull master version with no versionId
In certain cases, a master version may not have a versionId and be set as null (isNull:true). For instance, this occurs when a customer:

1. Creates a bucket.
2. Puts an object to it.
3. Puts bucket versioning.
4. Puts metadata (BackbeatClient.putMetadata), which results in the master version being set to null (isNull:true) with no versionId.

Currently, if an object is put after these steps, CloudServer fails to appropriately generate a null version. This is because CloudServer doesn't handle situations where the master version is set to isNull:true with no versionId.

The correct approach when an object is put should be to:

1. Create the new version key.
2. Create a new null version key, assigning it a “default non-version version id”.
3. Set this “default non-version version id” in the `nullVersionId` field of the master key.
2024-02-20 12:04:53 +01:00
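
A heavily simplified sketch of those three steps; the key layout and helpers below are assumptions for illustration, not the actual Arsenal/CloudServer metadata model:

```ts
type ObjectMD = { isNull?: boolean; versionId?: string; nullVersionId?: string };

const metadata = new Map<string, ObjectMD>();
const generateVersionId = (): string => `v-${Date.now()}`; // stand-in for the real ID generator

function handleNullMasterWithoutVersionId(masterKey: string, master: ObjectMD): ObjectMD {
    if (master.isNull && master.versionId === undefined) {
        // 1. assign the null version a "default non-version version id"
        const nullVersionId = generateVersionId();
        // 2. write it under its own (null) version key
        metadata.set(`${masterKey}\u0000${nullVersionId}`, { ...master, versionId: nullVersionId });
        // 3. reference it from the master so later puts can find and preserve it
        return { ...master, nullVersionId };
    }
    return master;
}
```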
bert-e 242b2ec85a Merge branches 'w/8.8/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.7/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.8 2024-02-19 15:00:59 +00:00
bert-e 3186a97113 Merge branches 'w/8.7/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.6/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.7 2024-02-19 15:00:59 +00:00
bert-e 3861b8d317 Merge branch 'q/5534/7.10/bugfix/CLDSRV-505-ip-handling-fix' into tmp/normal/q/7.70 2024-02-19 15:00:58 +00:00
bert-e bb278f7d7e Merge branches 'w/8.6/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/7.70/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.6 2024-02-19 15:00:58 +00:00
bert-e 3b9309490d Merge branch 'bugfix/CLDSRV-505-ip-handling-fix' into q/7.10 2024-02-19 15:00:57 +00:00
Will Toozs 0118dfabbb
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-505-ip-handling-fix' into w/8.8/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:40:58 +01:00
Will Toozs ff40dfaadf
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-505-ip-handling-fix' into w/8.7/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:25:18 +01:00
Will Toozs 9a31236da0
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-505-ip-handling-fix' into w/8.6/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:22:08 +01:00
Will Toozs 61ebacfbf3
Merge remote-tracking branch 'origin/bugfix/CLDSRV-505-ip-handling-fix' into w/7.70/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 14:26:43 +01:00
Will Toozs aa646ced28
CLDSRV-505: bump CS version 2024-02-19 12:00:41 +01:00
Will Toozs f2ca37b5fb
CLDSRV-505: update ip check tests for arrays 2024-02-19 12:00:41 +01:00
Will Toozs 9d74cedde8
CLDSRV-505: update ip check for arrays 2024-02-19 12:00:41 +01:00
bert-e 9c99a6980f Merge branches 'w/8.8/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.8 2024-02-19 10:16:26 +00:00
bert-e d4e255781b Merge branches 'w/8.7/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.7 2024-02-19 10:16:26 +00:00
bert-e f5763d012e Merge branches 'w/8.6/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/7.70/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.6 2024-02-19 10:16:24 +00:00
bert-e 8fb740cf09 Merge branch 'bugfix/CLDSRV-507-bp-fixes' into q/7.10 2024-02-19 10:16:23 +00:00
bert-e 55c8d89de2 Merge branches 'w/7.70/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/7.10/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/7.70 2024-02-19 10:16:23 +00:00
bert-e 1afaaec0ac Merge branch 'w/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.8/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:24 +00:00
bert-e e20e458971 Merge branch 'w/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.7/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:23 +00:00
williamlardier 56e52de056 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-507-bp-fixes' into w/8.6/bugfix/CLDSRV-507-bp-fixes 2024-02-19 10:01:09 +01:00
williamlardier d9fc4aae50 Merge remote-tracking branch 'origin/bugfix/CLDSRV-507-bp-fixes' into w/7.70/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:54:06 +01:00
williamlardier 08de09a2ab CLDSRV-507: bump arsenal version 2024-02-19 09:48:13 +01:00
bert-e bef9220032 Merge branches 'w/8.8/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.8 2024-02-15 18:43:31 +00:00
bert-e de20f1efdc Merge branches 'w/8.7/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.6/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.7 2024-02-15 18:43:31 +00:00
bert-e 4817f11f36 Merge branches 'w/8.6/bugfix/CLDSRV-497/putmetadata' and 'q/5525/7.70/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.6 2024-02-15 18:43:30 +00:00
bert-e a6b283f5a2 Merge branch 'bugfix/CLDSRV-497/putmetadata' into q/7.10 2024-02-15 18:43:29 +00:00
bert-e 3f810a7596 Merge branches 'w/7.70/bugfix/CLDSRV-497/putmetadata' and 'q/5525/7.10/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/7.70 2024-02-15 18:43:29 +00:00
bert-e b89d19c9f8 Merge branch 'w/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:58:27 +00:00
Nicolas Humbert 4dc9788629 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-497/putmetadata' into w/8.7/bugfix/CLDSRV-497/putmetadata 2024-02-15 18:43:28 +01:00
Nicolas Humbert 65a891d6f8 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-497/putmetadata' into w/8.6/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:51:48 +01:00
bert-e 2ecca4feef Merge branch 'bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/7.70/bugfix/CLDSRV-497/putmetadata 2024-02-15 16:34:04 +00:00
Nicolas Humbert c52a3a6e44 CLDSRV-497 Fix BackbeatClient.putMetadata with versionID
Issue: When Cloudserver BackbeatClient.putMetadata() option fields are sent to Metadata through the query string, they are converted to strings. As a result, Metadata interprets the value undefined in the versionId field as an empty string ('').

Background: Previously, the 'crrExistingObject' script used this bug/behavior as a workaround to generate an internal version ID to replicate null version (= objects created before versioning was enabled). However, this approach has led to inconsistencies, occasionally resulting in the creation of multiple null internal versions.

Resolution: To address this issue, the 'crrExistingObject' workaround will be deprecated. Instead, Backbeat will be enhanced to support the replication of null versions directly, thereby ensuring more reliable and consistent behavior in handling versioning.
2024-02-15 17:31:23 +01:00
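
A small sketch of the serialization pitfall described above, using Node's querystring module (the guard at the end is illustrative, not the actual fix, which is done in Backbeat/Metadata):

```ts
import { stringify } from 'node:querystring';

// An undefined option serialized through the query string reaches the other
// side as an empty string:
console.log(stringify({ versionId: undefined })); // "versionId=" -> parsed back as ''

// Hypothetical guard: only put versionId on the query string when it is set.
function buildQuery(options: { versionId?: string }): string {
    const params: Record<string, string> = {};
    if (options.versionId !== undefined) {
        params.versionId = options.versionId;
    }
    return stringify(params);
}

console.log(buildQuery({}));                      // ""
console.log(buildQuery({ versionId: 'abc123' })); // "versionId=abc123"
```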
williamlardier d82965ff78 CLDSRV-507: normalize request types 2024-02-15 09:26:45 +01:00
williamlardier f488a65f15 CLDSRV-507: support no object metadata for MPU APIs resource authz
The MPU APIs deal with object resources. Back when Bucket
Policies and ACLs were only evaluated when there was an
Allow from IAM, there was no need to handle this case.
However, these APIs now evaluate the bucket policies and
ACLs, and because there is no object metadata associated, we
end up allowing requests without any permission by relying
on the existing code, where the permission is changed and becomes
"bucketGet". We must treat MPU APIs as distinct APIs and check
the right permission. For that, we rely on the updated bucket
policy action map in arsenal with these APIs, and ensure that we
properly map it to the existing logic, where we only checked the
"objectPut" permission, to handle these 3 specific APIs:

- initiate MPU
- upload part
- complete MPU
2024-02-14 15:16:48 +01:00
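
A hypothetical sketch of the permission mapping described above (the action names follow the commit text; the map itself is an assumption, not the arsenal action map):

```ts
// With no object metadata, the three MPU APIs must be checked against an
// objectPut-style permission instead of silently falling back to "bucketGet".
const mpuRequestTypeToPermission: Record<string, string> = {
    initiateMultipartUpload: 'objectPut',
    objectPutPart: 'objectPut',
    completeMultipartUpload: 'objectPut',
};

function permissionToCheck(requestType: string, hasObjectMetadata: boolean): string {
    if (!hasObjectMetadata && requestType in mpuRequestTypeToPermission) {
        return mpuRequestTypeToPermission[requestType];
    }
    return requestType; // by default the request type is its own permission
}
```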
williamlardier 40a575a717 CLDSRV-507: use correct action for put part APIs 2024-02-14 15:16:48 +01:00
williamlardier fea82f15ea CLDSRV-507: use correct action for MPU 2024-02-14 15:16:48 +01:00
bert-e 06dc042154 Merge branches 'w/8.8/improvement/CLDSRV-502' and 'q/5528/8.7/improvement/CLDSRV-502' into tmp/octopus/q/8.8 2024-02-08 13:49:18 +00:00
bert-e aa4643644a Merge branches 'w/8.7/improvement/CLDSRV-502' and 'q/5528/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.7 2024-02-08 13:49:18 +00:00
bert-e 89edf7e3d0 Merge branch 'w/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.6 2024-02-08 13:49:18 +00:00
Francois Ferrand 4c7d3ae4bc
Merge branch 'w/8.7/improvement/CLDSRV-502' into w/8.8/improvement/CLDSRV-502 2024-02-05 18:50:27 +01:00
Francois Ferrand 23883dae8b
Merge branch 'w/8.6/improvement/CLDSRV-502' into w/8.7/improvement/CLDSRV-502 2024-02-05 18:50:12 +01:00
Francois Ferrand e616ffa374
gha: fix test alert trigger to match other premerge build
Issue: CLDSRV-502
2024-02-05 18:49:31 +01:00
Francois Ferrand 515c20e4cf
Merge branch 'w/7.70/improvement/CLDSRV-502' into w/8.6/improvement/CLDSRV-502 2024-02-05 18:48:18 +01:00
Francois Ferrand f8eedddebf
Merge branch 'improvement/CLDSRV-502' into w/7.70/improvement/CLDSRV-502 2024-02-05 18:48:01 +01:00
Francois Ferrand f3654e4fb8
Fix trigger for codeql jobs
Build on pull request & bert-e queue build, and skip rebuild when PR
lands on development branch.

Issue: CLDSRV-502
2024-02-05 18:47:30 +01:00
Francois Ferrand 517fb99190
gha: add release name to release job
Issue: CLDSRV-502
2024-02-05 18:46:02 +01:00
Francois Ferrand 531c83a359
Release 8.8.17
Issue: CLDSRV-500
2024-02-05 17:35:43 +01:00
Francois Ferrand b84fa851f7
Merge branch 'w/8.7/bugfix/CLDSRV-500' into w/8.8/bugfix/CLDSRV-500 2024-02-05 17:35:20 +01:00
Francois Ferrand 4cb1a879f7
Release 8.7.44
Issue: CLDSRV-500
2024-02-05 17:34:45 +01:00
Francois Ferrand 7ae55b20e7
Merge branch 'bugfix/CLDSRV-500' into w/8.7/bugfix/CLDSRV-500 2024-02-05 17:32:53 +01:00
Francois Ferrand d0a6fa17a5
Release 8.6.24
Issue: CLDSRV-500
2024-02-05 17:31:36 +01:00
Francois Ferrand 7275459f70
Use rate interval in `Request time` panel
- Should use $__rate_interval, which handles small time ranges.
- Regenerating the dashboard also fixes the 'latency per s3 action'
  panel.

Issue: CLDSRV-500
2024-02-01 15:49:29 +01:00
Hervé Dombya 363afcd17f CLDSRV-473: fix cors issues in getVeeamFile 2024-01-26 15:59:10 +01:00
Frédéric Meinnel 1cf0250ce9 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.8/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:06:05 +01:00
Frédéric Meinnel 20d0b38d0b Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:05:39 +01:00
Frédéric Meinnel 9988a8327a Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 14:06:31 +01:00
Frédéric Meinnel b481d24637 Merge remote-tracking branch 'origin/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/7.70/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 14:01:59 +01:00
Frédéric Meinnel 71625774c1 CLDSRV-494: version bump 2024-01-23 13:42:36 +01:00
Frédéric Meinnel 9b9338f2b8 CLDSRV-494: Fix generateV4Headers for HTTP PUT with body 2024-01-23 13:42:31 +01:00
Frédéric Meinnel 601619f200 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.8/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:24:05 +01:00
Frédéric Meinnel a92e71fd50 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:22:55 +01:00
Frédéric Meinnel 8802ea0617 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:21:42 +01:00
Frédéric Meinnel acc5f74787 Merge remote-tracking branch 'origin/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/7.70/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:20:10 +01:00
Frédéric Meinnel e3c093f352 CLDSRV-493: Version bump 2024-01-17 13:18:32 +01:00
Frédéric Meinnel e17383a678 CLDSRV-493: Fix dates accepted in lifecycle configuration 2024-01-17 13:18:32 +01:00
bert-e 43f62b847c Merge branch 'w/8.7/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.8/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e a031905bba Merge branch 'w/8.6/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.7/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e 13ad6881f4 Merge branch 'bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.6/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:38 +00:00
Mickael Bourgois dea5173075
CLDSRV-492: remove duplicate error monitoring 2024-01-16 21:34:26 +01:00
Mickael Bourgois b3f96198fe
CLDSRV-492: update monitoring head 2024-01-15 14:48:08 +01:00
Mickael Bourgois 5e2dd8cccb
Merge remote-tracking branch 'origin/development/7.70' into bugfix/CLDSRV-492-head-monitoring 2024-01-15 11:56:50 +01:00
bert-e cd2406b827 Merge branches 'w/8.8/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.8 2024-01-15 09:47:24 +00:00
bert-e 62f707caff Merge branches 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.7 2024-01-15 09:47:23 +00:00
bert-e f01ef00a52 Merge branches 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.6 2024-01-15 09:47:23 +00:00
bert-e 30fb64e443 Merge branch 'bugfix/CLDSRV-489-redirect-folder-index' into q/7.10 2024-01-15 09:47:22 +00:00
bert-e 054107d8fb Merge branches 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/7.10/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/7.70 2024-01-15 09:47:22 +00:00
bert-e 848bf318fe Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:46 +00:00
bert-e 0beb48a1fd Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:45 +00:00
bert-e 618d4dffc7 Merge branches 'development/8.6' and 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:44 +00:00
bert-e b5aae192f7 Merge branches 'development/7.70' and 'bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/7.70/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:43 +00:00
Mickael Bourgois 557f3dcde6
CLDSRV-489: fix lint indentation 2024-01-12 10:07:39 +01:00
Mickael Bourgois 3291af36bb
CLDSRV-489: Apply style suggestions
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2024-01-12 09:53:57 +01:00
Will Toozs d274acd8ed
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-436-bump-version' into w/8.8/improvement/CLDSRV-436-bump-version 2024-01-11 13:10:57 +01:00
Will Toozs e6d9e8fc35
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-436-bump-version' into w/8.7/improvement/CLDSRV-436-bump-version 2024-01-11 11:50:25 +01:00
Will Toozs b08edefad6
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bump-version' into w/8.6/improvement/CLDSRV-436-bump-version 2024-01-11 11:24:50 +01:00
Will Toozs e9c353d62a
Merge remote-tracking branch 'origin/improvement/CLDSRV-436-bump-version' into w/7.70/improvement/CLDSRV-436-bump-version 2024-01-11 11:04:53 +01:00
Will Toozs c7c55451a1
CLDSRV-436: bump package version 2024-01-11 10:45:47 +01:00
bert-e 7bb004586d Merge branch 'w/8.7/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.8/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:49 +00:00
bert-e d48de67723 Merge branch 'w/8.6/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.7/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:48 +00:00
Will Toozs fa4dec01cb
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bp-conds' into w/8.6/improvement/CLDSRV-436-bp-conds 2024-01-10 22:00:37 +01:00
Will Toozs 4f79a9c59c
Merge remote-tracking branch 'origin/improvement/CLDSRV-436-bp-conds' into w/7.70/improvement/CLDSRV-436-bp-conds 2024-01-10 21:43:08 +01:00
Will Toozs 05c759110b
CLDSRV-436: update dummyRequest of unit tests 2024-01-10 21:02:15 +01:00
Will Toozs deae294a81
CLDSRV-436: unit test policy condition validation 2024-01-10 21:02:15 +01:00
Will Toozs ab587385e6
CLDSRV-436: add functional test cases for conditions 2024-01-10 21:01:44 +01:00
Will Toozs 6243911072
CLDSRV-436: update tests 2024-01-10 20:59:26 +01:00
Will Toozs da804054e5
CLDSRV-436: update put retention logic 2024-01-10 20:57:38 +01:00
Will Toozs 493a6da773
CLDSRV-436: update put policy logic 2024-01-10 20:57:38 +01:00
Will Toozs 7ecdd11783
CLDSRV-436: add conditions logic 2024-01-10 20:57:37 +01:00
Mickael Bourgois 7e53b67c90
CLDSRV-492: fix monitoring for website head
Match head as before the merge done in CLDSRV-482
2024-01-10 20:29:20 +01:00
bert-e b141c59bb7 Merge branch 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 0b79ecd942 Merge branch 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 86ece5c264 Merge branch 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:56 +00:00
Mickael Bourgois 0b79cd6af6
Merge remote-tracking branch 'origin/bugfix/CLDSRV-489-redirect-folder-index' into w/7.70/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 19:32:46 +01:00
Mickael Bourgois a51b5e0af3
CLDSRV-489: test redirect 302 on folder without / 2024-01-10 19:10:57 +01:00
bert-e 10ca6b98fa Merge branch 'w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.8/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
bert-e 171925732f Merge branch 'w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
Taylor McKinnon 6d36f9c867 Merge remote-tracking branch 'origin/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 10:04:49 -08:00
Taylor McKinnon 1a21c4f867 impr(CLDSRV-475): Bump version to 7.70.41 2024-01-10 10:02:36 -08:00
Taylor McKinnon 866dec1b81 impr(CLDSRV-475): Add isDeleteMarker to overhead fields 2024-01-10 10:02:15 -08:00
Mickael Bourgois 9491e82235
CLDSRV-489: redirect 302 on folder without /
If a key is not found, we must check whether key/index.html
is accessible; if so, redirect by appending a trailing /
to the key.

@see https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html#IndexDocumentsandFolders
2024-01-10 17:39:13 +01:00
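
A minimal sketch of that behaviour (the handler shape is an assumption; "exists" stands in for a metadata lookup):

```ts
async function websiteGet(
    key: string,
    exists: (k: string) => Promise<boolean>,
): Promise<{ status: number; location?: string }> {
    if (await exists(key)) {
        return { status: 200 };
    }
    // Folder requested without a trailing slash: if its index document
    // exists, answer 302 so the client retries with the slash appended.
    if (!key.endsWith('/') && await exists(`${key}/index.html`)) {
        return { status: 302, location: `/${key}/` };
    }
    return { status: 404 };
}
```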
bert-e 70e8b20af9 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 0ec5f4fee5 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 6c468a01d9 Merge branch 'w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:19 +00:00
bert-e 3d2b75f344 Merge branch 'bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/7.70/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:19 +00:00
Mickael Bourgois 5811fa5326
CLDSRV-485: fix linter in tests for 8.6 2024-01-10 13:50:11 +01:00
bert-e e600677545 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
bert-e 72e5da10b7 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
Mickael Bourgois de0e7e6449
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 13:15:29 +01:00
Mickael Bourgois 97b5ed6dd3
Merge remote-tracking branch 'origin/bugfix/CLDSRV-485-custom-err-redirect' into w/7.70/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:09:35 +01:00
Mickael Bourgois dad8a3ee37
Merge remote-tracking branch 'origin/development/7.10' into bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:02:54 +01:00
Mickael Bourgois 8aca658c5c
CLDSRV-485: bump arsenal 2024-01-10 11:52:27 +01:00
bert-e 759817c5a0 Merge branch 'w/8.7/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
bert-e 035c7e8d7f Merge branch 'w/8.6/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
Mickael Bourgois b8af1225d5
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-482-head-redirect-index' into w/8.6/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:28:13 +01:00
Mickael Bourgois 40faa5f3fa
Merge remote-tracking branch 'origin/bugfix/CLDSRV-482-head-redirect-index' into w/7.70/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:19:09 +01:00
Mickael Bourgois 1fc8622614
Merge remote-tracking branch 'origin/development/7.10' into bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:01:51 +01:00
Mickael Bourgois a0acefb4a8
CLDSRV-482: apply style suggestion
Co-authored-by: William <91462779+williamlardier@users.noreply.github.com>
2024-01-10 10:13:08 +01:00
bert-e de27a5b88e Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e a4cc5e45f3 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e 621cb33680 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:48 +00:00
bert-e b025443d21 Merge branch 'bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/7.70/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:48 +00:00
Mickael Bourgois d502a81284
CLDSRV-488: fix lint 2024-01-10 09:56:27 +01:00
bert-e 9a8b707e82 Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:32 +00:00
bert-e 002dbe0019 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e 59e52f6df2 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e b52f2356ba Merge branch 'bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/7.70/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:30 +00:00
Mickael Bourgois 60679495b6
CLDSRV-488: apply review suggestion
Co-authored-by: Jonathan Gramain <jonathan.gramain@scality.com>
2024-01-10 09:53:24 +01:00
Mickael Bourgois 9dfacd0827
CLDSRV-482: factorize website GET and HEAD 2024-01-09 18:45:02 +01:00
Mickael Bourgois 485ef1e9bb
CLDSRV-482: test routing and implicit index 2024-01-09 17:18:07 +01:00
Mickael Bourgois 5e041ca5e7
CLDSRV-482: fix head implicit index
The routing check must be performed before adding the index prefix,
to prevent matching a routing rule on the index
2024-01-09 17:18:07 +01:00
Mickael Bourgois 52137772d9
Merge branch 'development/7.10' into bugfix/CLDSRV-488-error-type-bp 2024-01-09 16:44:18 +01:00
Mickael Bourgois fcf193d033
CLDSRV-488: move website condition, replace flag 2024-01-09 16:40:55 +01:00
Mickael Bourgois fb61cad786
CLDSRV-485: test website redirect custom error 2024-01-08 18:00:32 +01:00
Mickael Bourgois b6367eb2b8
CLDSRV-485: website redirect from custom error doc 2024-01-08 17:58:09 +01:00
bert-e d803bdcadc Merge branch 'w/8.7/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.8/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:57 +00:00
bert-e 4f1b8f25b7 Merge branch 'w/8.6/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.7/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e 94363482c3 Merge branch 'w/7.70/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.6/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e 6b0a8cb9ed Merge branch 'bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/7.70/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:55 +00:00
Will Toozs 5dbf5d965f
CLDSRV-477: add tests 2024-01-08 14:43:41 +01:00
Will Toozs ebefc4b5b0
CLDSRV-477: change position of ACL check 2024-01-08 14:43:40 +01:00
Mickael Bourgois ac1c75e414
CLDSRV-488: test website 404 with bucket policy 2024-01-05 12:52:51 +01:00
Mickael Bourgois fee4f3a96e
CLDSRV-488: fix website 404 with bucket policy
If a bucket policy authorizes access to a
non-existent object, there should be a 404
and not a 403
2024-01-05 12:52:50 +01:00
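
A tiny sketch of the expected status codes (illustrative only):

```ts
// An authorized request for a missing object is a 404, not a 403;
// only an unauthorized request is a 403.
function websiteStatus(policyAllows: boolean, objectExists: boolean): number {
    if (!policyAllows) {
        return 403;
    }
    return objectExists ? 200 : 404;
}

console.log(websiteStatus(true, false)); // 404 (was wrongly 403 before the fix)
```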
bert-e e969eeaa20 Merge branches 'w/8.8/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.8 2024-01-05 11:24:59 +00:00
bert-e 2ee78bcf6a Merge branches 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.7 2024-01-05 11:24:58 +00:00
bert-e 64273365d5 Merge branches 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.6 2024-01-05 11:24:58 +00:00
bert-e 65c6bacd34 Merge branches 'w/7.70/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/7.10/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/7.70 2024-01-05 11:24:57 +00:00
bert-e d60d252eaf Merge branch 'bugfix/CLDSRV-490-bucket-policy-resource' into q/7.10 2024-01-05 11:24:57 +00:00
bert-e f31fe2f2bf Merge branch 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.8/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
bert-e ee47cece90 Merge branch 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.7/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
Mickael Bourgois 7a5cddacbc
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into w/8.6/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 12:08:54 +01:00
Mickael Bourgois baa6203b57
Merge remote-tracking branch 'origin/bugfix/CLDSRV-490-bucket-policy-resource' into w/7.70/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 12:04:25 +01:00
Mickael Bourgois 141056637b
CLDSRV-490: bump version 2024-01-05 11:51:49 +01:00
Mickael Bourgois 0f007e0489
CLDSRV-490: fix linting in tests for 8.6 2024-01-05 11:51:48 +01:00
Mickael Bourgois 2d50a76923
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-486-object-redirect-root' into w/8.8/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:59:20 +01:00
Mickael Bourgois 6b4f10ae56
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-486-object-redirect-root' into w/8.7/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:57:36 +01:00
Mickael Bourgois 23eaf89cc3
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-486-object-redirect-root' into w/8.6/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:55:48 +01:00
Mickael Bourgois d6a2144508
Merge remote-tracking branch 'origin/bugfix/CLDSRV-486-object-redirect-root' into w/7.70/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:49:20 +01:00
Mickael Bourgois 40dd3f37a4
Merge branch 'development/7.10' into bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:36:03 +01:00
Mickael Bourgois d3307654a6
CLDSRV-486: bump cloudserver version 2024-01-04 16:34:10 +01:00
Mickael Bourgois e342a90b48
CLDSRV-486: bump arsenal version 2024-01-04 16:29:04 +01:00
williamlardier dbda5f16a6 CLDSRV-407: bump mongodb to v5.0 in CI 2024-01-04 14:04:20 +01:00
Mickael Bourgois d4a4825668
CLDSRV-490: test bucket policy with request 2024-01-04 10:18:36 +01:00
Mickael Bourgois 83b9e9a775
CLDSRV-490: fix missing request for bucket policy
If the request is missing, the bucket policy ignores the resource
and applies the effect to any matching principal and action
2024-01-03 18:24:54 +01:00
Maha Benzekri 2959c950dd
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.8/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:36:20 +01:00
Maha Benzekri 462ddf7ef1
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:34:44 +01:00
Maha Benzekri fda42e7399
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:32:41 +01:00
Maha Benzekri edbd6caeb4
Merge remote-tracking branch 'origin/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/7.70/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 09:38:25 +01:00
Maha Benzekri 1befaa1f28
CLDSRV-480: CLDSRV version bump 2024-01-03 09:35:19 +01:00
Maha Benzekri 0cefca831d
CLDSRV-480: condition check fix for isImplicit 2024-01-03 09:34:19 +01:00
Jonathan Gramain ea7b69e313 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:26:27 -08:00
Jonathan Gramain 8ec1c2f2db Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:08:40 -08:00
Jonathan Gramain 3af6ca5f6d Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:06:45 -08:00
Jonathan Gramain 997d71df08 Merge remote-tracking branch 'origin/bugfix/CLDSRV-478-bump-arsenal-dep' into w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 14:49:08 -08:00
Jonathan Gramain 275ebcec5c CLDSRV-478 bump cloudserver version 2024-01-02 14:45:56 -08:00
Mickael Bourgois 8b77530b2b
CLDSRV-486: fix object redirect to root / 2024-01-02 19:16:32 +01:00
bert-e 43f9606598 Merge branch 'w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:02 +00:00
bert-e be34e5ad59 Merge branch 'w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:01 +00:00
Jonathan Gramain 5bc64ede43 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 09:41:03 -08:00
Jonathan Gramain 911010376e Merge remote-tracking branch 'origin/bugfix/CLDSRV-478-bump-arsenal-dep' into w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 09:26:30 -08:00
Jonathan Gramain b5ec37b38b bf: CLDSRV-478 bump arsenal dependency 2024-01-02 09:19:15 -08:00
Mickael Bourgois 3ce869cea3
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-479-website-fqdn-index' into w/8.8/bugfix/CLDSRV-479-website-fqdn-index
# Conflicts:
#	package.json
2024-01-02 11:40:28 +01:00
Mickael Bourgois b7960784db
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-479-website-fqdn-index' into w/8.7/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:35:36 +01:00
Mickael Bourgois 5ac10cefa8
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-479-website-fqdn-index' into w/8.6/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:33:49 +01:00
Mickael Bourgois 2dafefd77f
Merge remote-tracking branch 'origin/bugfix/CLDSRV-479-website-fqdn-index' into w/7.70/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:29:47 +01:00
Mickael Bourgois 36f147b441
CLDSRV-479: update test bucket policy index 2024-01-02 11:13:40 +01:00
Mickael Bourgois 8ed447ba63
CLDSRV-479: helper function for index append 2024-01-02 10:27:38 +01:00
bert-e bf235f3335 Merge branch 'w/8.7/bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.8/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:53 +00:00
bert-e 569c9f4368 Merge branch 'bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:52 +00:00
Nicolas Humbert 92cf03254a CLDSRV-483 Improve Ruby test output readability and Enable backtrace 2023-12-31 11:08:21 +01:00
Nicolas Humbert c57ae9c8ea CLDSRV-483 Bump ruby patch version to fix malformed header response
More info about the malformed header response: https://github.com/excon/excon/issues/845
2023-12-31 11:08:16 +01:00
Mickael Bourgois 5bec42d051
CLDSRV-479: test index with bucket policy 2023-12-29 17:43:34 +01:00
Mickael Bourgois f427fc9b70
CLDSRV-479: bump version 2023-12-28 15:20:59 +01:00
Mickael Bourgois 9aad4ae3ea
CLDSRV-479: fix error on index using bucket policy
The variable holding the new objectKey with the index suffix
is not propagated to the bucket policy function;
_checkBucketPolicyResources extracts the objectKey from the request.
2023-12-28 15:20:48 +01:00
bert-e 1a3cb8108c Merge branch 'q/5495/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 042120b17e Merge branch 'q/5495/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e ba4593592d Merge branch 'w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 6efdb627da Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e 5306bf0b5c Merge branch 'q/5495/7.70/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e 5b22819c3f Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e 126ca3560f Merge branch 'improvement/CLDSRV-451-specific-7.70-apis-update' into q/7.70 2023-12-15 06:44:02 +00:00
bert-e e5b692f3db Merge branch 'w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.8/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:49 +00:00
bert-e 548ae8cd12 Merge branch 'w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:48 +00:00
Taylor McKinnon 80376405df Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 10:30:13 -08:00
Taylor McKinnon a612e5c27c Merge remote-tracking branch 'origin/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into w/7.70/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 10:27:03 -08:00
Taylor McKinnon c3b7662086 impr(CLDSRV-467): Bump Utapi dependency to 7.10.15 2023-12-14 10:17:18 -08:00
Taylor McKinnon 818b1e60d1 impr(CLDSRV-467): Add new Utapi Reindex option `utapi.reindex.onlyCountLatestWhenObjectLocked` 2023-12-14 10:17:18 -08:00
bert-e 2a919af071 Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:47 +00:00
bert-e 5c300b8b6c Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:46 +00:00
Maha Benzekri ad3ebd3db2
CLDSRV-451: fix on gettagging 2023-12-14 18:21:24 +01:00
Maha Benzekri 99068e7265
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:36:17 +01:00
Maha Benzekri cd039d8133
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update
In this commit, the only API change compared to 8.6 is
routeVeeam.
2023-12-14 17:33:03 +01:00
Maha Benzekri dd3ec25d74
Merge remote-tracking branch 'origin/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update
In this merge, we have updated the tagging APIs along with the
lifecycle APIs, metadata search APIs, and objectRestore; the unit test
for objectRestore has been updated as well.
2023-12-14 17:28:46 +01:00
Maha Benzekri 717228bdfc
CLDSRV-451: bump Cloudserver version 2023-12-14 16:59:10 +01:00
Maha Benzekri 836fc80560
CLDSRV-451: updating buckettagging apis for impDeny 2023-12-14 16:58:14 +01:00
Maha Benzekri 75b293df8d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.8/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:37:14 +01:00
Maha Benzekri a855e38998
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:35:02 +01:00
Maha Benzekri 51d5666bec
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:32:36 +01:00
Maha Benzekri ecb74a2db3
Merge remote-tracking branch 'origin/improvement/CLDSRV-431-misc-api-implicitDeny' into w/7.70/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:26:57 +01:00
Maha Benzekri cdcdf8eff0
CLDSRV-431: cloudserver version bump 2023-12-14 12:22:42 +01:00
Maha Benzekri dc39b37877
CLDSRV-431: arsenal bump 2023-12-14 12:21:53 +01:00
Maha Benzekri 4897b3c720
CLDSRV-431: changes on misc api for impDeny 2023-12-13 11:14:21 +01:00
Maha Benzekri ffe4ea4afe
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.8/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 14:47:24 +01:00
Maha Benzekri a16cfad0fc
CLDSRV-474: mongodb_image on all jobs 2023-12-12 14:06:02 +01:00
bert-e 556163e3e9 Merge branch 'w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:55:56 +00:00
Maha Benzekri 8fe9f16661
CLDSRV-474: Removing the docker-compose commands from the tests.yaml 2023-12-12 13:53:53 +01:00
Maha Benzekri eb9ff85bd9
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 13:52:50 +01:00
bert-e 52994c0177 Merge branch 'improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/7.70/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:44:56 +00:00
tmacro e109b0fca7
CLDSRV-474: fix CI fail 2023-12-12 10:21:01 +01:00
Maha Benzekri 9940699f9d
CLDSRV-474: fixup on multiObjectDelete 2023-12-12 10:11:18 +01:00
Maha Benzekri 869d554e43
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.8/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:42:25 +01:00
Maha Benzekri 2f8b228595
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:39:20 +01:00
Maha Benzekri 539b2c1630
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:35:11 +01:00
Maha Benzekri 320766e7b2
Merge remote-tracking branch 'origin/improvement/CLDSRV-430-delete-api-implicitDeny' into w/7.70/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:31:56 +01:00
Maha Benzekri 74425d03f8
CLDSRV-430: version bump 2023-12-08 18:29:19 +01:00
Maha Benzekri 91629a0d18
CLDSRV-430: add delete API implicit deny logic
As for multiObjectDelete, a new function was added to
ensure that all actions are allowed.
2023-12-08 18:29:17 +01:00
Maha Benzekri e44b7ed918
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 12:00:50 +01:00
Maha Benzekri 3cb29f7f8e
CLDSRV-429: version bump for version release 2023-12-05 12:00:09 +01:00
Maha Benzekri 4f08a4dff2
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 11:58:27 +01:00
Maha Benzekri 15a1aa7965
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 10:58:04 +01:00
Maha Benzekri 4470ee9125
CLDSRV-429: version bump for release 2023-12-05 10:55:31 +01:00
Francois Ferrand d8c12597ea
Release cloudserver 8.8.7
Issue: CLDSRV-471
2023-12-01 19:03:38 +01:00
Francois Ferrand c8eb9025fa
Merge remote-tracking branch 'origin/improvement/CLDSRV-471' into w/8.8/improvement/CLDSRV-471 2023-12-01 19:03:17 +01:00
Francois Ferrand 57e0f71e6a
Release cloudserver 8.7.33
Issue: CLDSRV-471
2023-12-01 19:01:30 +01:00
Francois Ferrand f22f920ee2
Bump arsenal 8.1.115
Issue: CLDSRV-471
2023-12-01 18:42:26 +01:00
Maha Benzekri ed1bb6301d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:31:50 +01:00
Maha Benzekri 70dfa5b11b
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:29:14 +01:00
Maha Benzekri f17e7677fa
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:27:44 +01:00
Maha Benzekri 63b00fef55
Merge remote-tracking branch 'origin/improvement/CLDSRV-429-get-apis-implicitDeny' into w/7.70/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:25:04 +01:00
Maha Benzekri b4f0d34abd
CLDSRV-429: version bump 2023-12-01 10:27:58 +01:00
Maha Benzekri e18f83ef0d
CLDSRV-429: update get apis with impDeny logic 2023-11-30 17:17:30 +01:00
Francois Ferrand a4e6f9d034
Add lifecycle restore duration metrics
Issue: CLDSRV-471
2023-11-30 14:55:01 +01:00
203 changed files with 9210 additions and 9543 deletions

View File

@ -16,7 +16,7 @@ runs:
run: |-
set -exu;
mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v2
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: 'yarn'
@ -30,9 +30,6 @@ runs:
- uses: actions/setup-python@v4
with:
python-version: 3.9
- name: Install python deps
shell: bash
run: pip install docker-compose
- name: Setup python2 test environment
shell: bash
run: |

View File

@ -40,6 +40,11 @@ services:
- DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file:
- creds.env
depends_on:
@ -67,7 +72,7 @@ services:
pykmip:
network_mode: "host"
profiles: ['pykmip']
image: registry.scality.com/cloudserver-dev/pykmip
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts
mongo:

View File

@ -1,4 +1,4 @@
FROM mongo:4.4.21
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \

View File

@ -1,3 +1,3 @@
FROM registry.scality.com/federation/sproxyd:7.10.6.8
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@ -1,7 +1,10 @@
name: Test alerts
on:
push
push:
branches-ignore:
- 'development/**'
- 'q/*/**'
jobs:
run-alert-tests:
@ -17,13 +20,16 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.1
uses: scality/action-prom-render-test@1.0.3
with:
alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }}
alert_inputs: >-
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
alert_inputs: |
namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -3,7 +3,7 @@ name: codeQL
on:
push:
branches: [development/*, stabilization/*, hotfix/*]
branches: [w/**, q/*]
pull_request:
branches: [development/*, stabilization/*, hotfix/*]
workflow_dispatch:
@ -14,12 +14,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3

View File

@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v3
uses: actions/dependency-review-action@v4

View File

@ -10,58 +10,69 @@ on:
required: true
env:
REGISTRY_NAME: registry.scality.com
PROJECT_NAME: ${{ github.event.repository.name }}
jobs:
build-federation-image:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1
secrets: inherit
with:
push: true
registry: registry.scality.com
namespace: ${{ github.event.repository.name }}
name: ${{ github.event.repository.name }}
context: .
file: images/svc-base/Dockerfile
tag: ${{ github.event.inputs.tag }}-svc-base
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
uses: docker/build-push-action@v5
with:
push: true
context: .
file: images/svc-base/Dockerfile
tags: |
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Set up Docker Buildk
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
- name: Login to Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY_NAME }}
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Push dashboards into the production namespace
run: |
oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
dashboard.json:application/grafana-dashboard+json \
alerts.yaml:application/prometheus-alerts+yaml
working-directory: monitoring
- name: Build and push
uses: docker/build-push-action@v4
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }}
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create Release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ github.token }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}

View File

@ -67,23 +67,24 @@ env:
ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
linting-coverage:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: actions/setup-node@v2
uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- uses: actions/cache@v3
- uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
@ -116,7 +117,7 @@ jobs:
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always()
- name: Upload files to artifacts
uses: scality/action-artifacts@v2
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -132,63 +133,72 @@ jobs:
packages: write
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Registry
uses: docker/login-action@v2
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
password: ${{ github.token }}
- name: Build and push cloudserver image
uses: docker/build-push-action@v4
uses: docker/build-push-action@v5
with:
push: true
context: .
provenance: false
tags: |
ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
uses: docker/build-push-action@v5
with:
push: true
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v4
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend:
runs-on: ubuntu-latest
needs: build
env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker-compose --profile sproxyd up -d
run: docker compose --profile sproxyd up -d
working-directory: .github/docker
- name: Run multiple backend test
run: |-
@ -199,7 +209,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -219,15 +229,15 @@ jobs:
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker-compose --profile mongo up -d
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
@ -237,7 +247,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -258,15 +268,15 @@ jobs:
DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker-compose --profile mongo up -d
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
@ -277,7 +287,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -297,12 +307,13 @@ jobs:
env:
S3BACKEND: file
S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
@ -311,7 +322,7 @@ jobs:
set -exu
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
- name: Setup CI services
run: docker-compose up -d
run: docker compose up -d
working-directory: .github/docker
- name: Run file ft tests
run: |-
@ -319,7 +330,7 @@ jobs:
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -333,17 +344,18 @@ jobs:
needs: build
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker-compose up -d
run: docker compose up -d
working-directory: .github/docker
- name: Run file utapi v2 tests
run: |-
@ -351,7 +363,51 @@ jobs:
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -367,18 +423,20 @@ jobs:
S3BACKEND: file
S3VAULT: mem
MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy KMIP certs
run: cp -r ./certs /tmp/ssl-kmip
working-directory: .github/pykmip
- name: Setup CI services
run: docker-compose --profile pykmip up -d
run: docker compose --profile pykmip up -d
working-directory: .github/docker
- name: Run file KMIP tests
run: |-
@ -387,7 +445,7 @@ jobs:
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -395,7 +453,7 @@ jobs:
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
ceph-backend-test:
runs-on: ubuntu-latest
needs: build
@ -407,30 +465,30 @@ jobs:
MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Login to GitHub Registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1
with:
ruby-version: '2.5.0'
ruby-version: '2.5.9'
- name: Install Ruby dependencies
run: |
gem install nokogiri:1.12.5 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
- name: Install Java dependencies
run: |
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
- name: Setup CI services
run: docker-compose --profile ceph up -d
run: docker compose --profile ceph up -d
working-directory: .github/docker
env:
S3METADATA: mongodb
@ -452,7 +510,7 @@ jobs:
- name: Run Ruby tests
run: |-
set -ex -o pipefail;
rspec tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
working-directory: tests/functional/fog
- name: Run Javascript AWS SDK tests
run: |-
@ -465,7 +523,7 @@ jobs:
S3VAULT: mem
S3METADATA: mongodb
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net

View File

@ -23,6 +23,7 @@ RUN apt-get update \
ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
################################################################################

175
README.md
View File

@ -1,10 +1,7 @@
# Zenko CloudServer
# Zenko CloudServer with Vitastor Backend
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -14,137 +11,71 @@ Scalitys Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud.
CloudServer is useful for Developers, either to run as part of a
continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage enabled
application on the go.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
backend support.
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
## Quick Start with Vitastor
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
The Vitastor backend is experimental, but you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs);
it works too 😊.
## Docker
Installation instructions:
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
### Install Vitastor
## Contributing
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
### Install Zenko with Vitastor Backend
## Installation
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build the Vitastor node.js binding by running `npm install` in the `node-binding` subdirectory of the Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (the Vitastor client library) for it to succeed.
- Symlink the Vitastor module into Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor` (a consolidated sketch of these steps follows below)
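For convenience, the installation steps above can be consolidated into one shell sketch; directory names and relative paths are placeholders, not part of the original instructions:
```
git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor zenko
cd zenko && npm install --omit dev && cd ..
git clone https://git.yourcmc.ru/vitalif/vitastor
cd vitastor/node-binding && npm install && cd ../..   # requires node-gyp and vitastor-client-dev
ln -s "$(pwd)/vitastor/node-binding" zenko/node_modules/vitastor
```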
### Dependencies
### Install and Configure MongoDB
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
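For a quick local setup that matches the bundled `config.json.vitastor` (a single-node replica set `rs0` on `127.0.0.1:27017`, database `s3`, user `s3`), a rough sketch could look like the following; treat it as an illustration only and follow the MongoDB manual for real deployments:
```
# Sketch only: assumes mongod reads /etc/mongod.conf and runs under systemd; adjust for your distribution.
printf 'replication:\n  replSetName: rs0\n' | sudo tee -a /etc/mongod.conf
sudo systemctl restart mongod
mongosh --eval 'rs.initiate()'
# Create the "s3" user referenced by mongodb.authCredentials in config.json.vitastor (pick your own password).
mongosh --eval 'db.getSiblingDB("s3").createUser({ user: "s3", pwd: "CHANGE_ME", roles: [{ role: "dbOwner", db: "s3" }] })'
```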
### Clone source code
### Setup Zenko
```shell
git clone https://github.com/scality/S3.git
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
in this file.
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc.)
instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer. A consolidated sketch of these setup steps follows below.
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```
### Install js dependencies
Go to the ./S3 folder,
```shell
yarn install --frozen-lockfile
```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```
If you get an error regarding installation of the diskUsage module,
please install g++.
# Author & License
If you get an error regarding level-down bindings, try clearing your yarn cache:
```shell
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it with Vault user management
Note: Vault is proprietary and must be accessed separately.
```shell
export S3VAULT=vault
yarn start
```
This starts a Zenko CloudServer using Vault for user management.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)

View File

@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -4,6 +4,7 @@
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
@ -101,6 +102,14 @@
"readPreference": "primary",
"database": "metadata"
},
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": {
"aws_s3": {
"httpAgent": {

71
config.json.vitastor Normal file
View File

@ -0,0 +1,71 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@ -116,7 +116,7 @@ const constants = {
],
// user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
objectLocationConstraintHeader: 'x-amz-storage-class',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties
@ -205,9 +205,6 @@ const constants = {
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'],
validStorageClasses: [
'STANDARD',
],
lifecycleListing: {
CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',
@ -220,6 +217,7 @@ const constants = {
'owner-id',
'versionId',
'isNull',
'isDeleteMarker',
],
unsupportedSignatureChecksums: new Set([
'STREAMING-UNSIGNED-PAYLOAD-TRAILER',
@ -231,6 +229,20 @@ const constants = {
'UNSIGNED-PAYLOAD',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD',
]),
ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
actionsToConsiderAsObjectPut: [
'initiateMultipartUpload',
'objectPutPart',
'completeMultipartUpload',
],
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
};
module.exports = constants;

View File

@ -2,11 +2,12 @@
## Docker Image Generation
Docker images are hosted on [registry.scality.com](registry.scality.com).
CloudServer has two namespaces there:
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:
* Production Namespace: registry.scality.com/cloudserver
* Dev Namespace: registry.scality.com/cloudserver-dev
* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboards
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
@ -18,8 +19,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
docker pull registry.scality.com/cloudserver/cloudserver:<tag>
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```
## Release Process

View File

@ -1,4 +1,4 @@
FROM registry.scality.com/federation/nodesvc-base:7.10.6.0
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
@ -14,8 +14,10 @@ RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \
git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all
yarn cache clean --all && \
yarn global remove typescript
# run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed!

View File

@ -1,10 +1,10 @@
'use strict'; // eslint-disable-line strict
/**
* Catch uncaught exceptions and add timestamp to aid debugging
*/
process.on('uncaughtException', err => {
process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
});
require('werelogs').stderrUtils.catchAndTimestampStderr(
undefined,
// Do not exit as workers have their own listener that will exit
// But primary don't have another listener
require('cluster').isPrimary ? 1 : null,
);
require('./lib/server.js')();

View File

@ -107,6 +107,47 @@ function parseSproxydConfig(configSproxyd) {
return joi.attempt(configSproxyd, joiSchema, 'bad config');
}
function parseRedisConfig(redisConfig) {
const joiSchema = joi.object({
password: joi.string().allow(''),
host: joi.string(),
port: joi.number(),
retry: joi.object({
connectBackoff: joi.object({
min: joi.number().required(),
max: joi.number().required(),
jitter: joi.number().required(),
factor: joi.number().required(),
deadline: joi.number().required(),
}),
}),
// sentinel config
sentinels: joi.alternatives().try(
joi.string()
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
.custom(hosts => hosts.split(',').map(item => {
const [host, port] = item.split(':');
return { host, port: Number.parseInt(port, 10) };
})),
joi.array().items(
joi.object({
host: joi.string().required(),
port: joi.number().required(),
})
).min(1),
),
name: joi.string(),
sentinelPassword: joi.string().allow(''),
})
.and('host', 'port')
.and('sentinels', 'name')
.xor('host', 'sentinels')
.without('sentinels', ['host', 'port'])
.without('host', ['sentinels', 'sentinelPassword']);
return joi.attempt(redisConfig, joiSchema, 'bad config');
}
function restEndpointsAssert(restEndpoints, locationConstraints) {
assert(typeof restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints');
@ -336,7 +377,7 @@ function dmfLocationConstraintAssert(locationObj) {
function locationConstraintAssert(locationConstraints) {
const supportedBackends =
['mem', 'file', 'scality',
'mongodb', 'dmf', 'azure_archive'].concat(Object.keys(validExternalBackends));
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => {
@ -461,26 +502,33 @@ function locationConstraintAssert(locationConstraints) {
locationConstraints[l].details.connector.hdclient);
}
});
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
}
function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
function parseUtapiReindex(config) {
const {
enabled,
schedule,
redis,
bucketd,
onlyCountLatestWhenObjectLocked,
} = config;
assert(typeof enabled === 'boolean',
'bad config: utapi.reindex.enabled must be a boolean');
assert(typeof sentinel === 'object',
'bad config: utapi.reindex.sentinel must be an object');
assert(typeof sentinel.port === 'number',
'bad config: utapi.reindex.sentinel.port must be a number');
assert(typeof sentinel.name === 'string',
'bad config: utapi.reindex.sentinel.name must be a string');
'bad config: utapi.reindex.enabled must be a boolean');
const parsedRedis = parseRedisConfig(redis);
assert(Array.isArray(parsedRedis.sentinels),
'bad config: utapi reindex redis config requires a list of sentinels');
assert(typeof bucketd === 'object',
'bad config: utapi.reindex.bucketd must be an object');
assert(typeof bucketd.port === 'number',
'bad config: utapi.reindex.bucketd.port must be a number');
assert(typeof schedule === 'string',
'bad config: utapi.reindex.schedule must be a string');
if (onlyCountLatestWhenObjectLocked !== undefined) {
assert(typeof onlyCountLatestWhenObjectLocked === 'boolean',
'bad config: utapi.reindex.onlyCountLatestWhenObjectLocked must be a boolean');
}
try {
cronParser.parseExpression(schedule);
} catch (e) {
@ -488,6 +536,13 @@ function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
'bad config: utapi.reindex.schedule must be a valid ' +
`cron schedule. ${e.message}.`);
}
return {
enabled,
schedule,
redis: parsedRedis,
bucketd,
onlyCountLatestWhenObjectLocked,
};
}
function requestsConfigAssert(requestsConfig) {
@ -575,7 +630,6 @@ class Config extends EventEmitter {
// Read config automatically
this._getLocationConfig();
this._getConfig();
this._configureBackends();
}
_getLocationConfig() {
@ -787,11 +841,11 @@ class Config extends EventEmitter {
this.websiteEndpoints = config.websiteEndpoints;
}
this.clusters = false;
if (config.clusters !== undefined) {
assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: clusters must be a positive integer');
this.clusters = config.clusters;
this.workers = false;
if (config.workers !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0,
'bad config: workers must be a positive integer');
this.workers = config.workers;
}
if (config.usEastBehavior !== undefined) {
@ -1029,8 +1083,7 @@ class Config extends EventEmitter {
assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) {
assert(
this._verifyRedisPassword(config.localCache.password),
assert(typeof config.localCache.password === 'string',
'config: vad password for localCache. password must' +
' be a string');
}
@ -1056,56 +1109,46 @@ class Config extends EventEmitter {
}
if (config.redis) {
if (config.redis.sentinels) {
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');
if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.redis.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.redis.sentinels)) {
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
}
if (config.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.redis.sentinelPassword));
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {
// check for standalone configuration
this.redis = {};
assert(typeof config.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
this.redis = parseRedisConfig(config.redis);
}
if (config.scuba) {
this.scuba = {};
if (config.scuba.host) {
assert(typeof config.scuba.host === 'string',
'bad config: scuba host must be a string');
this.scuba.host = config.scuba.host;
}
if (config.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.redis.password),
'bad config: invalid password for redis. password must ' +
'be a string');
this.redis.password = config.redis.password;
if (config.scuba.port) {
assert(Number.isInteger(config.scuba.port)
&& config.scuba.port > 0,
'bad config: scuba port must be a positive integer');
this.scuba.port = config.scuba.port;
}
}
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
assert(typeof process.env.SCUBA_HOST === 'string',
'bad config: scuba host must be a string');
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
&& Number(process.env.SCUBA_PORT) > 0,
'bad config: scuba port must be a positive integer');
this.scuba = {
host: process.env.SCUBA_HOST,
port: Number(process.env.SCUBA_PORT),
};
}
if (this.scuba) {
this.quotaEnabled = true;
}
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
config.quota?.maxStatenessMS ||
24 * 60 * 60 * 1000;
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
config.quota?.enableInflights || false;
this.quota = {
maxStaleness,
enableInflights,
};
if (config.utapi) {
this.utapi = { component: 's3' };
if (config.utapi.host) {
@ -1134,50 +1177,8 @@ class Config extends EventEmitter {
assert(config.redis, 'missing required property of utapi ' +
'configuration: redis');
if (config.utapi.redis) {
if (config.utapi.redis.sentinels) {
this.utapi.redis = { sentinels: [], name: null };
assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port });
});
} else {
// check for standalone configuration
this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port;
}
if (config.utapi.redis.retry !== undefined) {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}
this.utapi.redis.retry = config.utapi.redis.retry;
} else {
this.utapi.redis = parseRedisConfig(config.utapi.redis);
if (this.utapi.redis.retry === undefined) {
this.utapi.redis.retry = {
connectBackoff: {
min: 10,
@ -1188,22 +1189,6 @@ class Config extends EventEmitter {
},
};
}
if (config.utapi.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.password = config.utapi.redis.password;
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(
config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
}
if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics;
@ -1273,8 +1258,7 @@ class Config extends EventEmitter {
}
if (config.utapi && config.utapi.reindex) {
parseUtapiReindex(config.utapi.reindex);
this.utapi.reindex = config.utapi.reindex;
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
}
}
@ -1319,6 +1303,8 @@ class Config extends EventEmitter {
}
}
this.authdata = config.authdata || 'authdata.json';
this.kms = {};
if (config.kms) {
assert(typeof config.kms.userName === 'string');
@ -1538,25 +1524,6 @@ class Config extends EventEmitter {
this.outboundProxy.certs = certObj.certs;
}
this.managementAgent = {};
this.managementAgent.port = 8010;
this.managementAgent.host = 'localhost';
if (config.managementAgent !== undefined) {
if (config.managementAgent.port !== undefined) {
assert(Number.isInteger(config.managementAgent.port)
&& config.managementAgent.port > 0,
'bad config: managementAgent port must be a positive ' +
'integer');
this.managementAgent.port = config.managementAgent.port;
}
if (config.managementAgent.host !== undefined) {
assert.strictEqual(typeof config.managementAgent.host, 'string',
'bad config: management agent host must ' +
'be a string');
this.managementAgent.host = config.managementAgent.host;
}
}
// Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file,
// then create a fresh one as last resort.
@ -1646,6 +1613,8 @@ class Config extends EventEmitter {
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
}
this._configureBackends(config);
}
_setTimeOptions() {
@ -1684,40 +1653,43 @@ class Config extends EventEmitter {
}
_getAuthData() {
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
}
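// Hedged usage note: with the readFileSync-based loader above, the credentials
// file can be pointed at explicitly (path and entrypoint are illustrative
// assumptions):
//   S3AUTH_CONFIG=/etc/cloudserver/authdata.json node index.js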
_configureBackends() {
_configureBackends(config) {
const backends = config.backends || {};
/**
* Configure the backends for Authentication, Data and Metadata.
*/
let auth = 'mem';
let data = 'multiple';
let metadata = 'file';
let kms = 'file';
let auth = backends.auth || 'mem';
let data = backends.data || 'multiple';
let metadata = backends.metadata || 'file';
let kms = backends.kms || 'file';
let quota = backends.quota || 'none';
if (process.env.S3BACKEND) {
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' +
'should be one of mem/file/scality/cdmi'
);
auth = process.env.S3BACKEND;
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
data = process.env.S3BACKEND;
metadata = process.env.S3BACKEND;
kms = process.env.S3BACKEND;
}
if (process.env.S3VAULT) {
auth = process.env.S3VAULT;
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
}
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
// Auth only checks for 'mem' since mem === file
auth = 'mem';
let authData;
if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) {
process.env.SCALITY_SECRET_ACCESS_KEY) {
authData = buildAuthDataAccount(
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
} else {
authData = this._getAuthData();
}
@ -1725,7 +1697,7 @@ class Config extends EventEmitter {
throw new Error('bad config: invalid auth config file.');
}
this.authData = authData;
} else if (auth === 'multiple') {
} else if (auth === 'multiple') {
const authData = this._getAuthData();
if (validateAuthConfig(authData)) {
throw new Error('bad config: invalid auth config file.');
@ -1740,9 +1712,9 @@ class Config extends EventEmitter {
'should be one of mem/file/scality/multiple'
);
data = process.env.S3DATA;
}
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
}
}
assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined,
@ -1755,18 +1727,18 @@ class Config extends EventEmitter {
if (process.env.S3KMS) {
kms = process.env.S3KMS;
}
if (process.env.S3QUOTA) {
quota = process.env.S3QUOTA;
}
this.backends = {
auth,
data,
metadata,
kms,
quota,
};
}
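// A hedged example of the backends section this method now reads from
// config.json (values are illustrative; each entry can still be overridden
// by S3BACKEND, S3VAULT, S3DATA, S3KMS or S3QUOTA):
//   "backends": {
//       "auth": "mem",
//       "data": "multiple",
//       "metadata": "file",
//       "kms": "file",
//       "quota": "none"
//   }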
_verifyRedisPassword(password) {
return typeof password === 'string';
}
setAuthDataAccounts(accounts) {
this.authData.accounts = accounts;
this.emit('authdata-update');
@ -1889,10 +1861,19 @@ class Config extends EventEmitter {
.update(instanceId)
.digest('hex');
}
isQuotaEnabled() {
return !!this.quotaEnabled;
}
isQuotaInflightEnabled() {
return this.quota.enableInflights;
}
}
module.exports = {
parseSproxydConfig,
parseRedisConfig,
locationConstraintAssert,
ConfigObject: Config,
config: new Config(),

View File

@ -7,6 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors');
@ -17,6 +18,7 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut');
@ -33,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight');
@ -64,8 +67,7 @@ const prepareRequestContexts
= require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet');
const vault = require('../auth/vault');
const websiteGet = require('./websiteGet');
const websiteHead = require('./websiteHead');
const website = require('./website');
const writeContinue = require('../utilities/writeContinue');
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
const parseCopySource = require('./apiUtils/object/parseCopySource');
@ -83,6 +85,10 @@ const api = {
// Attach the apiMethod method to the request, so it can be used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod;
// Array of end-of-API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod];
if (!actionLog &&
@ -191,14 +197,17 @@ const api = {
return async.waterfall([
next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err });
return next(err);
return next(arsenalError);
}
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, next) => {
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
@ -208,7 +217,7 @@ const api = {
}
log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}
// issue 100 Continue to the client
writeContinue(request, response);
@ -239,12 +248,12 @@ const api = {
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
});
return undefined;
},
// Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
authorizationResults,
request,
requestContexts,
@ -255,13 +264,14 @@ const api = {
log.trace('tag authentication error', { error: err });
return next(err);
}
return next(null, userInfo, authResultsWithTags, streamingV4Params);
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
},
),
], (err, userInfo, authorizationResults, streamingV4Params) => {
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
return callback(err);
}
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) {
@ -278,19 +288,23 @@ const api = {
return acc;
}, {});
}
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params,
log, callback, authorizationResults);
log, methodCallback, authorizationResults);
}
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
sourceObject, sourceVersionId, log, methodCallback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
return this[apiMethod](userInfo, request, log, methodCallback);
});
},
bucketDelete,
@ -317,11 +331,14 @@ const api = {
bucketPutReplication,
bucketGetReplication,
bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle,
bucketDeleteLifecycle,
bucketPutPolicy,
bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy,
bucketPutObjectLock,
bucketPutNotification,
@ -353,8 +370,8 @@ const api = {
objectPutRetention,
objectRestore,
serviceGet,
websiteGet,
websiteHead,
websiteGet: website,
websiteHead: website,
};
module.exports = api;
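The finalizerHooks array attached to the request earlier in this file lets an API handler register cleanup work that runs after the handler finishes but before the response callback fires (the quota utilities later in this changeset rely on it). A minimal, hypothetical sketch of a hook a handler could register:
request.finalizerHooks.push((errFromApi, done) => {
// release any per-request resources here; errFromApi is the API error, if any
done();
});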

View File

@ -1,7 +1,19 @@
const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
const { errors } = require('arsenal');
const { parseCIDR, isValid } = require('ipaddr.js');
const constants = require('../../../../constants');
const { config } = require('../../../Config');
const { allAuthedUsersId, bucketOwnerActions, logId, publicId, arrayOfAllowed } = constants;
const {
allAuthedUsersId,
bucketOwnerActions,
logId,
publicId,
arrayOfAllowed,
assumedRoleArnResourceType,
backbeatLifecycleSessionName,
actionsToConsiderAsObjectPut,
} = constants;
// whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
@ -39,17 +51,21 @@ function isRequesterNonAccountUser(authInfo) {
function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
// Same logic applies on the Versioned APIs, so let's simplify it.
const requestTypeParsed = requestType.endsWith('Version') ?
let requestTypeParsed = requestType.endsWith('Version') ?
requestType.slice(0, 'Version'.length * -1) : requestType;
requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestTypeParsed) ?
'objectPut' : requestTypeParsed;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
if (bucket.getOwner() === canonicalID) {
return true;
}
if (mainApiCall === 'objectGet') {
if (parsedMainApiCall === 'objectGet') {
if (requestTypeParsed === 'objectGetTagging') {
return true;
}
}
if (mainApiCall === 'objectPut') {
if (parsedMainApiCall === 'objectPut') {
if (arrayOfAllowed.includes(requestTypeParsed)) {
return true;
}
@ -131,8 +147,12 @@ function checkBucketAcls(bucket, requestType, canonicalID, mainApiCall) {
function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIsNotUser,
isUserUnauthenticated, mainApiCall) {
const bucketOwner = bucket.getOwner();
const requestTypeParsed = actionsToConsiderAsObjectPut.includes(requestType) ?
'objectPut' : requestType;
const parsedMainApiCall = actionsToConsiderAsObjectPut.includes(mainApiCall) ?
'objectPut' : mainApiCall;
// acls don't distinguish between users and accounts, so both should be allowed
if (bucketOwnerActions.includes(requestType)
if (bucketOwnerActions.includes(requestTypeParsed)
&& (bucketOwner === canonicalID)) {
return true;
}
@ -141,9 +161,9 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
}
// Backward compatibility
if (mainApiCall === 'objectGet') {
if (parsedMainApiCall === 'objectGet') {
if ((isUserUnauthenticated || (requesterIsNotUser && bucketOwner === objectMD['owner-id']))
&& requestType === 'objectGetTagging') {
&& requestTypeParsed === 'objectGetTagging') {
return true;
}
}
@ -152,7 +172,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
return false;
}
if (requestType === 'objectGet' || requestType === 'objectHead') {
if (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead') {
if (objectMD.acl.Canned === 'public-read'
|| objectMD.acl.Canned === 'public-read-write'
|| (objectMD.acl.Canned === 'authenticated-read'
@ -178,11 +198,11 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if (requestType === 'objectPut' || requestType === 'objectDelete') {
if (requestTypeParsed === 'objectPut' || requestTypeParsed === 'objectDelete') {
return true;
}
if (requestType === 'objectPutACL') {
if (requestTypeParsed === 'objectPutACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -198,7 +218,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
}
}
if (requestType === 'objectGetACL') {
if (requestTypeParsed === 'objectGetACL') {
if ((objectMD.acl.Canned === 'bucket-owner-full-control'
&& bucketOwner === canonicalID)
|| objectMD.acl.FULL_CONTROL.indexOf(canonicalID) > -1
@ -219,7 +239,7 @@ function checkObjectAcls(bucket, objectMD, requestType, canonicalID, requesterIs
const bucketAcl = bucket.getAcl();
const allowPublicReads = publicReadBuckets.includes(bucket.getName())
&& bucketAcl.Canned === 'public-read'
&& (requestType === 'objectGet' || requestType === 'objectHead');
&& (requestTypeParsed === 'objectGet' || requestTypeParsed === 'objectHead');
if (allowPublicReads) {
return true;
}
@ -246,6 +266,20 @@ function _checkBucketPolicyResources(request, resource, log) {
return evaluators.isResourceApplicable(requestContext, resource, log);
}
function _checkBucketPolicyConditions(request, conditions, log) {
const ip = request ? requestUtils.getClientIp(request, config) : undefined;
if (!conditions) {
return true;
}
// build request context from the request!
const requestContext = new RequestContext(request.headers, request.query,
request.bucketName, request.objectKey, ip,
request.connection.encrypted, request.resourceType, 's3', null, null,
null, null, null, null, null, null, null, null, null,
request.objectLockRetentionDays);
return evaluators.meetConditions(requestContext, conditions, log);
}
function _getAccountId(arn) {
// account or user arn is of format 'arn:aws:iam::<12-digit-acct-id>:etc...
return arn.substr(13, 12);
@ -290,11 +324,11 @@ function _checkPrincipals(canonicalID, arn, principal) {
return false;
}
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request) {
function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, log, request, actionImplicitDenies) {
let permission = 'defaultDeny';
// if requester is user within bucket owner account, actions should be
// allowed unless explicitly denied (assumes allowed by IAM policy)
if (bucketOwner === canonicalID) {
if (bucketOwner === canonicalID && actionImplicitDenies[requestType] === false) {
permission = 'allow';
}
let copiedStatement = JSON.parse(JSON.stringify(policy.Statement));
@ -303,12 +337,13 @@ function checkBucketPolicy(policy, requestType, canonicalID, arn, bucketOwner, l
const principalMatch = _checkPrincipals(canonicalID, arn, s.Principal);
const actionMatch = _checkBucketPolicyActions(requestType, s.Action, log);
const resourceMatch = _checkBucketPolicyResources(request, s.Resource, log);
const conditionsMatch = _checkBucketPolicyConditions(request, s.Condition, log);
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Deny') {
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Deny') {
// explicit deny trumps any allows, so return immediately
return 'explicitDeny';
}
if (principalMatch && actionMatch && resourceMatch && s.Effect === 'Allow') {
if (principalMatch && actionMatch && resourceMatch && conditionsMatch && s.Effect === 'Allow') {
permission = 'allow';
}
copiedStatement = copiedStatement.splice(1);
@ -324,7 +359,7 @@ function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner,
processedResult = actionImplicitDenies[requestType] === false && aclPermission;
} else {
const bucketPolicyPermission = checkBucketPolicy(bucketPolicy, requestType, canonicalID, arn,
bucketOwner, log, request);
bucketOwner, log, request, actionImplicitDenies);
if (bucketPolicyPermission === 'explicitDeny') {
processedResult = false;
@ -338,7 +373,7 @@ function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner,
}
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}) {
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const mainApiCall = requestTypes[0];
@ -362,6 +397,15 @@ function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, lo
return results[_requestType];
}
const aclPermission = checkBucketAcls(bucket, _requestType, canonicalID, mainApiCall);
// In case of error, bucket access is checked with bucketGet.
// For website requests, the bucket policy only uses objectGet and ignores bucketGet
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html
// bucketGet is still used to check the ACLs, but is switched to objectGet for the bucket policy
if (isWebsite && _requestType === 'bucketGet') {
// eslint-disable-next-line no-param-reassign
_requestType = 'objectGet';
actionImplicitDenies.objectGet = actionImplicitDenies.objectGet || false;
}
return processBucketPolicy(_requestType, bucket, canonicalID, arn, bucket.getOwner(), log,
request, aclPermission, results, actionImplicitDenies);
});
@ -386,7 +430,7 @@ function evaluateBucketPolicyWithIAM(bucket, requestTypesInput, canonicalID, aut
}
function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}) {
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
const actionImplicitDenies = !actionImplicitDeniesInput ? {} : actionImplicitDeniesInput;
const results = {};
@ -399,16 +443,20 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI
? _requestType.slice(0, -7) : _requestType;
const bucketOwner = bucket.getOwner();
if (!objectMD) {
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if (parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete') {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
// check bucket has read access
// 'bucketGet' covers listObjects and listMultipartUploads, bucket read actions
results[_requestType] = isBucketAuthorized(bucket, 'bucketGet', canonicalID, authInfo, log, request,
actionImplicitDenies);
let permission = 'bucketGet';
if (actionsToConsiderAsObjectPut.includes(_requestType)) {
permission = 'objectPut';
}
results[_requestType] = isBucketAuthorized(bucket, permission, canonicalID, authInfo, log, request,
actionImplicitDenies, isWebsite);
// User is already authorized on the bucket for FULL_CONTROL or WRITE or
// bucket has canned ACL public-read-write
if ((parsedMethodName === 'objectPut' || parsedMethodName === 'objectDelete')
&& results[_requestType] === false) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
}
return results[_requestType];
}
let requesterIsNotUser = true;
@ -466,6 +514,117 @@ function validatePolicyResource(bucketName, policy) {
});
}
function checkIp(value) {
const errString = 'Invalid IP address in Conditions';
const values = Array.isArray(value) ? value : [value];
for (let i = 0; i < values.length; i++) {
// these preliminary checks are validating the provided
// ip address against ipaddr.js, the library we use when
// evaluating IP condition keys. It ensures compatibility,
// but additional checks are required to enforce the right
// notation (e.g., xxx.xxx.xxx.xxx/xx for IPv4). Otherwise,
// we would accept different ip formats, which is not
// standard in an AWS use case.
try {
try {
parseCIDR(values[i]);
} catch (err) {
isValid(values[i]);
}
} catch (err) {
return errString;
}
// Apply the existing IP validation logic to each element
const validateIpRegex = ip => {
if (constants.ipv4Regex.test(ip)) {
return ip.split('.').every(part => parseInt(part, 10) <= 255);
}
if (constants.ipv6Regex.test(ip)) {
return ip.split(':').every(part => part.length <= 4);
}
return false;
};
if (validateIpRegex(values[i]) !== true) {
return errString;
}
}
// If the function hasn't returned by now, all elements are valid
return null;
}
// This function checks, for every bucket policy condition, whether the values provided
// are valid for the condition type. If not, it returns a relevant MalformedPolicy error string
function validatePolicyConditions(policy) {
const validConditions = [
{ conditionKey: 'aws:SourceIp', conditionValueTypeChecker: checkIp },
{ conditionKey: 's3:object-lock-remaining-retention-days' },
];
// keys where value type does not seem to be checked by AWS:
// - s3:object-lock-remaining-retention-days
if (!policy.Statement || !Array.isArray(policy.Statement) || policy.Statement.length === 0) {
return null;
}
// there can be multiple statements in the policy, each with a Condition enclosure
for (let i = 0; i < policy.Statement.length; i++) {
const s = policy.Statement[i];
if (s.Condition) {
const conditionOperators = Object.keys(s.Condition);
// there can be multiple condition operations in the Condition enclosure
// eslint-disable-next-line no-restricted-syntax
for (const conditionOperator of conditionOperators) {
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
const conditionValue = s.Condition[conditionOperator][conditionKey];
const validCondition = validConditions.find(validCondition =>
validCondition.conditionKey === conditionKey
);
// AWS does not return an error if the condition key starts with 'aws:',
// so we reproduce this behaviour
if (!validCondition && !conditionKey.startsWith('aws:')) {
return errors.MalformedPolicy.customizeDescription('Policy has an invalid condition key');
}
if (validCondition && validCondition.conditionValueTypeChecker) {
const conditionValueTypeError = validCondition.conditionValueTypeChecker(conditionValue);
if (conditionValueTypeError) {
return errors.MalformedPolicy.customizeDescription(conditionValueTypeError);
}
}
}
}
}
return null;
}
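// A hedged example of a statement that passes validatePolicyConditions and is
// then evaluated by _checkBucketPolicyConditions above (principal, bucket and
// CIDR are illustrative assumptions):
//   {
//       "Effect": "Deny",
//       "Principal": "*",
//       "Action": "s3:GetObject",
//       "Resource": "arn:aws:s3:::examplebucket/*",
//       "Condition": { "NotIpAddress": { "aws:SourceIp": "192.0.2.0/24" } }
//   }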
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts'
&& resourceType === assumedRoleArnResourceType
&& sessionName === backbeatLifecycleSessionName);
}
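// Hedged usage sketch, reusing the ARN from the JSDoc above (the second ARN is
// an illustrative assumption):
//   isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle'); // true
//   isLifecycleSession('arn:aws:iam::257038443293:user/someuser'); // false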
module.exports = {
isBucketAuthorized,
isObjAuthorized,
@ -476,5 +635,7 @@ module.exports = {
checkBucketAcls,
checkObjectAcls,
validatePolicyResource,
validatePolicyConditions,
isLifecycleSession,
evaluateBucketPolicyWithIAM,
};

View File

@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3');
}
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
if (apiMethod === 'bucketPut') {
return null;
}
@ -65,7 +65,17 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = [];
if (apiMethodAfterVersionCheck === 'objectCopy'
if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization
// service, for example the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';

View File

@ -24,7 +24,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
});
}
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, request, log, cb) {
async.mapLimit(mpus, 1, (mpu, next) => {
const splitterChar = mpu.key.includes(oldSplitter) ?
oldSplitter : splitter;
@ -40,7 +40,7 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
byteLength: partSizeSum,
});
next(err);
});
}, request);
}, cb);
}
/**
@ -49,11 +49,13 @@ function _deleteOngoingMPUs(authInfo, bucketName, bucketMD, mpus, log, cb) {
* @param {object} bucketMD - bucket attributes/metadata
* @param {string} bucketName - bucket in which objectMetadata is stored
* @param {string} canonicalID - account canonicalID of requester
* @param {object} request - request object given by router
* including normalized headers
* @param {object} log - Werelogs logger
* @param {function} cb - callback from async.waterfall in bucketDelete
* @return {undefined}
*/
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log, cb) {
log.trace('deleting bucket from metadata');
assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof canonicalID, 'string');
@ -100,7 +102,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
}
if (objectsListRes.Contents.length) {
return _deleteOngoingMPUs(authInfo, bucketName,
bucketMD, objectsListRes.Contents, log, err => {
bucketMD, objectsListRes.Contents, request, log, err => {
if (err) {
return next(err);
}

View File

@ -30,6 +30,9 @@ function bucketShield(bucket, requestType) {
// Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' &&
requestType !== 'initiateMultipartUpload' &&
requestType !== 'objectPutPart' &&
requestType !== 'completeMultipartUpload' &&
requestType !== 'bucketPutACL' &&
requestType !== 'bucketDelete')) {
return true;

View File

@ -3,7 +3,7 @@ const async = require('async');
const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const locationConstraintCheck = require('../object/locationConstraintCheck');
const { metadataValidateBucketAndObj } =
const { standardMetadataValidateBucketAndObj } =
require('../../../metadata/metadataUtils');
const services = require('../../../services');
@ -14,7 +14,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
bucketName,
objectKey,
uploadId,
preciseRequestType: 'multipartDelete',
preciseRequestType: request.apiMethods || 'multipartDelete',
request,
};
// For validating the request at the destinationBucket level
@ -22,10 +22,11 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
// but the requestType is the more general 'objectDelete'
const metadataValParams = Object.assign({}, metadataValMPUparams);
metadataValParams.requestType = 'objectPut';
const authzIdentityResult = request ? request.actionImplicitDenies : false;
async.waterfall([
function checkDestBucketVal(next) {
metadataValidateBucketAndObj(metadataValParams, log,
standardMetadataValidateBucketAndObj(metadataValParams, authzIdentityResult, log,
(err, destinationBucket) => {
if (err) {
return next(err, destinationBucket);
@ -56,9 +57,14 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
function abortExternalMpu(mpuBucket, mpuOverviewObj, destBucket,
next) {
const location = mpuOverviewObj.controllingLocationConstraint;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.abortMPU(objectKey, uploadId, location, bucketName,
request, destBucket, locationConstraintCheck, log,
(err, skipDataDelete) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityAuthzResults;
if (err) {
return next(err, destBucket);
}

View File

@ -7,6 +7,8 @@ const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Get response header "x-amz-restore"
* Be called by objectHead.js
@ -145,7 +147,7 @@ function _updateObjectExpirationDate(objectMD, log) {
});
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setDate(expiryDate.getDate() + objectMD.archive.restoreRequestedDays);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
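// Worked example (assuming the default scaledMsPerDay of 86400000 ms): with
// restoreRequestedAt = 2024-01-01T00:00:00Z and restoreRequestedDays = 2,
// restoreWillExpireAt becomes 2024-01-01T00:00:00Z + 2 * 86400000 ms,
// i.e. 2024-01-03T00:00:00Z.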

View File

@ -5,7 +5,6 @@ const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
const constants = require('../../../../constants');
const { data } = require('../../../data/wrapper');
const services = require('../../../services');
const logger = require('../../../utilities/logger');
const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
@ -21,7 +20,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure or GCP.';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, deleteLog, requestMethod, callback) {
metadataStoreParams, dataToDelete, log, requestMethod, callback) {
services.metadataStoreObject(bucketName, dataGetInfo,
cipherBundle, metadataStoreParams, (err, result) => {
if (err) {
@ -31,7 +30,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
const newDataStoreName = Array.isArray(dataGetInfo) ?
dataGetInfo[0].dataStoreName : null;
return data.batchDelete(dataToDelete, requestMethod,
newDataStoreName, deleteLog, err => callback(err, result));
newDataStoreName, log, err => callback(err, result));
}
return callback(null, result);
});
@ -198,10 +197,9 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
const dontSkipBackend = externalBackends;
/* eslint-enable camelcase */
const requestLogger =
logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
const mdOnlySize = request.headers['x-amz-meta-size'];
return async.waterfall([
function storeData(next) {
if (size === 0) {
@ -296,7 +294,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
}
return _storeInMDandDeleteData(bucketName, infoArr,
cipherBundle, metadataStoreParams,
options.dataToDelete, requestLogger, requestMethod, next);
options.dataToDelete, log, requestMethod, next);
},
], callback);
}

View File

@ -3,6 +3,7 @@ const moment = require('moment');
const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/**
@ -304,7 +305,9 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
if (err) {
return cb(err);
}
if (authorizationResults[0].isAllowed !== true) {
const explicitDenyExists = authorizationResults.some(
authzResult => authzResult.isAllowed === false && !authzResult.isImplicit);
if (explicitDenyExists) {
log.trace('authorization check failed for user',
{
'method': 'checkUserPolicyGovernanceBypass',
@ -312,7 +315,25 @@ function checkUserGovernanceBypass(request, authInfo, bucketMD, objectKey, log,
});
return cb(errors.AccessDenied);
}
return cb(null);
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
authInfo.getCanonicalID(),
authInfo,
actionImplicitDenies,
log,
request);
return cb(areAllActionsAllowed === true ? null : errors.AccessDenied);
});
}
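// Hedged illustration of the conversion above (the action name and flags are
// illustrative assumptions about what the authorization service returns):
//   [{ action: 'bypassGovernanceRetention', isAllowed: false, isImplicit: true }]
// is reduced to
//   { bypassGovernanceRetention: true }
// and then re-evaluated against the bucket policy via evaluateBucketPolicyWithIAM.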

View File

@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/**
* Check if tier is supported
@ -58,13 +58,22 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
bucketName,
objectKey,
versionId: decodedVidResult,
requestType: 'restoreObject',
requestType: request.apiMethods || 'restoreObject',
/**
* Restoring an object might not cause any impact on
* the storage, if the object is already restored: in
* this case, the duration is extended. We disable the
* quota evaluation and trigger it manually.
*/
checkQuota: false,
request,
};
return async.waterfall([
// get metadata of bucket and object
function validateBucketAndObject(next) {
return mdUtils.metadataValidateBucketAndObj(mdValueParams, log, (err, bucketMD, objectMD) => {
return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
log, (err, bucketMD, objectMD) => {
if (err) {
log.trace('request authorization failed', { method: METHOD, error: err });
return next(err);
@ -115,6 +124,16 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
return next(err, bucketMD, objectMD);
});
},
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,

View File

@ -210,7 +210,7 @@ function processVersioningState(mst, vstat, nullVersionCompatMode) {
// null keys are used, which is used as an optimization to
// avoid having to check the versioned key since there can
// be no more versioned key to clean up
if (mst.isNull && !mst.isNull2) {
if (mst.isNull && mst.versionId && !mst.isNull2) {
const delOptions = { versionId: mst.versionId };
return { options, delOptions };
}
@ -241,7 +241,7 @@ function processVersioningState(mst, vstat, nullVersionCompatMode) {
if (masterIsNull) {
// if master is a null version or a non-versioned key,
// copy it to a new null key
const nullVersionId = mst.isNull ? mst.versionId : nonVersionedObjId;
const nullVersionId = (mst.isNull && mst.versionId) ? mst.versionId : nonVersionedObjId;
if (nullVersionCompatMode) {
options.extraMD = {
nullVersionId,

View File

@ -101,8 +101,33 @@ function validateWebsiteHeader(header) {
header.startsWith('http://') || header.startsWith('https://'));
}
/**
* appendWebsiteIndexDocument - append index to objectKey if necessary
* @param {object} request - normalized request object
* @param {string} indexDocumentSuffix - index document from website config
* @param {boolean} force - flag to force append index
* @return {undefined}
*/
function appendWebsiteIndexDocument(request, indexDocumentSuffix, force = false) {
const reqObjectKey = request.objectKey ? request.objectKey : '';
/* eslint-disable no-param-reassign */
// find index document if "directory" sent in request
if (reqObjectKey.endsWith('/')) {
request.objectKey += indexDocumentSuffix;
// find index document if no key provided
} else if (reqObjectKey === '') {
request.objectKey = indexDocumentSuffix;
// force for redirect 302 on folder without trailing / that has an index
} else if (force) {
request.objectKey += `/${indexDocumentSuffix}`;
}
/* eslint-enable no-param-reassign */
}
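// Hedged usage sketch (the index document name is an illustrative assumption):
//   request.objectKey === 'docs/' -> appendWebsiteIndexDocument(request, 'index.html') -> 'docs/index.html'
//   request.objectKey === ''      -> appendWebsiteIndexDocument(request, 'index.html') -> 'index.html'
//   request.objectKey === 'docs'  -> appendWebsiteIndexDocument(request, 'index.html', true) -> 'docs/index.html'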
module.exports = {
findRoutingRule,
extractRedirectInfo,
validateWebsiteHeader,
appendWebsiteIndexDocument,
};

View File

@ -0,0 +1,314 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
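// Hedged worked examples of the returned byte delta (sizes are illustrative):
//   objectPut of 10485760 bytes over an existing 4194304-byte key in an
//   unversioned bucket -> 10485760 - 4194304 = 6291456
//   objectDelete of a 4194304-byte key in an unversioned bucket -> -4194304
//   objectRestore of a 4194304-byte archived object -> 4194304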
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
/**
* Evaluates quotas for a bucket and an account and updates the inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
/**
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check whether the current quota, minus
* the incoming bytes, is under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {boolean} - true if the quota is valid, false otherwise
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations check the target object first,
// which means the source object, carrying the current bytes,
// is checked second. This logic handles these API calls by
// ensuring the byte count is positive (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes is compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// only the current utilization metrics are compared with the
// quota; they already include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
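// Minimal usage sketch, mirroring the call made by objectRestore earlier in
// this changeset (the `bytes` value would come from processBytesToWrite):
//   validateQuotas(request, bucketMD, request.accountQuotas, ['objectRestore'],
//       'objectRestore', bytes, false, log,
//       err => { /* err is errors.QuotaExceeded when a quota is exceeded */ });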
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};

View File

@ -1,7 +1,7 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { metadataValidateBucket } = require('../../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processCurrents,
@ -77,7 +77,7 @@ function listLifecycleCurrents(authInfo, locationConstraints, request, log, call
maxScannedLifecycleListingEntries,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(

View File

@ -1,7 +1,7 @@
const { errors, versioning } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { metadataValidateBucket } = require('../../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const versionIdUtils = versioning.VersionID;
const monitoring = require('../../utilities/monitoringHandler');
@ -83,7 +83,7 @@ function listLifecycleNonCurrents(authInfo, locationConstraints, request, log, c
listParams.versionIdMarker = params['version-id-marker'] ?
versionIdUtils.decode(params['version-id-marker']) : undefined;
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(

View File

@ -1,7 +1,7 @@
const { errors } = require('arsenal');
const constants = require('../../../constants');
const services = require('../../services');
const { metadataValidateBucket } = require('../../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/monitoringHandler');
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
@ -68,7 +68,7 @@ function listLifecycleOrphanDeleteMarkers(authInfo, locationConstraints, request
maxScannedLifecycleListingEntries,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
if (err) {
log.debug('error processing request', { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(

View File

@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -34,7 +34,7 @@ function bucketDelete(authInfo, request, log, cb) {
request,
};
return metadataValidateBucket(metadataValParams, log,
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucketMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucketMD);
@ -48,7 +48,7 @@ function bucketDelete(authInfo, request, log, cb) {
log.trace('passed checks',
{ method: 'metadataValidateBucket' });
return deleteBucket(authInfo, bucketMD, bucketName,
authInfo.getCanonicalID(), log, err => {
authInfo.getCanonicalID(), request, log, err => {
if (err) {
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucket');

View File

@ -38,7 +38,8 @@ function bucketDeleteCors(authInfo, request, log, callback) {
}
log.trace('found bucket in metadata');
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketDeleteCors',

View File

@ -1,7 +1,7 @@
const async = require('async');
const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
@ -21,12 +21,12 @@ function bucketDeleteEncryption(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketDeleteEncryption',
requestType: request.apiMethods || 'bucketDeleteEncryption',
request,
};
return async.waterfall([
next => metadataValidateBucket(metadataValParams, log, next),
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
(bucket, next) => {
const sseConfig = bucket.getServerSideEncryption();

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketDeleteLifecycle',
requestType: request.apiMethods || 'bucketDeleteLifecycle',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

View File

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
@ -16,10 +16,10 @@ function bucketDeletePolicy(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketDeletePolicy',
requestType: request.apiMethods || 'bucketDeletePolicy',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

View File

@ -0,0 +1,58 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
* Bucket Delete Quota - Delete the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketDeleteQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketDeleteQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
request,
};
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket)),
(bucket, next) => {
bucket.setQuota(0);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketDeleteQuota'
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}
module.exports = bucketDeleteQuota;

View File
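The delete-quota handler above simply writes a quota of 0 back to the bucket metadata, which the rest of the code treats as "no quota". Illustrative effect, using only the calls visible in this file and in bucketGetQuota further down:

    // before: bucket.getQuota() === 10737418240
    bucket.setQuota(0);
    metadata.updateBucket(bucket.getName(), bucket, log, err => { /* 204 on success */ });
    // after:  bucket.getQuota() === 0  -> bucketGetQuota now answers errors.NoSuchQuota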

@ -1,5 +1,5 @@
const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -18,10 +18,10 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketDeleteReplication',
requestType: request.apiMethods || 'bucketDeleteReplication',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

View File

@ -1,6 +1,6 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const metadata = require('../metadata/wrapper');
@ -20,16 +20,20 @@ function bucketDeleteTagging(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketDeleteTagging',
requestType: request.apiMethods || 'bucketDeleteTagging',
request,
};
let bucket = null;
return waterfall([
next => metadataValidateBucket(metadataValParams, log,
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
if (err) {
return next(err);
}
bucket = b;
bucket.setTags([]);
return next(err);
return next();
}),
next => metadata.updateBucket(bucket.getName(), bucket, log, next),
], err => {

View File

@ -30,7 +30,8 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
}
log.trace('found bucket in metadata');
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketDeleteWebsite',

View File

@ -2,7 +2,7 @@ const querystring = require('querystring');
const { errors, versioning, s3middleware } = require('arsenal');
const constants = require('../../constants');
const services = require('../services');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities');
@ -322,7 +322,7 @@ function bucketGet(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGet',
requestType: request.apiMethods || 'bucketGet',
request,
};
const listParams = {
@ -345,7 +345,7 @@ function bucketGet(authInfo, request, log, callback) {
listParams.marker = params.marker;
}
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {

View File

@ -1,5 +1,5 @@
const aclUtils = require('../utilities/aclUtils');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
@ -44,7 +44,7 @@ function bucketGetACL(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetACL',
requestType: request.apiMethods || 'bucketGetACL',
request,
};
const grantInfo = {
@ -55,7 +55,7 @@ function bucketGetACL(authInfo, request, log, callback) {
},
};
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {

View File

@ -39,7 +39,8 @@ function bucketGetCors(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketGetCors',

View File

@ -4,7 +4,7 @@ const async = require('async');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml;
/**
@ -22,12 +22,12 @@ function bucketGetEncryption(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetEncryption',
requestType: request.apiMethods || 'bucketGetEncryption',
request,
};
return async.waterfall([
next => metadataValidateBucket(metadataValParams, log, next),
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, next),
(bucket, next) => checkExpectedBucketOwner(request.headers, bucket, log, err => next(err, bucket)),
(bucket, next) => {
// If sseInfo is present but the `mandatory` flag is not set

View File

@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const LifecycleConfiguration =
require('arsenal').models.LifecycleConfiguration;
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
@ -21,10 +21,10 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetLifecycle',
requestType: request.apiMethods || 'bucketGetLifecycle',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

View File

@ -41,7 +41,8 @@ function bucketGetLocation(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for account on bucket', {
requestType,
method: 'bucketGetLocation',

View File

@ -1,4 +1,4 @@
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { NotificationConfiguration } = require('arsenal').models;
@ -37,11 +37,11 @@ function bucketGetNotification(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetNotification',
requestType: request.apiMethods || 'bucketGetNotification',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

View File

@ -1,5 +1,5 @@
const { errors } = require('arsenal');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const ObjectLockConfiguration =
@ -33,10 +33,10 @@ function bucketGetObjectLock(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetObjectLock',
requestType: request.apiMethods || 'bucketGetObjectLock',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

View File

@ -1,6 +1,6 @@
const { errors } = require('arsenal');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
@ -17,11 +17,11 @@ function bucketGetPolicy(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetPolicy',
requestType: request.apiMethods || 'bucketGetPolicy',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

lib/api/bucketGetQuota.js (new file, 58 lines)
View File

@ -0,0 +1,58 @@
const { errors } = require('arsenal');
const { pushMetric } = require('../utapi/utilities');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
* bucketGetQuota - Get the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketGetQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGetQuota' });
const { bucketName, headers, method } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketGetQuota',
request,
};
const xml = [];
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketGetQuota',
});
return callback(err, null, corsHeaders);
}
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<GetBucketQuota>',
'<Name>', bucket.getName(), '</Name>',
);
const bucketQuota = bucket.getQuota();
if (!bucketQuota) {
log.debug('bucket has no quota', {
method: 'bucketGetQuota',
});
return callback(errors.NoSuchQuota, null,
corsHeaders);
}
xml.push('<Quota>', bucketQuota, '</Quota>',
'</GetBucketQuota>');
pushMetric('getBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, xml.join(''), corsHeaders);
});
}
module.exports = bucketGetQuota;

View File
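For reference, the quota is serialized as a bare byte count. Assuming a bucket named quota-bucket with a 10 GiB quota, the handler above would return a body along these lines, and errors.NoSuchQuota when no quota is set:

    <?xml version="1.0" encoding="UTF-8"?>
    <GetBucketQuota><Name>quota-bucket</Name><Quota>10737418240</Quota></GetBucketQuota>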

@ -1,6 +1,6 @@
const { errors } = require('arsenal');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const { getReplicationConfigurationXML } =
require('./apiUtils/bucket/getReplicationConfiguration');
@ -21,10 +21,10 @@ function bucketGetReplication(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetReplication',
requestType: request.apiMethods || 'bucketGetReplication',
request,
};
return metadataValidateBucket(metadataValParams, log, (err, bucket) => {
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {

View File

@ -1,4 +1,4 @@
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
const { pushMetric } = require('../utapi/utilities');
@ -37,7 +37,7 @@ const escapeForXml = s3middleware.escapeForXml;
function tagsToXml(tags) {
const xml = [];
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Tagging><TagSet>');
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?><Tagging> <TagSet>');
tags.forEach(tag => {
xml.push('<Tag>');
@ -46,7 +46,7 @@ function tagsToXml(tags) {
xml.push('</Tag>');
});
xml.push('</TagSet></Tagging>');
xml.push('</TagSet> </Tagging>');
return xml.join('');
}
@ -67,7 +67,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetTagging',
requestType: request.apiMethods || 'bucketGetTagging',
request,
};
let bucket = null;
@ -75,7 +75,7 @@ function bucketGetTagging(authInfo, request, log, callback) {
let tags = null;
return waterfall([
next => metadataValidateBucket(metadataValParams, log,
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
bucket = b;
return next(err);

View File

@ -1,4 +1,4 @@
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -54,11 +54,11 @@ function bucketGetVersioning(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketGetVersioning',
requestType: request.apiMethods || 'bucketGetVersioning',
request,
};
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {

View File

@ -39,7 +39,8 @@ function bucketGetWebsite(authInfo, request, log, callback) {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (!isBucketAuthorized(bucket, requestType, canonicalID, authInfo, log, request)) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketGetWebsite',

View File

@ -1,5 +1,5 @@
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -19,10 +19,10 @@ function bucketHead(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketHead',
requestType: request.apiMethods || 'bucketHead',
request,
};
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {

View File

@ -45,9 +45,8 @@ function checkLocationConstraint(request, locationConstraint, log) {
} else if (parsedHost && restEndpoints[parsedHost]) {
locationConstraintChecked = restEndpoints[parsedHost];
} else {
log.trace('no location constraint provided on bucket put;' +
'setting us-east-1');
locationConstraintChecked = 'us-east-1';
locationConstraintChecked = Object.keys(locationConstraints)[0];
log.trace('no location constraint provided on bucket put; setting ' + locationConstraintChecked);
}
if (!locationConstraints[locationConstraintChecked]) {

View File
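The fallback above replaces the hard-coded us-east-1 default with the first configured location. Since Object.keys preserves the insertion order of string keys, the effective default is simply whichever location is declared first in the configuration, e.g.:

    // config.locationConstraints = { "std": { ... }, "cold": { ... } }
    // -> locationConstraintChecked === 'std' when the request carries no LocationConstraint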

@ -70,8 +70,8 @@ function bucketPutCors(authInfo, request, log, callback) {
});
},
function validateBucketAuthorization(bucket, rules, corsHeaders, next) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, authInfo,
log, request, request.actionImplicitDenies)) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for account on bucket', {
requestType,
});

View File

@ -26,7 +26,7 @@ function bucketPutObjectLock(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketPutObjectLock',
requestType: request.apiMethods || 'bucketPutObjectLock',
request,
};
return waterfall([

View File

@ -1,10 +1,9 @@
const async = require('async');
const { errors, models } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { validatePolicyResource } =
const { validatePolicyResource, validatePolicyConditions } =
require('./apiUtils/authorization/permissionChecks');
const { BucketPolicy } = models;
@ -17,9 +16,8 @@ const { BucketPolicy } = models;
function _checkNotImplementedPolicy(policyString) {
// bucket names and key names cannot include "", so including those
// isolates not implemented keys
return policyString.includes('"Condition"')
|| policyString.includes('"Service"')
|| policyString.includes('"Federated"');
return policyString.includes('"Service"')
|| policyString.includes('"Federated"');
}
/**
@ -67,7 +65,7 @@ function bucketPutPolicy(authInfo, request, log, callback) {
return next(errors.MalformedPolicy.customizeDescription(
'Policy has invalid resource'));
}
return next(null, bucketPolicy);
return next(validatePolicyConditions(bucketPolicy), bucketPolicy);
});
},
(bucketPolicy, next) => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,

View File
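Dropping the '"Condition"' check in bucketPutPolicy above means policies that carry a Condition block are no longer rejected as not implemented; they are handed to validatePolicyConditions instead. A standard policy that previously failed this check and now reaches that validation (example only, not taken from the repository):

    {
      "Version": "2012-10-17",
      "Statement": [{
        "Sid": "DenyUnencryptedPuts",
        "Effect": "Deny",
        "Principal": "*",
        "Action": "s3:PutObject",
        "Resource": "arn:aws:s3:::example-bucket/*",
        "Condition": { "StringNotEquals": { "s3:x-amz-server-side-encryption": "AES256" } }
      }]
    }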

@ -3,7 +3,7 @@ const { s3middleware } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const { checkExpectedBucketOwner } = require('./apiUtils/authorization/bucketOwner');
@ -38,11 +38,12 @@ function bucketPutTagging(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'bucketPutTagging',
requestType: request.apiMethods || 'bucketPutTagging',
request,
};
let bucket = null;
return waterfall([
next => metadataValidateBucket(metadataValParams, log,
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
bucket = b;
return next(err);

View File

@ -49,8 +49,8 @@ function bucketPutWebsite(authInfo, request, log, callback) {
});
},
function validateBucketAuthorization(bucket, config, next) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID, authInfo,
log, request, request.actionImplicitDenies)) {
if (!isBucketAuthorized(bucket, request.apiMethods || requestType, canonicalID,
authInfo, log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', {
requestType,
method: 'bucketPutWebsite',

View File

@ -0,0 +1,85 @@
const { waterfall } = require('async');
const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const { parseString } = require('xml2js');
function validateBucketQuotaProperty(requestBody, next) {
const quota = requestBody.quota;
const quotaValue = parseInt(quota, 10);
if (Number.isNaN(quotaValue)) {
return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
}
if (quotaValue <= 0) {
return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
}
return next(null, quotaValue);
}
function parseRequestBody(requestBody, next) {
try {
const jsonData = JSON.parse(requestBody);
if (typeof jsonData !== 'object') {
throw new Error('Invalid JSON');
}
return next(null, jsonData);
} catch (jsonError) {
return parseString(requestBody, (xmlError, xmlData) => {
if (xmlError) {
return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
}
return next(null, xmlData);
});
}
}
function bucketUpdateQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketUpdateQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketUpdateQuota',
request,
};
let bucket = null;
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
bucket = b;
return next(err, bucket);
}),
(bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
(bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
next(err, bucket, quotaValue)),
(bucket, quotaValue, next) => {
bucket.setQuota(quotaValue);
return metadata.updateBucket(bucket.getName(), bucket, log, next);
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketUpdateQuota'
});
monitoring.promMetrics('PUT', bucketName, err.code,
'updateBucketQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'PUT', bucketName, '200', 'updateBucketQuota');
pushMetric('updateBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, corsHeaders);
});
}
module.exports = bucketUpdateQuota;

View File
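parseRequestBody above tries JSON first and only falls back to XML, and validateBucketQuotaProperty then requires a positive integer under the quota key, so the simplest accepted payload is a JSON body such as (hypothetical example, byte count chosen arbitrarily):

    {"quota": 10737418240}

The XML fallback goes through xml2js, whose output shape depends on the root element; since this diff does not show a canonical XML form, no XML example is given here.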

@ -12,7 +12,7 @@ const constants = require('../../constants');
const { versioningPreprocessing, checkQueryVersionId, decodeVID, overwritingVersioning }
= require('./apiUtils/object/versioning');
const services = require('../services');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
const { skipMpuPartProcessing } = storage.data.external.backendUtils;
@ -21,8 +21,6 @@ const { validateAndFilterMpuParts, generateMpuPartStorageInfo } =
const locationKeysHaveChanged
= require('./apiUtils/object/locationKeysHaveChanged');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const logger = require('../utilities/logger');
const { validatePutVersionId } = require('./apiUtils/object/coldStorage');
const versionIdUtils = versioning.VersionID;
@ -82,7 +80,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
uploadId,
// Note: permissions for completing a multipart upload are the
// same as putting a part.
requestType: 'putPart or complete',
requestType: request.apiMethods || 'putPart or complete',
log,
request,
};
@ -133,10 +131,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
bucketName,
// Required permissions for this action
// at the destinationBucket level are same as objectPut
requestType: 'objectPut',
requestType: request.apiMethods || 'completeMultipartUpload',
versionId,
request,
};
metadataValidateBucketAndObj(metadataValParams, log, next);
standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log, next);
},
function validateMultipart(destBucket, objMD, next) {
if (objMD) {
@ -214,9 +213,14 @@ function completeMultipartUpload(authInfo, request, log, callback) {
const mdInfo = { storedParts, mpuOverviewKey, splitter };
const mpuInfo =
{ objectKey, uploadId, jsonList, bucketName, destBucket };
const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.completeMPU(request, mpuInfo, mdInfo, location,
null, null, null, locationConstraintCheck, log,
(err, completeObjData) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityImpDenies;
if (err) {
return next(err, destBucket);
}
@ -470,12 +474,9 @@ function completeMultipartUpload(authInfo, request, log, callback) {
const newDataStoreName =
Array.isArray(dataLocations) && dataLocations[0] ?
dataLocations[0].dataStoreName : null;
const delLog =
logger.newRequestLoggerFromSerializedUids(log
.getSerializedUids());
return data.batchDelete(dataToDelete,
request.method,
newDataStoreName, delLog, err => {
newDataStoreName, log, err => {
if (err) {
return next(err);
}
@ -498,10 +499,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
function batchDeleteExtraParts(extraPartLocations, destinationBucket,
aggregateETag, generatedVersionId, next) {
if (extraPartLocations && extraPartLocations.length > 0) {
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(extraPartLocations, request.method,
null, delLog, err => {
null, log, err => {
if (err) {
return next(err);
}

View File
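The completeMultipartUpload hunks above (and the listParts and objectCopy hunks below) temporarily delete request.actionImplicitDenies around calls into the data layer and restore it afterwards, so external backends never see the auth map. Read as a pattern, it amounts to the helper sketched below; the helper name is invented for illustration, CloudServer inlines the pattern instead:

    // Illustrative only: hide the per-request implicit-deny map for the duration of one
    // data-layer call, then restore it for the rest of the waterfall.
    function callWithoutImplicitDenies(request, dataCall, done) {
        const saved = request.actionImplicitDenies;
        delete request.actionImplicitDenies;
        return dataCall((err, ...results) => {
            request.actionImplicitDenies = saved;
            return done(err, ...results);
        });
    }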

@ -6,10 +6,11 @@ const convertToXml = s3middleware.convertToXml;
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { hasNonPrintables } = require('../utilities/stringChecks');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants');
const services = require('../services');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
@ -65,7 +66,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (request.headers['x-amz-storage-class'] &&
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', bucketName,
errors.InvalidStorageClass.code, 'initiateMultipartUpload');
@ -105,7 +106,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
authInfo,
bucketName,
// Required permissions for this action are same as objectPut
requestType: 'objectPut',
requestType: request.apiMethods || 'initiateMultipartUpload',
request,
};
const accountCanonicalID = authInfo.getCanonicalID();
@ -274,7 +275,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
}
async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(error, destinationBucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin, request.method, destinationBucket);
if (error) {

View File
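With the change above, x-amz-storage-class is no longer validated against the hard-coded constants.validStorageClasses list but against the keys of config.locationConstraints, so every configured location doubles as an accepted storage class. Illustrative config excerpt and matching header (location names are hypothetical, entries abridged):

    "locationConstraints": {
        "us-east-1": { "type": "file" },
        "cold-storage": { "type": "file" }
    }
    // a client may then send:  x-amz-storage-class: cold-storage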

@ -6,7 +6,7 @@ const convertToXml = s3middleware.convertToXml;
const constants = require('../../constants');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const services = require('../services');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -96,8 +96,8 @@ function listMultipartUploads(authInfo, request, log, callback) {
// to list the multipart uploads so we have provided here that
// the authorization to list multipart uploads is the same
// as listing objects in a bucket.
requestType: 'bucketGet',
preciseRequestType: 'listMultipartUploads',
requestType: request.apiMethods || 'bucketGet',
preciseRequestType: request.apiMethods || 'listMultipartUploads',
request,
};
@ -105,7 +105,7 @@ function listMultipartUploads(authInfo, request, log, callback) {
function waterfall1(next) {
// Check final destination bucket for authorization rather
// than multipart upload bucket
metadataValidateBucket(metadataValParams, log,
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket));
},
function getMPUBucket(bucket, next) {

View File

@ -8,7 +8,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const locationConstraintCheck =
require('./apiUtils/object/locationConstraintCheck');
const services = require('../services');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
@ -97,7 +97,7 @@ function listParts(authInfo, request, log, callback) {
bucketName,
objectKey,
uploadId,
preciseRequestType: 'listParts',
preciseRequestType: request.apiMethods || 'listParts',
request,
};
// For validating the request at the destinationBucket level
@ -114,7 +114,7 @@ function listParts(authInfo, request, log, callback) {
async.waterfall([
function checkDestBucketVal(next) {
metadataValidateBucketAndObj(metadataValParams, log,
standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(err, destinationBucket) => {
if (err) {
return next(err, destinationBucket, null);
@ -152,8 +152,13 @@ function listParts(authInfo, request, log, callback) {
mpuOverviewObj,
destBucket,
};
const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.listParts(mpuInfo, request, locationConstraintCheck,
log, (err, backendPartList) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityImpDenies;
if (err) {
return next(err, destBucket);
}

View File

@ -1,7 +1,7 @@
const { errors, versioning } = require('arsenal');
const constants = require('../../constants');
const services = require('../services');
const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
@ -71,7 +71,7 @@ function metadataSearch(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: 'metadataSearch',
requestType: request.apiMethods || 'metadataSearch',
request,
};
const listParams = {
@ -103,7 +103,7 @@ function metadataSearch(authInfo, request, log, callback) {
listParams.marker = params.marker;
}
metadataValidateBucket(metadataValParams, log, (err, bucket) => {
standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {

View File

@ -11,7 +11,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const services = require('../services');
const vault = require('../auth/vault');
const { isBucketAuthorized } =
const { isBucketAuthorized, evaluateBucketPolicyWithIAM } =
require('./apiUtils/authorization/permissionChecks');
const { preprocessingVersioningDelete }
= require('./apiUtils/object/versioning');
@ -31,6 +31,7 @@ const { overheadField } = require('../../constants');
const versionIdUtils = versioning.VersionID;
const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
/*
Format of xml request:
@ -332,6 +333,9 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
return callback(null, objMD, versionId);
},
(objMD, versionId, callback) => validateQuotas(
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
(objMD, versionId, callback) => {
const options = preprocessingVersioningDelete(
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
@ -492,15 +496,47 @@ function multiObjectDelete(authInfo, request, log, callback) {
return next(null, quietSetting, objects);
});
},
function checkPolicies(quietSetting, objects, next) {
function checkBucketMetadata(quietSetting, objects, next) {
const errorResults = [];
return metadata.getBucket(bucketName, log, (err, bucketMD) => {
if (err) {
log.trace('error retrieving bucket metadata',
{ error: err });
return next(err);
}
// check whether bucket has transient or deleted flag
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
// affects the objects.
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results
objects.forEach(entry => {
errorResults.push({
entry,
error: errors.AccessDenied,
});
});
// by sending an empty array as the objects array
// async.forEachLimit below will not actually
// make any calls to metadata or data but will continue on
// to the next step to build xml
return next(null, quietSetting, errorResults, [], bucketMD);
}
return next(null, quietSetting, errorResults, objects, bucketMD);
});
},
function checkPolicies(quietSetting, errorResults, objects, bucketMD, next) {
// track keys that are still on track to be deleted
const inPlay = [];
const errorResults = [];
// if request from account, no need to check policies
// all objects are inPlay so send array of object keys
// as inPlay argument
if (!isRequesterNonAccountUser(authInfo)) {
return next(null, quietSetting, errorResults, objects);
return next(null, quietSetting, errorResults, objects, bucketMD);
}
// TODO: once arsenal's extractParams is separated from doAuth
@ -544,7 +580,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
error: errors.AccessDenied });
});
// send empty array for inPlay
return next(null, quietSetting, errorResults, []);
return next(null, quietSetting, errorResults, [], bucketMD);
}
if (err) {
log.trace('error checking policies', {
@ -562,6 +598,13 @@ function multiObjectDelete(authInfo, request, log, callback) {
});
return next(errors.InternalError);
}
// Convert authorization results into an easier to handle format
const actionImplicitDenies = authorizationResults.reduce((acc, curr, idx) => {
const apiMethod = authorizationResults[idx].action;
// eslint-disable-next-line no-param-reassign
acc[apiMethod] = curr.isImplicit;
return acc;
}, {});
for (let i = 0; i < authorizationResults.length; i++) {
const result = authorizationResults[i];
// result is { isAllowed: true,
@ -577,7 +620,26 @@ function multiObjectDelete(authInfo, request, log, callback) {
key: result.arn.slice(slashIndex + 1),
versionId: result.versionId,
};
if (result.isAllowed) {
// Deny immediately if there is an explicit deny
if (!result.isImplicit && !result.isAllowed) {
errorResults.push({
entry,
error: errors.AccessDenied,
});
continue;
}
// Evaluate against the bucket policies
const areAllActionsAllowed = evaluateBucketPolicyWithIAM(
bucketMD,
Object.keys(actionImplicitDenies),
canonicalID,
authInfo,
actionImplicitDenies,
log,
request);
if (areAllActionsAllowed) {
if (validObjectKeys.includes(entry.key)) {
inPlayInternal.push(entry.key);
} else {
@ -590,50 +652,9 @@ function multiObjectDelete(authInfo, request, log, callback) {
});
}
}
return next(null, quietSetting, errorResults, inPlay);
return next(null, quietSetting, errorResults, inPlay, bucketMD);
});
},
function checkBucketMetadata(quietSetting, errorResults, inPlay, next) {
// if no objects in play, no need to check ACLs / get metadata,
// just move on if there is no Origin header
if (inPlay.length === 0 && !request.headers.origin) {
return next(null, quietSetting, errorResults, inPlay,
undefined);
}
return metadata.getBucket(bucketName, log, (err, bucketMD) => {
if (err) {
log.trace('error retrieving bucket metadata',
{ error: err });
return next(err);
}
// check whether bucket has transient or deleted flag
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
// if no objects in play, no need to check ACLs
if (inPlay.length === 0) {
return next(null, quietSetting, errorResults, inPlay,
bucketMD);
}
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results
inPlay.forEach(entry => {
errorResults.push({
entry,
error: errors.AccessDenied,
});
});
// by sending an empty array as the inPlay array
// async.forEachLimit below will not actually
// make any calls to metadata or data but will continue on
// to the next step to build xml
return next(null, quietSetting, errorResults, [], bucketMD);
}
return next(null, quietSetting, errorResults, inPlay, bucketMD);
});
},
function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) {
return async.each(inPlayInternal,
(localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next),

View File
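Two details of the reworked multiObjectDelete flow above are easy to miss: the bucket-level ACL check now runs before the per-object policy evaluation, and validateQuotas is called with a negated content-length so deletions credit bytes back against the quota. The reduce over authorizationResults builds a per-action map of implicit denies; its shape, as inferred from the loop rather than from a documented Vault schema, is roughly:

    // authorizationResults (one entry per evaluated object/action):
    //   [{ action: 'objectDelete', isAllowed: false, isImplicit: true, arn: '...', versionId: undefined }, ...]
    // actionImplicitDenies after the reduce:
    //   { objectDelete: true }   // keyed by action; the last result for a given action wins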

@ -12,11 +12,10 @@ const { checkQueryVersionId, versioningPreprocessing }
= require('./apiUtils/object/versioning');
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const services = require('../services');
const { pushMetric } = require('../utapi/utilities');
const removeAWSChunked = require('./apiUtils/object/removeAWSChunked');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
.validateWebsiteHeader;
const { config } = require('../Config');
@ -221,6 +220,14 @@ function objectCopy(authInfo, request, sourceBucket,
versionId: sourceVersionId,
getDeleteMarker: true,
requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request,
};
const valPutParams = {
@ -228,6 +235,7 @@ function objectCopy(authInfo, request, sourceBucket,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPut',
checkQuota: false,
request,
};
const dataStoreContext = {
@ -241,7 +249,7 @@ function objectCopy(authInfo, request, sourceBucket,
const responseHeaders = {};
if (request.headers['x-amz-storage-class'] &&
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', destBucketName,
errors.InvalidStorageClass.code, 'copyObject');
@ -261,7 +269,7 @@ function objectCopy(authInfo, request, sourceBucket,
}
return async.waterfall([
function checkDestAuth(next) {
return metadataValidateBucketAndObj(valPutParams, log,
return standardMetadataValidateBucketAndObj(valPutParams, request.actionImplicitDenies, log,
(err, destBucketMD, destObjMD) => {
if (err) {
log.debug('error validating put part of request',
@ -279,7 +287,10 @@ function objectCopy(authInfo, request, sourceBucket,
});
},
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
return metadataValidateBucketAndObj(valGetParams, log,
return standardMetadataValidateBucketAndObj({
...valGetParams,
destObjMD,
}, request.actionImplicitDenies, log,
(err, sourceBucketMD, sourceObjMD) => {
if (err) {
log.debug('error validating get part of request',
@ -450,10 +461,15 @@ function objectCopy(authInfo, request, sourceBucket,
return next(null, storeMetadataParams, dataLocator, destObjMD,
serverSideEncryption, destBucketMD);
}
const originalIdentityImpDenies = request.actionImplicitDenies;
// eslint-disable-next-line no-param-reassign
delete request.actionImplicitDenies;
return data.copyObject(request, sourceLocationConstraintName,
storeMetadataParams, dataLocator, dataStoreContext,
backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
(err, results) => {
// eslint-disable-next-line no-param-reassign
request.actionImplicitDenies = originalIdentityImpDenies;
if (err) {
return next(err, destBucketMD);
}
@ -528,10 +544,8 @@ function objectCopy(authInfo, request, sourceBucket,
// the same as the destination
if (!sourceIsDestination && dataToDelete) {
const newDataStoreName = storeMetadataParams.dataStoreName;
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(dataToDelete, request.method,
newDataStoreName, delLog, err => {
newDataStoreName, log, err => {
if (err) {
// if error, log the error and move on as it is not
// relevant to the client as the client's

View File
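The split between the two validation parameter sets in objectCopy above follows the inline comment: the destination is checked with checkQuota: false because the write size is not yet known at that point, and quota evaluation is deferred to the source read, where the object's content-length is available. Summarized from the hunk (the destObjMD remark is an inference, not stated in the diff):

    // valPutParams -> destination bucket/object, requestType 'objectPut', checkQuota: false
    // valGetParams -> source bucket/object,      requestType 'objectGet', checkQuota: true
    //                 destObjMD is also passed through, presumably so an overwrite can be
    //                 netted against the existing object's size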

@ -8,7 +8,7 @@ const { pushMetric } = require('../utapi/utilities');
const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
const { decodeVersionId, preprocessingVersioningDelete }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
= require('./apiUtils/object/objectLockHelpers');
@ -56,14 +56,14 @@ function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
bucketName,
objectKey,
versionId: reqVersionId,
requestType: 'objectDelete',
requestType: request.apiMethods || 'objectDelete',
request,
};
const canonicalID = authInfo.getCanonicalID();
return async.waterfall([
function validateBucketAndObj(next) {
return metadataValidateBucketAndObj(valParams, log,
return standardMetadataValidateBucketAndObj(valParams, request.actionImplicitDenies, log,
(err, bucketMD, objMD) => {
if (err) {
return next(err, bucketMD);

View File

@ -4,7 +4,7 @@ const { errors } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader, getVersionSpecificMetadataOptions }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -44,12 +44,12 @@ function objectDeleteTagging(authInfo, request, log, callback) {
objectKey,
versionId: reqVersionId,
getDeleteMarker: true,
requestType: 'objectDeleteTagging',
requestType: request.apiMethods || 'objectDeleteTagging',
request,
};
return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',

View File

@ -15,7 +15,7 @@ const getReplicationBackendDataLocator =
require('./apiUtils/object/getReplicationBackendDataLocator');
const checkReadLocation = require('./apiUtils/object/checkReadLocation');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { config } = require('../Config');
const { locationConstraints } = config;
const monitoring = require('../utilities/monitoringHandler');
@ -66,11 +66,11 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
objectKey,
versionId,
getDeleteMarker: true,
requestType: 'objectGet',
requestType: request.apiMethods || 'objectGet',
request,
};
return metadataValidateBucketAndObj(mdValParams, log,
return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log,
(err, bucket, objMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);

View File

@ -7,7 +7,7 @@ const { pushMetric } = require('../utapi/utilities');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const vault = require('../auth/vault');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
// Sample XML response:
@ -61,7 +61,7 @@ function objectGetACL(authInfo, request, log, callback) {
bucketName,
objectKey,
versionId,
requestType: 'objectGetACL',
requestType: request.apiMethods || 'objectGetACL',
request,
};
const grantInfo = {
@ -74,7 +74,7 @@ function objectGetACL(authInfo, request, log, callback) {
return async.waterfall([
function validateBucketAndObj(next) {
return metadataValidateBucketAndObj(metadataValParams, log,
return standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',

View File

@ -4,7 +4,7 @@ const { errors, s3middleware } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -40,12 +40,12 @@ function objectGetLegalHold(authInfo, request, log, callback) {
bucketName,
objectKey,
versionId,
requestType: 'objectGetLegalHold',
requestType: request.apiMethods || 'objectGetLegalHold',
request,
};
return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',

View File

@ -4,7 +4,7 @@ const { errors, s3middleware } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -40,12 +40,12 @@ function objectGetRetention(authInfo, request, log, callback) {
bucketName,
objectKey,
versionId: reqVersionId,
requestType: 'objectGetRetention',
requestType: request.apiMethods || 'objectGetRetention',
request,
};
return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',

View File

@ -4,7 +4,7 @@ const { errors, s3middleware } = require('arsenal');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { convertToXml } = s3middleware.tagging;
@ -41,12 +41,12 @@ function objectGetTagging(authInfo, request, log, callback) {
bucketName,
objectKey,
versionId: reqVersionId,
requestType: 'objectGetTagging',
requestType: request.apiMethods || 'objectGetTagging',
request,
};
return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',

View File

@ -13,7 +13,7 @@ const { getPartNumber, getPartSize, getPartCountFromMd5 } =
const { config } = require('../Config');
const { locationConstraints } = config;
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { maximumAllowedPartCount } = require('../../constants');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
@ -48,11 +48,11 @@ function objectHead(authInfo, request, log, callback) {
objectKey,
versionId,
getDeleteMarker: true,
requestType: 'objectHead',
requestType: request.apiMethods || 'objectHead',
request,
};
return metadataValidateBucketAndObj(mdValParams, log,
return standardMetadataValidateBucketAndObj(mdValParams, request.actionImplicitDenies, log,
(err, bucket, objMD) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);

View File

@ -3,6 +3,7 @@ const { errors, versioning } = require('arsenal');
const constants = require('../../constants');
const aclUtils = require('../utilities/aclUtils');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -71,7 +72,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
query,
} = request;
if (headers['x-amz-storage-class'] &&
!constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
!config.locationConstraints[headers['x-amz-storage-class']]) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', request.bucketName,
errors.InvalidStorageClass.code, 'putObject');
@ -98,7 +99,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
'The encryption method specified is not supported');
const requestType = request.apiMethods || 'objectPut';
const valParams = { authInfo, bucketName, objectKey, versionId,
requestType, request };
requestType, request, withVersionId: isPutVersion };
const canonicalID = authInfo.getCanonicalID();
if (hasNonPrintables(objectKey)) {
@ -242,6 +243,14 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
monitoring.promMetrics('PUT', bucketName, '200',
'putObject', newByteLength, oldByteLength, isVersionedObj,
null, ingestSize);
if (isPutVersion) {
const durationMs = Date.now() - new Date(objMD.archive.restoreRequestedAt);
monitoring.lifecycleDuration.observe(
{ type: 'restore', location: objMD.dataStoreName },
durationMs / 1000);
}
return callback(null, responseHeaders);
});
});

View File
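The new block in objectPut above records how long a restore took by comparing the current time against objMD.archive.restoreRequestedAt and observing the result in seconds. lifecycleDuration is assumed here to be a prom-client Histogram exposed by monitoringHandler with type and location labels; a sketch of a matching declaration (metric name and buckets are assumptions, not taken from the codebase):

    const promClient = require('prom-client');
    // Hypothetical declaration matching the .observe({ type, location }, seconds) call site above.
    const lifecycleDuration = new promClient.Histogram({
        name: 'lifecycle_duration_seconds',
        help: 'Duration of lifecycle operations such as restores, in seconds',
        labelNames: ['type', 'location'],
    });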

@ -88,6 +88,7 @@ function objectPutACL(authInfo, request, log, cb) {
versionId: reqVersionId,
getDeleteMarker: true,
requestType: request.apiMethods || 'objectPutACL',
request,
};
const possibleGrants = ['FULL_CONTROL', 'WRITE_ACP', 'READ', 'READ_ACP'];

View File

@ -9,12 +9,12 @@ const locationConstraintCheck =
require('./apiUtils/object/locationConstraintCheck');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const logger = require('../utilities/logger');
const services = require('../services');
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const versionIdUtils = versioning.VersionID;
@ -46,6 +46,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
versionId: reqVersionId,
getDeleteMarker: true,
requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request,
};
@ -68,7 +76,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
authInfo,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPut',
requestType: 'objectPutPart',
checkQuota: false,
request,
};
@ -89,6 +98,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
objectKey: destObjectKey,
partNumber: paddedPartNumber,
uploadId,
enableQuota: true,
};
return async.waterfall([
@ -182,9 +192,16 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
return next(null, copyLocator.dataLocator, destBucketMD,
copyLocator.copyObjectSize, sourceVerId,
sourceLocationConstraintName);
sourceLocationConstraintName, sourceObjMD);
});
},
function _validateQuotas(dataLocator, destBucketMD,
copyObjectSize, sourceVerId,
sourceLocationConstraintName, sourceObjMD, next) {
return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType,
request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err =>
next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName));
},
// get MPU shadow bucket to get splitter based on MD version
function getMpuShadowBucket(dataLocator, destBucketMD,
copyObjectSize, sourceVerId,
@ -382,10 +399,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
// Clean up the old data now that new metadata (with new
// data locations) has been stored
if (oldLocationsToDelete) {
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(oldLocationsToDelete, request.method, null,
delLog, err => {
log, err => {
if (err) {
// if error, log the error and move on as it is not
// relevant to the client as the client's
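As the comment above explains, the number of bytes about to be written is only known once the copy locator has resolved the source object's metadata, so quota evaluation is enabled on the source objectGet validation and performed explicitly in the new _validateQuotas waterfall step. A sketch of that call with each argument labelled; the parameter meanings are inferred from the call sites in this changeset, not from the helper's own documentation:

// Sketch only: assumes validateQuotas from './apiUtils/quotas/quotaUtils'.
function checkCopyPartQuota(request, destBucketMD, valPutParams, sourceObjMD, log, next) {
    return validateQuotas(
        request,                              // incoming request
        destBucketMD,                         // bucket whose quota is evaluated
        request.accountQuotas,                // account-level quotas resolved earlier
        valPutParams.requestType,             // API action being authorized
        request.apiMethod,                    // concrete API method name
        sourceObjMD?.['content-length'] || 0, // bytes about to be written
        false,                                // isPutVersion: not a restore put here
        log,
        next);
}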

View File

@ -11,7 +11,6 @@ const { isBucketAuthorized } =
const kms = require('../kms/wrapper');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const logger = require('../utilities/logger');
const services = require('../services');
const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
@ -22,6 +21,7 @@ const { BackendInfo } = models;
const writeContinue = require('../utilities/writeContinue');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const skipError = new Error('skip');
@ -61,6 +61,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
log.debug('processing request', { method: 'objectPutPart' });
const size = request.parsedContentLength;
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
log.debug('put part size too large', { size });
monitoring.promMetrics('PUT', request.bucketName, 400,
@ -104,6 +107,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
const { objectKey } = request;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// For validating the request at the destinationBucket level, the
// `requestType` defaults to 'objectPutPart'.

const requestType = request.apiMethods || 'objectPutPart';
return async.waterfall([
// Get the destination bucket.
@ -123,16 +129,15 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}),
// Check the bucket authorization.
(destinationBucket, next) => {
// For validating the request at the destinationBucket level the
// `requestType` is the general 'objectPut'.
const requestType = 'objectPut';
if (!isBucketAuthorized(destinationBucket, request.apiMethods || requestType, canonicalID, authInfo,
if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo,
log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { requestType });
return next(errors.AccessDenied, destinationBucket);
}
return next(null, destinationBucket);
},
(destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas,
requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)),
// Get bucket server-side encryption, if it exists.
(destinationBucket, next) => getObjectSSEConfiguration(
request.headers, destinationBucket, log,
@ -380,10 +385,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
prevObjectSize, next) => {
if (oldLocationsToDelete) {
log.trace('overwriting mpu part, deleting data');
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(oldLocationsToDelete, request.method,
objectLocationConstraint, delLog, err => {
objectLocationConstraint, log, err => {
if (err) {
// if error, log the error and move on as it is not
// relevant to the client as the client's
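The x-scal-s3-version-id header marks a put by version (typically a restore write), and an empty header value still counts as present, which is why the check is not a plain truthiness test. A small illustration of the three cases:

// Illustration of the isPutVersion check used above.
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
// header absent                      -> putVersionId === undefined -> isPutVersion === false
// 'x-scal-s3-version-id: '           -> putVersionId === ''        -> isPutVersion === true
// 'x-scal-s3-version-id: 39383530…'  -> isPutVersion is the (truthy) version id itself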

View File

@ -50,41 +50,49 @@ function objectPutRetention(authInfo, request, log, callback) {
};
return async.waterfall([
next => standardMetadataValidateBucketAndObj(metadataValParams, request.actionImplicitDenies, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectPutRetention', error: err });
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error no object metadata found',
{ method: 'objectPutRetention', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
log.trace('version is a delete marker',
{ method: 'objectPutRetention' });
// FIXME we should return a `x-amz-delete-marker: true` header,
// see S3C-7592
return next(errors.MethodNotAllowed, bucket);
}
if (!bucket.isObjectLockEnabled()) {
log.trace('object lock not enabled on bucket',
{ method: 'objectPutRetention' });
return next(errors.InvalidRequest.customizeDescription(
'Bucket is missing Object Lock Configuration'
), bucket);
}
return next(null, bucket, objectMD);
}),
(bucket, objectMD, next) => {
next => {
log.trace('parsing retention information');
parseRetentionXml(request.post, log,
(err, retentionInfo) => next(err, bucket, retentionInfo, objectMD));
(err, retentionInfo) => {
if (err) {
log.trace('error parsing retention information',
{ error: err });
return next(err);
}
const remainingDays = Math.ceil(
(new Date(retentionInfo.date) - Date.now()) / (1000 * 3600 * 24));
metadataValParams.request.objectLockRetentionDays = remainingDays;
return next(null, retentionInfo);
});
},
(retentionInfo, next) => standardMetadataValidateBucketAndObj(metadataValParams,
request.actionImplicitDenies, log, (err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectPutRetention', error: err });
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error no object metadata found',
{ method: 'objectPutRetention', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
log.trace('version is a delete marker',
{ method: 'objectPutRetention' });
return next(errors.MethodNotAllowed, bucket);
}
if (!bucket.isObjectLockEnabled()) {
log.trace('object lock not enabled on bucket',
{ method: 'objectPutRetention' });
return next(errors.InvalidRequest.customizeDescription(
'Bucket is missing Object Lock Configuration'
), bucket);
}
return next(null, bucket, retentionInfo, objectMD);
}),
(bucket, retentionInfo, objectMD, next) => {
const hasGovernanceBypass = hasGovernanceBypassHeader(request.headers);
if (hasGovernanceBypass && isRequesterNonAccountUser(authInfo)) {
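Parsing the retention XML before metadata validation lets the authorization step see how long the requested retention lasts: objectLockRetentionDays carries the number of days, rounded up, between now and the requested retain-until date. A short worked example with illustrative dates:

// Illustrative values only.
const retentionInfo = { mode: 'GOVERNANCE', date: '2024-08-15T00:00:00.000Z' };
const now = new Date('2024-08-01T00:00:00.000Z').getTime();
const remainingDays = Math.ceil(
    (new Date(retentionInfo.date) - now) / (1000 * 3600 * 24)); // 14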

lib/api/website.js (new file, 313 lines)
View File

@ -0,0 +1,313 @@
const { errors, s3middleware } = require('arsenal');
const validateHeaders = s3middleware.validateConditionalHeaders;
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const metadata = require('../metadata/wrapper');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { appendWebsiteIndexDocument, findRoutingRule, extractRedirectInfo } =
require('./apiUtils/object/websiteServing');
const { isObjAuthorized, isBucketAuthorized } =
require('./apiUtils/authorization/permissionChecks');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
/**
* _errorActions - takes a number of actions once an error occurs while getting the object
* @param {object} err - arsenal errors object
* @param {string} errorDocument - key to get error document
* @param {object []} routingRules - array of routingRule objects
* @param {object} bucket - bucket metadata
* @param {string} objectKey - object key from request (or as translated in
* website)
* @param {object} corsHeaders - CORS-related response headers
* @param {object} request - normalized request object
* @param {object} log - Werelogs instance
* @param {function} callback - callback to function in route
* @return {undefined}
*/
function _errorActions(err, errorDocument, routingRules,
bucket, objectKey, corsHeaders, request, log, callback) {
const bucketName = bucket.getName();
const errRoutingRule = findRoutingRule(routingRules,
objectKey, err.code);
if (errRoutingRule) {
// route will redirect
const action = request.method === 'HEAD' ? 'headObject' : 'getObject';
monitoring.promMetrics(
request.method, bucketName, err.code, action);
return callback(err, false, null, corsHeaders, errRoutingRule,
objectKey);
}
if (request.method === 'HEAD') {
monitoring.promMetrics(
'HEAD', bucketName, err.code, 'headObject');
return callback(err, false, null, corsHeaders);
}
if (errorDocument) {
return metadata.getObjectMD(bucketName, errorDocument, {}, log,
(errObjErr, errObjMD) => {
if (errObjErr) {
// error retrieving error document so return original error
// and set boolean of error retrieving user's error document
// to true
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, true, null, corsHeaders);
}
// return the default error message if the object is private
// rather than sending a stored error file
// eslint-disable-next-line no-param-reassign
request.objectKey = errorDocument;
if (!isObjAuthorized(bucket, errObjMD, request.apiMethods || 'objectGet',
constants.publicId, null, log, request, request.actionImplicitDenies, true)) {
log.trace('errorObj not authorized', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, true, null, corsHeaders);
}
const dataLocator = errObjMD.location;
if (errObjMD['x-amz-server-side-encryption']) {
for (let i = 0; i < dataLocator.length; i++) {
dataLocator[i].masterKeyId =
errObjMD['x-amz-server-side-encryption-aws-' +
'kms-key-id'];
dataLocator[i].algorithm =
errObjMD['x-amz-server-side-encryption'];
}
}
if (errObjMD['x-amz-website-redirect-location']) {
const redirectLocation =
errObjMD['x-amz-website-redirect-location'];
const redirectInfo = { withError: true,
location: redirectLocation };
log.trace('redirecting to x-amz-website-redirect-location',
{ location: redirectLocation });
return callback(err, false, dataLocator, corsHeaders,
redirectInfo, '');
}
const responseMetaHeaders = collectResponseHeaders(errObjMD,
corsHeaders);
pushMetric('getObject', log, {
bucket: bucketName,
newByteLength: responseMetaHeaders['Content-Length'],
});
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, dataLocator, responseMetaHeaders);
});
}
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, null, corsHeaders);
}
function capitalize(str) {
if (!str || typeof str !== 'string') {
return str;
}
return str.charAt(0).toUpperCase() + str.slice(1);
}
/**
* Callbacks have different signatures for GET and HEAD.
* The website function uses the GET callback signature,
* so this wraps the HEAD callback to match the GET signature.
* @param {function} callback - HEAD callback
* @returns {function} HEAD callback with GET signature
*/
function callbackGetToHead(callback) {
return (err, userErrorPageFailure, dataGetInfo,
resMetaHeaders, redirectInfo, key) =>
callback(err, resMetaHeaders, redirectInfo, key);
}
/**
* Website - Common website function for GET and HEAD
* Gets metadata and object for website or redirects
* @param {object} request - normalized request object
* @param {object} log - Werelogs instance
* @param {function} callback - callback to function in route
* @return {undefined}
*/
function website(request, log, callback) {
if (request.method === 'HEAD') {
// eslint-disable-next-line no-param-reassign
callback = callbackGetToHead(callback);
}
const methodCapitalized = capitalize(request.method);
const action = request.method === 'HEAD' ? 'headObject' : 'getObject';
log.debug('processing request', { method: `website${methodCapitalized}` });
const bucketName = request.bucketName;
const reqObjectKey = request.objectKey ? request.objectKey : '';
return metadata.getBucket(bucketName, log, (err, bucket) => {
if (err) {
log.trace('error retrieving bucket metadata', { error: err });
monitoring.promMetrics(
request.method, bucketName, err.code, action);
return callback(err, false);
}
if (bucketShield(bucket, `object${methodCapitalized}`)) {
log.trace('bucket in transient/deleted state so shielding');
monitoring.promMetrics(
request.method, bucketName, 404, action);
return callback(errors.NoSuchBucket, false);
}
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
// bucket ACL's do not matter for website head since it is always the
// head of an object. object ACL's are what matter
const websiteConfig = bucket.getWebsiteConfiguration();
if (!websiteConfig) {
monitoring.promMetrics(
request.method, bucketName, 404, action);
return callback(errors.NoSuchWebsiteConfiguration, false, null,
corsHeaders);
}
// any errors above would be our own generated generic error html;
// once there is a website config, errors from here on use the user's
// redirect or error page if either is set in the config
// handle redirect all
if (websiteConfig.getRedirectAllRequestsTo()) {
return callback(null, false, null, corsHeaders,
websiteConfig.getRedirectAllRequestsTo(), reqObjectKey);
}
// check whether need to redirect based on key
const routingRules = websiteConfig.getRoutingRules();
const keyRoutingRule = findRoutingRule(routingRules, reqObjectKey);
if (keyRoutingRule) {
// TODO: optimize by not rerouting if only routing
// rule is to change out key
return callback(null, false, null, corsHeaders,
keyRoutingRule, reqObjectKey);
}
appendWebsiteIndexDocument(request, websiteConfig.getIndexDocument());
/**
* Recursive function (at most one recursive call) that, on error, looks for
* the index document under the key, allowing a redirect to the folder
* notation when an index has not already been appended
* @param {Error} [originalError] - presence of this argument
* differentiates original user request from recursive call to /index.
* This error is returned if /index is not found
* @returns {undefined}
*/
function runWebsite(originalError) {
// get object metadata and check authorization and header
// validation
return metadata.getObjectMD(bucketName, request.objectKey, {}, log,
(err, objMD) => {
// Note: In case of error, we intentionally send the original
// object key to _errorActions as in case of a redirect, we do
// not want to append index key to redirect location
if (err) {
log.trace('error retrieving object metadata',
{ error: err });
let returnErr = err;
const bucketAuthorized = isBucketAuthorized(bucket, request.apiMethods || 'bucketGet',
constants.publicId, null, log, request, request.actionImplicitDenies, true);
// if index object does not exist and bucket is private AWS
// returns 403 - AccessDenied error.
if (err.is.NoSuchKey && !bucketAuthorized) {
returnErr = errors.AccessDenied;
}
// Check if key is a folder containing index for redirect 302
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/IndexDocumentSupport.html
if (!originalError && reqObjectKey && !reqObjectKey.endsWith('/')) {
appendWebsiteIndexDocument(request, websiteConfig.getIndexDocument(), true);
// propagate returnErr as originalError to be used if index is not found
return runWebsite(returnErr);
}
return _errorActions(originalError || returnErr,
websiteConfig.getErrorDocument(), routingRules,
bucket, reqObjectKey, corsHeaders, request, log,
callback);
}
if (!isObjAuthorized(bucket, objMD, request.apiMethods || 'objectGet',
constants.publicId, null, log, request, request.actionImplicitDenies, true)) {
const err = errors.AccessDenied;
log.trace('request not authorized', { error: err });
return _errorActions(err, websiteConfig.getErrorDocument(),
routingRules, bucket,
reqObjectKey, corsHeaders, request, log, callback);
}
// access granted to index document, needs a redirect 302
// to the original key with trailing /
if (originalError) {
const redirectInfo = { withError: true,
location: `/${reqObjectKey}/` };
return callback(errors.Found, false, null, corsHeaders,
redirectInfo, '');
}
const headerValResult = validateHeaders(request.headers,
objMD['last-modified'], objMD['content-md5']);
if (headerValResult.error) {
const err = headerValResult.error;
log.trace('header validation error', { error: err });
return _errorActions(err, websiteConfig.getErrorDocument(),
routingRules, bucket, reqObjectKey,
corsHeaders, request, log, callback);
}
// check if object to serve has website redirect header
// Note: AWS prioritizes website configuration rules over
// object key's website redirect header, so we make the
// check at the end.
if (objMD['x-amz-website-redirect-location']) {
const redirectLocation =
objMD['x-amz-website-redirect-location'];
const redirectInfo =
extractRedirectInfo(redirectLocation);
log.trace('redirecting to x-amz-website-redirect-location',
{ location: redirectLocation });
return callback(null, false, null, corsHeaders,
redirectInfo, '');
}
// got obj metadata, authorized and headers validated,
// good to go
const responseMetaHeaders = collectResponseHeaders(objMD,
corsHeaders);
if (request.method === 'HEAD') {
pushMetric('headObject', log, { bucket: bucketName });
monitoring.promMetrics('HEAD',
bucketName, '200', 'headObject');
return callback(null, false, null, responseMetaHeaders);
}
const dataLocator = objMD.location;
if (objMD['x-amz-server-side-encryption']) {
for (let i = 0; i < dataLocator.length; i++) {
dataLocator[i].masterKeyId =
objMD['x-amz-server-side-encryption-aws-' +
'kms-key-id'];
dataLocator[i].algorithm =
objMD['x-amz-server-side-encryption'];
}
}
pushMetric('getObject', log, {
bucket: bucketName,
newByteLength: responseMetaHeaders['Content-Length'],
});
monitoring.promMetrics('GET', bucketName, '200',
'getObject', responseMetaHeaders['Content-Length']);
return callback(null, false, dataLocator, responseMetaHeaders);
});
}
return runWebsite();
});
}
module.exports = website;
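The recursive runWebsite call implements the folder-index fallback: the first lookup uses the key as requested (with the index document already appended for keys ending in '/'), and on a miss for a key without a trailing slash a second lookup tries '<key>/<indexDocument>'; if that index exists the response is a 302 Found redirect to '/<key>/', otherwise the original error is reported. A condensed, self-contained sketch of that control flow; getObjectMD here is a stand-in for the metadata wrapper used above:

// Condensed sketch of the folder-index fallback, assuming a callback-style
// getObjectMD(bucketName, key, cb) similar to the metadata wrapper.
function resolveWebsiteKey(bucketName, key, indexDocument, getObjectMD, cb) {
    getObjectMD(bucketName, key, (err, objMD) => {
        if (!err) {
            return cb(null, { key, objMD });          // direct hit: serve it
        }
        if (!key || key.endsWith('/')) {
            return cb(err);                           // index already appended upstream
        }
        const indexKey = `${key}/${indexDocument}`;
        return getObjectMD(bucketName, indexKey, (indexErr, indexMD) => {
            if (indexErr) {
                return cb(err);                       // report the original error
            }
            // the "folder" has an index: answer 302 Found to the key with '/'
            return cb(null, { redirect: `/${key}/`, objMD: indexMD });
        });
    });
}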

View File

@ -1,234 +0,0 @@
const { errors, s3middleware } = require('arsenal');
const validateHeaders = s3middleware.validateConditionalHeaders;
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const metadata = require('../metadata/wrapper');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { findRoutingRule, extractRedirectInfo } =
require('./apiUtils/object/websiteServing');
const { isObjAuthorized, isBucketAuthorized } =
require('./apiUtils/authorization/permissionChecks');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
/**
* _errorActions - take a number of actions once have error getting obj
* @param {object} err - arsenal errors object
* @param {string} errorDocument - key to get error document
* @param {object []} routingRules - array of routingRule objects
* @param {object} bucket - bucket metadata
* @param {string} objectKey - object key from request (or as translated in
* websiteGet)
* @param {object} corsHeaders - CORS-related response headers
* @param {object} log - Werelogs instance
* @param {function} callback - callback to function in route
* @return {undefined}
*/
function _errorActions(err, errorDocument, routingRules,
bucket, objectKey, corsHeaders, log, callback) {
const bucketName = bucket.getName();
const errRoutingRule = findRoutingRule(routingRules,
objectKey, err.code);
if (errRoutingRule) {
// route will redirect
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, null, corsHeaders, errRoutingRule,
objectKey);
}
if (errorDocument) {
return metadata.getObjectMD(bucketName, errorDocument, {}, log,
(errObjErr, errObjMD) => {
if (errObjErr) {
// error retrieving error document so return original error
// and set boolean of error retrieving user's error document
// to true
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, true, null, corsHeaders);
}
// return the default error message if the object is private
// rather than sending a stored error file
if (!isObjAuthorized(bucket, errObjMD, 'objectGet',
constants.publicId, null, log)) {
log.trace('errorObj not authorized', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, true, null, corsHeaders);
}
const dataLocator = errObjMD.location;
if (errObjMD['x-amz-server-side-encryption']) {
for (let i = 0; i < dataLocator.length; i++) {
dataLocator[i].masterKeyId =
errObjMD['x-amz-server-side-encryption-aws-' +
'kms-key-id'];
dataLocator[i].algorithm =
errObjMD['x-amz-server-side-encryption'];
}
}
const responseMetaHeaders = collectResponseHeaders(errObjMD,
corsHeaders);
pushMetric('getObject', log, {
bucket: bucketName,
newByteLength: responseMetaHeaders['Content-Length'],
});
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, dataLocator, responseMetaHeaders);
});
}
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, null, corsHeaders);
}
/**
* GET Website - Gets object for website or redirects
* @param {object} request - normalized request object
* @param {object} log - Werelogs instance
* @param {function} callback - callback to function in route
* @return {undefined}
*/
function websiteGet(request, log, callback) {
log.debug('processing request', { method: 'websiteGet' });
const bucketName = request.bucketName;
const reqObjectKey = request.objectKey ? request.objectKey : '';
let objectKey = reqObjectKey;
return metadata.getBucket(bucketName, log, (err, bucket) => {
if (err) {
log.trace('error retrieving bucket metadata', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false);
}
if (bucketShield(bucket, 'objectGet')) {
log.trace('bucket in transient/deleted state so shielding');
monitoring.promMetrics(
'GET', bucketName, 404, 'getObject');
return callback(errors.NoSuchBucket, false);
}
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
const websiteConfig = bucket.getWebsiteConfiguration();
if (!websiteConfig) {
monitoring.promMetrics(
'GET', bucketName, 404, 'getObject');
return callback(errors.NoSuchWebsiteConfiguration, false, null,
corsHeaders);
}
// any errors above would be our own created generic error html
// if have a website config, error going forward would be user's
// redirect or error page if they set either in the config
// handle redirect all
if (websiteConfig.getRedirectAllRequestsTo()) {
return callback(null, false, null, corsHeaders,
websiteConfig.getRedirectAllRequestsTo(), objectKey);
}
// check whether need to redirect based on key
const routingRules = websiteConfig.getRoutingRules();
const keyRoutingRule = findRoutingRule(routingRules, objectKey);
if (keyRoutingRule) {
// TODO: optimize by not rerouting if only routing
// rule is to change out key
return callback(null, false, null, corsHeaders,
keyRoutingRule, objectKey);
}
// find index document if "directory" sent in request
if (reqObjectKey.endsWith('/')) {
objectKey += websiteConfig.getIndexDocument();
}
// find index document if no key provided
if (reqObjectKey === '') {
objectKey = websiteConfig.getIndexDocument();
}
// get object metadata and check authorization and header
// validation
return metadata.getObjectMD(bucketName, objectKey, {}, log,
(err, objMD) => {
// Note: In case of error, we intentionally send the original
// object key to _errorActions as in case of a redirect, we do
// not want to append index key to redirect location
if (err) {
log.trace('error retrieving object metadata',
{ error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
let returnErr = err;
const bucketAuthorized = isBucketAuthorized(bucket,
'bucketGet', constants.publicId, null, log, request);
// if index object does not exist and bucket is private AWS
// returns 403 - AccessDenied error.
if (err.is.NoSuchKey && !bucketAuthorized) {
returnErr = errors.AccessDenied;
}
return _errorActions(returnErr,
websiteConfig.getErrorDocument(), routingRules,
bucket, reqObjectKey, corsHeaders, log,
callback);
}
if (!isObjAuthorized(bucket, objMD, 'objectGet',
constants.publicId, null, log, request)) {
const err = errors.AccessDenied;
log.trace('request not authorized', { error: err });
return _errorActions(err, websiteConfig.getErrorDocument(),
routingRules, bucket,
reqObjectKey, corsHeaders, log, callback);
}
const headerValResult = validateHeaders(request.headers,
objMD['last-modified'], objMD['content-md5']);
if (headerValResult.error) {
const err = headerValResult.error;
log.trace('header validation error', { error: err });
return _errorActions(err, websiteConfig.getErrorDocument(),
routingRules, bucket, reqObjectKey,
corsHeaders, log, callback);
}
// check if object to serve has website redirect header
// Note: AWS prioritizes website configuration rules over
// object key's website redirect header, so we make the
// check at the end.
if (objMD['x-amz-website-redirect-location']) {
const redirectLocation =
objMD['x-amz-website-redirect-location'];
const redirectInfo =
extractRedirectInfo(redirectLocation);
log.trace('redirecting to x-amz-website-redirect-location',
{ location: redirectLocation });
return callback(null, false, null, corsHeaders,
redirectInfo, '');
}
// got obj metadata, authorized and headers validated,
// good to go
const responseMetaHeaders = collectResponseHeaders(objMD,
corsHeaders);
const dataLocator = objMD.location;
if (objMD['x-amz-server-side-encryption']) {
for (let i = 0; i < dataLocator.length; i++) {
dataLocator[i].masterKeyId =
objMD['x-amz-server-side-encryption-aws-' +
'kms-key-id'];
dataLocator[i].algorithm =
objMD['x-amz-server-side-encryption'];
}
}
pushMetric('getObject', log, {
bucket: bucketName,
newByteLength: responseMetaHeaders['Content-Length'],
});
monitoring.promMetrics('GET', bucketName, '200',
'getObject', responseMetaHeaders['Content-Length']);
return callback(null, false, dataLocator, responseMetaHeaders);
});
});
}
module.exports = websiteGet;

View File

@ -1,168 +0,0 @@
const { errors, s3middleware } = require('arsenal');
const validateHeaders = s3middleware.validateConditionalHeaders;
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const metadata = require('../metadata/wrapper');
const bucketShield = require('./apiUtils/bucket/bucketShield');
const { findRoutingRule, extractRedirectInfo } =
require('./apiUtils/object/websiteServing');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const { isBucketAuthorized, isObjAuthorized } =
require('./apiUtils/authorization/permissionChecks');
/**
* _errorActions - take a number of actions once have error getting obj
* @param {object} err - arsenal errors object
* @param {object []} routingRules - array of routingRule objects
* @param {string} objectKey - object key from request (or as translated in
* websiteGet)
* @param {object} corsHeaders - CORS-related response headers
* @param {object} log - Werelogs instance
* @param {function} callback - callback to function in route
* @return {undefined}
*/
function _errorActions(err, routingRules, objectKey, corsHeaders, log,
callback) {
const errRoutingRule = findRoutingRule(routingRules, objectKey, err.code);
if (errRoutingRule) {
// route will redirect
return callback(err, corsHeaders, errRoutingRule, objectKey);
}
return callback(err, corsHeaders);
}
/**
* HEAD Website - Gets metadata for object for website or redirects
* @param {object} request - normalized request object
* @param {object} log - Werelogs instance
* @param {function} callback - callback to function in route
* @return {undefined}
*/
function websiteHead(request, log, callback) {
log.debug('processing request', { method: 'websiteHead' });
const bucketName = request.bucketName;
const reqObjectKey = request.objectKey ? request.objectKey : '';
let objectKey = reqObjectKey;
return metadata.getBucket(bucketName, log, (err, bucket) => {
if (err) {
log.trace('error retrieving bucket metadata', { error: err });
monitoring.promMetrics(
'HEAD', bucketName, err.code, 'headObject');
return callback(err);
}
if (bucketShield(bucket, 'objectHead')) {
log.trace('bucket in transient/deleted state so shielding');
monitoring.promMetrics(
'HEAD', bucketName, 404, 'headObject');
return callback(errors.NoSuchBucket);
}
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
// bucket ACL's do not matter for website head since it is always the
// head of an object. object ACL's are what matter
const websiteConfig = bucket.getWebsiteConfiguration();
if (!websiteConfig) {
monitoring.promMetrics(
'HEAD', bucketName, 404, 'headObject');
return callback(errors.NoSuchWebsiteConfiguration);
}
// any errors above would be generic header error response
// if have a website config, error going forward could be redirect
// if a redirect rule for error is in config
// handle redirect all
if (websiteConfig.getRedirectAllRequestsTo()) {
return callback(null, corsHeaders,
websiteConfig.getRedirectAllRequestsTo(), objectKey);
}
// find index document if "directory" sent in request
if (reqObjectKey.endsWith('/')) {
objectKey += websiteConfig.getIndexDocument();
}
// find index document if no key provided
if (reqObjectKey === '') {
objectKey = websiteConfig.getIndexDocument();
}
// check whether need to redirect based on key
const routingRules = websiteConfig.getRoutingRules();
const keyRoutingRule = findRoutingRule(routingRules, objectKey);
if (keyRoutingRule) {
return callback(null, corsHeaders, keyRoutingRule, reqObjectKey);
}
// get object metadata and check authorization and header
// validation
return metadata.getObjectMD(bucketName, objectKey, {}, log,
(err, objMD) => {
// Note: In case of error, we intentionally send the original
// object key to _errorActions as in case of a redirect, we do
// not want to append index key to redirect location
if (err) {
log.trace('error retrieving object metadata',
{ error: err });
let returnErr = err;
const bucketAuthorized = isBucketAuthorized(bucket,
'bucketGet', constants.publicId, null, log, request);
// if index object does not exist and bucket is private AWS
// returns 403 - AccessDenied error.
if (err.is.NoSuchKey && !bucketAuthorized) {
returnErr = errors.AccessDenied;
}
return _errorActions(returnErr, routingRules,
reqObjectKey, corsHeaders, log, callback);
}
if (!isObjAuthorized(bucket, objMD, 'objectGet',
constants.publicId, null, log, request)) {
const err = errors.AccessDenied;
log.trace('request not authorized', { error: err });
return _errorActions(err, routingRules, reqObjectKey,
corsHeaders, log, callback);
}
const headerValResult = validateHeaders(request.headers,
objMD['last-modified'], objMD['content-md5']);
if (headerValResult.error) {
const err = headerValResult.error;
log.trace('header validation error', { error: err });
return _errorActions(err, routingRules, reqObjectKey,
corsHeaders, log, callback);
}
// check if object to serve has website redirect header
// Note: AWS prioritizes website configuration rules over
// object key's website redirect header, so we make the
// check at the end.
if (objMD['x-amz-website-redirect-location']) {
const redirectLocation =
objMD['x-amz-website-redirect-location'];
const redirectInfo =
extractRedirectInfo(redirectLocation);
log.trace('redirecting to x-amz-website-redirect-location',
{ location: redirectLocation });
return callback(null, corsHeaders, redirectInfo, '');
}
// got obj metadata, authorized and headers validated,
// good to go
const responseMetaHeaders = collectResponseHeaders(objMD,
corsHeaders);
pushMetric('headObject', log, {
bucket: bucketName,
});
monitoring.promMetrics(
'HEAD', bucketName, '200', 'headObject');
return callback(null, responseMetaHeaders);
});
});
}
module.exports = websiteHead;

View File

@ -1,4 +1,3 @@
const vaultclient = require('vaultclient');
const { auth } = require('arsenal');
const { config } = require('../Config');
@ -21,6 +20,7 @@ function getVaultClient(config) {
port,
https: true,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
} else {
logger.info('vaultclient configuration', {
@ -28,6 +28,7 @@ function getVaultClient(config) {
port,
https: false,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port);
}
@ -49,10 +50,6 @@ function getMemBackend(config) {
}
switch (config.backends.auth) {
case 'mem':
implName = 'vaultMem';
client = getMemBackend(config);
break;
case 'multiple':
implName = 'vaultChain';
client = new ChainBackend('s3', [
@ -60,9 +57,14 @@ case 'multiple':
getVaultClient(config),
]);
break;
default: // vault
case 'vault':
implName = 'vault';
client = getVaultClient(config);
break;
default: // mem
implName = 'vaultMem';
client = getMemBackend(config);
break;
}
module.exports = new Vault(client, implName);
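Requiring vaultclient inside getVaultClient, rather than at module load, means the optional dependency is only resolved when an external Vault backend is actually selected; together with the default case now falling back to the in-memory backend, a deployment without vaultclient installed can still start. A minimal sketch of the lazy-require pattern, reusing the constructor signatures shown above:

// Sketch only: the optional dependency is loaded lazily, so it is only
// needed when backends.auth is 'vault' or 'multiple'.
function buildVaultClient({ host, port, https, key, cert, ca }) {
    // eslint-disable-next-line global-require
    const vaultclient = require('vaultclient');
    return https
        ? new vaultclient.Client(host, port, true, key, cert, ca)
        : new vaultclient.Client(host, port);
}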

View File

@ -8,20 +8,6 @@ const inMemory = require('./in_memory/backend').backend;
const file = require('./file/backend');
const KMIPClient = require('arsenal').network.kmipClient;
const Common = require('./common');
let scalityKMS;
let scalityKMSImpl;
try {
// eslint-disable-next-line import/no-unresolved
const ScalityKMS = require('scality-kms');
scalityKMS = new ScalityKMS(config.kms);
scalityKMSImpl = 'scalityKms';
} catch (error) {
logger.warn('scality kms unavailable. ' +
'Using file kms backend unless mem specified.',
{ error });
scalityKMS = file;
scalityKMSImpl = 'fileKms';
}
let client;
let implName;
@ -33,8 +19,9 @@ if (config.backends.kms === 'mem') {
client = file;
implName = 'fileKms';
} else if (config.backends.kms === 'scality') {
client = scalityKMS;
implName = scalityKMSImpl;
const ScalityKMS = require('scality-kms');
client = new ScalityKMS(config.kms);
implName = 'scalityKms';
} else if (config.backends.kms === 'kmip') {
const kmipConfig = { kmip: config.kmip };
if (!kmipConfig.kmip) {
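With the top-level try/catch gone, scality-kms is only required when backends.kms is set to 'scality', and a missing package now surfaces as a startup failure instead of a silent fallback to the file backend. A hedged sketch of how a deployment could keep an explicit guard around that lazy require; the wrapper and error message are assumptions, not part of this change:

// Sketch only: optional guard so a missing optional dependency produces
// an actionable startup error rather than a bare MODULE_NOT_FOUND.
if (config.backends.kms === 'scality') {
    let ScalityKMS;
    try {
        // eslint-disable-next-line import/no-unresolved, global-require
        ScalityKMS = require('scality-kms');
    } catch (err) {
        throw new Error('backends.kms is "scality" but the optional ' +
            'scality-kms package is not installed');
    }
    client = new ScalityKMS(config.kms);
    implName = 'scalityKms';
}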

View File

@ -1,131 +0,0 @@
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const MessageType = {
/** Message that contains a configuration overlay */
CONFIG_OVERLAY_MESSAGE: 1,
/** Message that requests a metrics report */
METRICS_REQUEST_MESSAGE: 2,
/** Message that contains a metrics report */
METRICS_REPORT_MESSAGE: 3,
/** Close the virtual TCP socket associated to the channel */
CHANNEL_CLOSE_MESSAGE: 4,
/** Write data to the virtual TCP socket associated to the channel */
CHANNEL_PAYLOAD_MESSAGE: 5,
};
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const TargetType = {
/** Let the dispatcher choose the most appropriate message */
TARGET_ANY: 0,
};
const headerSize = 3;
class ChannelMessageV0 {
/**
* @param {Buffer} buffer Message bytes
*/
constructor(buffer) {
this.messageType = buffer.readUInt8(0);
this.channelNumber = buffer.readUInt8(1);
this.target = buffer.readUInt8(2);
this.payload = buffer.slice(headerSize);
}
/**
* @returns {number} Message type
*/
getType() {
return this.messageType;
}
/**
* @returns {number} Channel number if applicable
*/
getChannelNumber() {
return this.channelNumber;
}
/**
* @returns {number} Target service, or 0 to choose automatically
*/
getTarget() {
return this.target;
}
/**
* @returns {Buffer} Message payload if applicable
*/
getPayload() {
return this.payload;
}
/**
* Creates a wire representation of a channel close message
*
* @param {number} channelId Channel number
*
* @returns {Buffer} wire representation
*/
static encodeChannelCloseMessage(channelId) {
const buf = Buffer.alloc(headerSize);
buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
return buf;
}
/**
* Creates a wire representation of a channel data message
*
* @param {number} channelId Channel number
* @param {Buffer} data Payload
*
* @returns {Buffer} wire representation
*/
static encodeChannelDataMessage(channelId, data) {
const buf = Buffer.alloc(data.length + headerSize);
buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
data.copy(buf, headerSize);
return buf;
}
/**
* Creates a wire representation of a metrics message
*
* @param {object} body Metrics report
*
* @returns {Buffer} wire representation
*/
static encodeMetricsReportMessage(body) {
const report = JSON.stringify(body);
const buf = Buffer.alloc(report.length + headerSize);
buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
buf.writeUInt8(0, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
buf.write(report, headerSize);
return buf;
}
/**
* Protocol name used for subprotocol negociation
*/
static get protocolName() {
return 'zenko-secure-channel-v0';
}
}
module.exports = {
ChannelMessageV0,
MessageType,
TargetType,
};

View File

@ -1,94 +0,0 @@
const WebSocket = require('ws');
const arsenal = require('arsenal');
const logger = require('../utilities/logger');
const _config = require('../Config').config;
const { patchConfiguration } = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const managementAgentMessageType = {
/** Message that contains the loaded overlay */
NEW_OVERLAY: 1,
};
const CONNECTION_RETRY_TIMEOUT_MS = 5000;
function initManagementClient() {
const { host, port } = _config.managementAgent;
const ws = new WebSocket(`ws://${host}:${port}/watch`);
ws.on('open', () => {
logger.info('connected with management agent');
});
ws.on('close', (code, reason) => {
logger.info('disconnected from management agent', { reason });
setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
});
ws.on('error', error => {
logger.error('error on connection with management agent', { error });
});
ws.on('message', data => {
const method = 'initManagementclient::onMessage';
const log = logger.newRequestLogger();
let msg;
if (!data) {
log.error('message without data', { method });
return;
}
try {
msg = JSON.parse(data);
} catch (err) {
log.error('data is an invalid json', { method, err, data });
return;
}
if (msg.payload === undefined) {
log.error('message without payload', { method });
return;
}
if (typeof msg.messageType !== 'number') {
log.error('messageType is not an integer', {
type: typeof msg.messageType,
method,
});
return;
}
switch (msg.messageType) {
case managementAgentMessageType.NEW_OVERLAY:
patchConfiguration(msg.payload, log, err => {
if (err) {
log.error('failed to patch overlay', {
error: reshapeExceptionError(err),
method,
});
}
});
return;
default:
log.error('new overlay message with unmanaged message type', {
method,
type: msg.messageType,
});
return;
}
});
}
function isManagementAgentUsed() {
return process.env.MANAGEMENT_USE_AGENT === '1';
}
module.exports = {
managementAgentMessageType,
initManagementClient,
isManagementAgentUsed,
};

View File

@ -1,240 +0,0 @@
const arsenal = require('arsenal');
const { buildAuthDataAccount } = require('../auth/in_memory/builder');
const _config = require('../Config').config;
const metadata = require('../metadata/wrapper');
const { getStoredCredentials } = require('./credentials');
const latestOverlayVersionKey = 'configuration/overlay-version';
const managementDatabaseName = 'PENSIEVE';
const replicatorEndpoint = 'zenko-cloudserver-replicator';
const { decryptSecret } = arsenal.pensieve.credentialUtils;
const { patchLocations } = arsenal.patches.locationConstraints;
const { reshapeExceptionError } = arsenal.errorUtils;
const { replicationBackends } = require('arsenal').constants;
function overlayHasVersion(overlay) {
return overlay && overlay.version !== undefined;
}
function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
return (overlayHasVersion(remoteOverlay) &&
(!overlayHasVersion(cachedOverlay) ||
remoteOverlay.version > cachedOverlay.version));
}
/**
* Updates the live {Config} object with the new overlay configuration.
*
* No-op if this version was already applied to the live {Config}.
*
* @param {object} newConf Overlay configuration to apply
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, newConf)
*
* @returns {undefined}
*/
function patchConfiguration(newConf, log, cb) {
if (newConf.version === undefined) {
log.debug('no remote configuration created yet');
return process.nextTick(cb, null, newConf);
}
if (_config.overlayVersion !== undefined &&
newConf.version <= _config.overlayVersion) {
log.debug('configuration version already applied',
{ configurationVersion: newConf.version });
return process.nextTick(cb, null, newConf);
}
return getStoredCredentials(log, (err, creds) => {
if (err) {
return cb(err);
}
const accounts = [];
if (newConf.users) {
newConf.users.forEach(u => {
if (u.secretKey && u.secretKey.length > 0) {
const secretKey = decryptSecret(creds, u.secretKey);
// accountType will be service-replication or service-clueso
let serviceName;
if (u.accountType && u.accountType.startsWith('service-')) {
serviceName = u.accountType.split('-')[1];
}
const newAccount = buildAuthDataAccount(
u.accessKey, secretKey, u.canonicalId, serviceName,
u.userName);
accounts.push(newAccount.accounts[0]);
}
});
}
const restEndpoints = Object.assign({}, _config.restEndpoints);
if (newConf.endpoints) {
newConf.endpoints.forEach(e => {
restEndpoints[e.hostname] = e.locationName;
});
}
if (!restEndpoints[replicatorEndpoint]) {
restEndpoints[replicatorEndpoint] = 'us-east-1';
}
const locations = patchLocations(newConf.locations, creds, log);
if (Object.keys(locations).length !== 0) {
try {
_config.setLocationConstraints(locations);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply configuration version location ' +
'constraints', { error: exceptionError,
method: 'getStoredCredentials' });
return cb(exceptionError);
}
try {
const locationsWithReplicationBackend = Object.keys(locations)
// NOTE: In Orbit, we don't need to have Scality location in our
// replication endpoind config, since we do not replicate to
// any Scality Instance yet.
.filter(key => replicationBackends
[locations[key].type])
.reduce((obj, key) => {
/* eslint no-param-reassign:0 */
obj[key] = locations[key];
return obj;
}, {});
_config.setReplicationEndpoints(
locationsWithReplicationBackend);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply replication endpoints',
{ error: exceptionError, method: 'getStoredCredentials' });
return cb(exceptionError);
}
}
_config.setAuthDataAccounts(accounts);
_config.setRestEndpoints(restEndpoints);
_config.setPublicInstanceId(newConf.instanceId);
if (newConf.browserAccess) {
if (Boolean(_config.browserAccessEnabled) !==
Boolean(newConf.browserAccess.enabled)) {
_config.browserAccessEnabled =
Boolean(newConf.browserAccess.enabled);
_config.emit('browser-access-enabled-change');
}
}
_config.overlayVersion = newConf.version;
log.info('applied configuration version',
{ configurationVersion: _config.overlayVersion });
return cb(null, newConf);
});
}
/**
* Writes configuration version to the management database
*
* @param {object} cachedOverlay Latest stored configuration version
* for freshness comparison purposes
* @param {object} remoteOverlay New configuration version
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, remoteOverlay)
*
* @returns {undefined}
*/
function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
const objName = `configuration/overlay/${remoteOverlay.version}`;
metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
{}, log, error => {
if (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not save configuration',
{ error: exceptionError,
method: 'saveConfigurationVersion',
configurationVersion: remoteOverlay.version });
cb(exceptionError);
return;
}
metadata.putObjectMD(managementDatabaseName,
latestOverlayVersionKey, remoteOverlay.version, {}, log,
error => {
if (error) {
log.error('could not save configuration version', {
configurationVersion: remoteOverlay.version,
});
}
cb(error, remoteOverlay);
});
});
} else {
log.debug('no remote configuration to cache yet');
process.nextTick(cb, null, remoteOverlay);
}
}
/**
* Loads the latest cached configuration overlay from the management
* database, without contacting the Orbit API.
*
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} callback Function called with (error, cachedOverlay)
*
* @returns {undefined}
*/
function loadCachedOverlay(log, callback) {
return metadata.getObjectMD(managementDatabaseName,
latestOverlayVersionKey, {}, log, (err, version) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return metadata.getObjectMD(managementDatabaseName,
`configuration/overlay/${version}`, {}, log, (err, conf) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return callback(null, conf);
});
});
}
function applyAndSaveOverlay(overlay, log) {
patchConfiguration(overlay, log, err => {
if (err) {
log.error('could not apply pushed overlay', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
saveConfigurationVersion(null, overlay, log, err => {
if (err) {
log.error('could not cache overlay version', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
log.info('overlay push processed');
});
});
}
module.exports = {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
saveConfigurationVersion,
remoteOverlayIsNewer,
applyAndSaveOverlay,
};

View File

@ -1,145 +0,0 @@
const arsenal = require('arsenal');
const forge = require('node-forge');
const request = require('../utilities/request');
const metadata = require('../metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
const { reshapeExceptionError } = arsenal.errorUtils;
/**
* Retrieves Orbit API token from the management database.
*
* The token is used to authenticate stat posting and
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function getStoredCredentials(log, callback) {
metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
log, callback);
}
function issueCredentials(managementEndpoint, instanceId, log, callback) {
log.info('registering with API to get token');
const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
const postData = {
publicKey,
};
request.post(`${managementEndpoint}/${instanceId}/register`,
{ body: postData, json: true }, (error, response, body) => {
if (error) {
return callback(error);
}
if (response.statusCode !== 201) {
log.error('could not register instance', {
statusCode: response.statusCode,
});
return callback(arsenal.errors.InternalError);
}
/* eslint-disable no-param-reassign */
body.privateKey = privateKey;
/* eslint-enable no-param-reassign */
return callback(null, body);
});
}
function confirmInstanceCredentials(
managementEndpoint, instanceId, creds, log, callback) {
const postData = {
serial: creds.serial || 0,
publicKey: creds.publicKey,
};
const opts = {
headers: {
'x-instance-authentication-token': creds.token,
},
body: postData,
};
request.post(`${managementEndpoint}/${instanceId}/confirm`,
opts, (error, response) => {
if (error) {
return callback(error);
}
if (response.statusCode === 200) {
return callback(null, instanceId, creds.token);
}
return callback(arsenal.errors.InternalError);
});
}
/**
* Initializes credentials and PKI in the management database.
*
* In case the management database is new and empty, the instance
* is registered as new against the Orbit API with newly-generated
* RSA key pair.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function initManagementCredentials(
managementEndpoint, instanceId, log, callback) {
getStoredCredentials(log, (error, value) => {
if (error) {
if (error.is.NoSuchKey) {
return issueCredentials(managementEndpoint, instanceId, log,
(error, value) => {
if (error) {
log.error('could not issue token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials' });
return callback(error);
}
log.debug('saving token');
return metadata.putObjectMD(managementDatabaseName,
tokenConfigurationKey, value, {}, log, error => {
if (error) {
log.error('could not save token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials',
});
return callback(error);
}
log.info('saved token locally, ' +
'confirming instance');
return confirmInstanceCredentials(
managementEndpoint, instanceId, value, log,
callback);
});
});
}
log.debug('could not get token', { error });
return callback(error);
}
log.info('returning existing token');
if (Date.now() - value.issueDate > tokenRotationDelay) {
log.warn('management API token is too old, should re-issue');
}
return callback(null, instanceId, value.token);
});
}
module.exports = {
getStoredCredentials,
initManagementCredentials,
};

View File

@ -1,138 +0,0 @@
const arsenal = require('arsenal');
const async = require('async');
const metadata = require('../metadata/wrapper');
const logger = require('../utilities/logger');
const {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
} = require('./configuration');
const { initManagementCredentials } = require('./credentials');
const { startWSManagementClient } = require('./push');
const { startPollingManagementClient } = require('./poll');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const initRemoteManagementRetryDelay = 10000;
const managementEndpointRoot =
process.env.MANAGEMENT_ENDPOINT ||
'https://api.zenko.io';
const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
const pushEndpointRoot =
process.env.PUSH_ENDPOINT ||
'https://push.api.zenko.io';
const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;
function initManagementDatabase(log, callback) {
// XXX choose proper owner names
const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
'owner display name', new Date().toJSON());
metadata.createBucket(managementDatabaseName, md, log, error => {
if (error) {
if (error.is.BucketAlreadyExists) {
log.info('created management database');
return callback();
}
log.error('could not initialize management database',
{ error: reshapeExceptionError(error),
method: 'initManagementDatabase' });
return callback(error);
}
log.info('initialized management database');
return callback();
});
}
function startManagementListeners(instanceId, token) {
const mode = process.env.MANAGEMENT_MODE || 'push';
if (mode === 'push') {
const url = `${pushEndpoint}/${instanceId}/ws`;
startWSManagementClient(url, token);
} else {
startPollingManagementClient(managementEndpoint, instanceId, token);
}
}
/**
* Initializes Orbit-based management by:
* - creating the management database in metadata
* - generating a key pair for credentials encryption
* - generating an instance-unique ID
* - getting an authentication token for the API
* - loading and applying the latest cached overlay configuration
* - starting a configuration update and metrics push background task
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function to call once the overlay is loaded
* (overlay)
*
* @returns {undefined}
*/
function initManagement(log, callback) {
if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
|| process.env.S3BACKEND === 'mem') {
log.info('remote management disabled');
return;
}
/* Temporary check before to fully move to the process management agent. */
if (isManagementAgentUsed() ^ typeof callback === 'function') {
let msg = 'misuse of initManagement function: ';
msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
msg += `, callback type: ${typeof callback}`;
throw new Error(msg);
}
async.waterfall([
// eslint-disable-next-line arrow-body-style
cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
cb => initManagementDatabase(log, cb),
cb => metadata.getUUID(log, cb),
(instanceId, cb) => initManagementCredentials(
managementEndpoint, instanceId, log, cb),
(instanceId, token, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, {});
return;
}
loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
token, overlay));
},
(instanceId, token, overlay, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, overlay);
return;
}
patchConfiguration(overlay, log,
err => cb(err, instanceId, token, overlay));
},
], (error, instanceId, token, overlay) => {
if (error) {
log.error('could not initialize remote management, retrying later',
{ error: reshapeExceptionError(error),
method: 'initManagement' });
setTimeout(initManagement,
initRemoteManagementRetryDelay,
logger.newRequestLogger());
} else {
log.info(`this deployment's Instance ID is ${instanceId}`);
log.end('management init done');
startManagementListeners(instanceId, token);
if (callback) {
callback(overlay);
}
}
});
}
module.exports = {
initManagement,
initManagementDatabase,
};

View File

@ -1,157 +0,0 @@
const arsenal = require('arsenal');
const async = require('async');
const request = require('../utilities/request');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const {
loadCachedOverlay,
patchConfiguration,
saveConfigurationVersion,
} = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const pushReportDelay = 30000;
const pullConfigurationOverlayDelay = 60000;
function loadRemoteOverlay(
managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
log.debug('loading remote overlay');
const opts = {
headers: {
'x-instance-authentication-token': remoteToken,
'x-scal-request-id': log.getSerializedUids(),
},
json: true,
};
request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
(error, response, body) => {
if (error) {
return cb(error);
}
if (response.statusCode === 200) {
return cb(null, cachedOverlay, body);
}
if (response.statusCode === 404) {
return cb(null, cachedOverlay, {});
}
return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
});
}
// TODO save only after successful patch
function applyConfigurationOverlay(
managementEndpoint, instanceId, remoteToken, log) {
async.waterfall([
wcb => loadCachedOverlay(log, wcb),
(cachedOverlay, wcb) => patchConfiguration(cachedOverlay,
log, wcb),
(cachedOverlay, wcb) =>
loadRemoteOverlay(managementEndpoint, instanceId, remoteToken,
cachedOverlay, log, wcb),
(cachedOverlay, remoteOverlay, wcb) =>
saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
(remoteOverlay, wcb) => patchConfiguration(remoteOverlay,
log, wcb),
], error => {
if (error) {
log.error('could not apply managed configuration',
{ error: reshapeExceptionError(error),
method: 'applyConfigurationOverlay' });
}
setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
});
}
function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
const toURL = `${managementEndpoint}/${instanceId}/stats`;
const toOptions = {
json: true,
headers: {
'content-type': 'application/json',
'x-instance-authentication-token': remoteToken,
},
body: report,
};
const toCallback = (err, response, body) => {
if (err) {
logger.info('could not post stats', { error: err });
}
if (response && response.statusCode !== 201) {
logger.info('could not post stats', {
body,
statusCode: response.statusCode,
});
}
if (next) {
next(null, instanceId, remoteToken);
}
};
return request.post(toURL, toOptions, toCallback);
}
function getStats(next) {
const fromURL = `http://localhost:${_config.port}/_/report`;
const fromOptions = {
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
},
};
return request.get(fromURL, fromOptions, next);
}
function pushStats(managementEndpoint, instanceId, remoteToken, next) {
if (process.env.PUSH_STATS === 'false') {
return;
}
getStats((err, res, report) => {
if (err) {
logger.info('could not retrieve stats', { error: err });
return;
}
logger.debug('report', { report });
postStats(
managementEndpoint,
instanceId,
remoteToken,
report,
next
);
return;
});
setTimeout(pushStats, pushReportDelay,
managementEndpoint, instanceId, remoteToken);
}
/**
* Starts background task that updates configuration and pushes stats.
*
* Periodically polls for configuration updates, and pushes stats at
* a fixed interval.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {string} remoteToken API authentication token
*
* @returns {undefined}
*/
function startPollingManagementClient(
managementEndpoint, instanceId, remoteToken) {
metadata.notifyBucketChange(() => {
pushStats(managementEndpoint, instanceId, remoteToken);
});
pushStats(managementEndpoint, instanceId, remoteToken);
applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
}
module.exports = {
startPollingManagementClient,
};
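
A hedged sketch of how the exported poller could be started once initManagement has produced an instance ID and an authentication token; the require path is an assumption and the endpoint, UUID, and token values are placeholders.

const { startPollingManagementClient } = require('./management/poll'); // assumed path

// In practice these come from initManagement's waterfall
// (metadata.getUUID + initManagementCredentials); placeholders here.
const managementEndpoint = 'https://management.example.com/api/v1/instance';
const instanceId = '11111111-2222-3333-4444-555555555555';
const remoteToken = 'example-instance-token';

// Starts the stats push loop (every pushReportDelay ms) and the configuration
// overlay pull loop (every pullConfigurationOverlayDelay ms), and re-pushes
// stats whenever the metadata layer reports a bucket change.
startPollingManagementClient(managementEndpoint, instanceId, remoteToken);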


@@ -1,301 +0,0 @@
const arsenal = require('arsenal');
const HttpsProxyAgent = require('https-proxy-agent');
const net = require('net');
const request = require('../utilities/request');
const { URL } = require('url');
const WebSocket = require('ws');
const assert = require('assert');
const http = require('http');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const { applyAndSaveOverlay } = require('./configuration');
const {
ChannelMessageV0,
MessageType,
} = require('./ChannelMessageV0');
const {
CONFIG_OVERLAY_MESSAGE,
METRICS_REQUEST_MESSAGE,
CHANNEL_CLOSE_MESSAGE,
CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;
const PING_INTERVAL_MS = 10000;
const subprotocols = [ChannelMessageV0.protocolName];
const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
|| 'localhost';
const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
|| _config.port;
let overlayMessageListener = null;
let connected = false;
// NO_PROXY entries are matched exactly; no wildcard or CIDR/mask matching for now
function createWSAgent(pushEndpoint, env, log) {
const url = new URL(pushEndpoint);
const noProxy = (env.NO_PROXY || env.no_proxy
|| '').split(',');
if (noProxy.includes(url.hostname)) {
log.info('push server ws has proxy exclusion', { noProxy });
return null;
}
if (url.protocol === 'https:' || url.protocol === 'wss:') {
const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
if (httpsProxy) {
log.info('push server ws using https proxy', { httpsProxy });
return new HttpsProxyAgent(httpsProxy);
}
} else if (url.protocol === 'http:' || url.protocol === 'ws:') {
const httpProxy = (env.HTTP_PROXY || env.http_proxy);
if (httpProxy) {
log.info('push server ws using http proxy', { httpProxy });
return new HttpsProxyAgent(httpProxy);
}
}
const allProxy = (env.ALL_PROXY || env.all_proxy);
if (allProxy) {
log.info('push server ws using wildcard proxy', { allProxy });
return new HttpsProxyAgent(allProxy);
}
log.info('push server ws not using proxy');
return null;
}
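// Hedged illustration of the selection above (all values are made up):
//   HTTPS_PROXY=http://proxy.example:3128 + a wss://push.example.com endpoint
//       -> new HttpsProxyAgent('http://proxy.example:3128')
//   NO_PROXY=push.example.com + the same endpoint
//       -> null (proxy exclusion, direct connection)
//   no proxy-related variables set
//       -> null (direct connection)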
/**
* Starts background task that updates configuration and pushes stats.
*
* Receives pushed Websocket messages on configuration updates, and
* sends stat messages in response to API solicitations.
*
* @param {string} url API endpoint
* @param {string} token API authentication token
* @param {function} cb end-of-connection callback
*
* @returns {undefined}
*/
function startWSManagementClient(url, token, cb) {
logger.info('connecting to push server', { url });
function _logError(error, errorMessage, method) {
if (error) {
logger.error(`management client error: ${errorMessage}`,
{ error: reshapeExceptionError(error), method });
}
}
// Indexed by channel number; entries are net.Socket instances (or null once closed).
const socketsByChannelId = [];
const headers = {
'x-instance-authentication-token': token,
};
const agent = createWSAgent(url, process.env, logger);
const ws = new WebSocket(url, subprotocols, { headers, agent });
let pingTimeout = null;
function sendPing() {
if (ws.readyState === ws.OPEN) {
ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
}
pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
}
function initiatePing() {
clearTimeout(pingTimeout);
setTimeout(sendPing, PING_INTERVAL_MS);
}
function pushStats(options) {
if (process.env.PUSH_STATS === 'false') {
return;
}
const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
const fromOptions = {
json: true,
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
'x-scal-report-skip-cache': Boolean(options && options.noCache),
},
};
request.get(fromURL, fromOptions, (err, response, body) => {
if (err) {
_logError(err, 'failed to get metrics report', 'pushStats');
return;
}
ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
err => _logError(err, 'failed to send metrics report message',
'pushStats'));
});
}
function closeChannel(channelId) {
const socket = socketsByChannelId[channelId];
if (socket) {
socket.destroy();
delete socketsByChannelId[channelId];
}
}
function receiveChannelData(channelId, payload) {
let socket = socketsByChannelId[channelId];
if (!socket) {
socket = net.createConnection(cloudServerPort, cloudServerHost);
socket.on('data', data => {
ws.send(ChannelMessageV0.
encodeChannelDataMessage(channelId, data), err =>
_logError(err, 'failed to send channel data message',
'receiveChannelData'));
});
socket.on('connect', () => {
});
socket.on('drain', () => {
});
socket.on('error', error => {
logger.error('failed to connect to S3', {
code: error.code,
host: error.address,
port: error.port,
});
});
socket.on('end', () => {
socket.destroy();
socketsByChannelId[channelId] = null;
ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
err => _logError(err,
'failed to send channel close message',
'receiveChannelData'));
});
socketsByChannelId[channelId] = socket;
}
socket.write(payload);
}
function browserAccessChangeHandler() {
if (!_config.browserAccessEnabled) {
// net.Socket has no close(); destroy each live channel socket
// (entries may be null once their 'end' handler has run).
socketsByChannelId.forEach(s => s && s.destroy());
}
}
ws.on('open', () => {
connected = true;
logger.info('connected to push server');
metadata.notifyBucketChange(() => {
pushStats({ noCache: true });
});
_config.on('browser-access-enabled-change', browserAccessChangeHandler);
initiatePing();
});
const cbOnce = cb ? arsenal.jsutil.once(cb) : null;
ws.on('close', () => {
logger.info('disconnected from push server, reconnecting in 10s');
metadata.notifyBucketChange(null);
_config.removeListener('browser-access-enabled-change',
browserAccessChangeHandler);
setTimeout(startWSManagementClient, 10000, url, token);
connected = false;
if (cbOnce) {
process.nextTick(cbOnce);
}
});
ws.on('error', err => {
connected = false;
logger.error('error from push server connection', {
error: err,
errorMessage: err.message,
});
if (cbOnce) {
process.nextTick(cbOnce, err);
}
});
ws.on('ping', () => {
ws.pong(err => _logError(err, 'failed to send a pong', 'pingHandler'));
});
ws.on('pong', () => {
initiatePing();
});
ws.on('message', data => {
const log = logger.newRequestLogger();
const message = new ChannelMessageV0(data);
switch (message.getType()) {
case CONFIG_OVERLAY_MESSAGE:
if (!isManagementAgentUsed()) {
applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
} else if (overlayMessageListener) {
overlayMessageListener(message.getPayload().toString());
}
break;
case METRICS_REQUEST_MESSAGE:
pushStats();
break;
case CHANNEL_CLOSE_MESSAGE:
closeChannel(message.getChannelNumber());
break;
case CHANNEL_PAYLOAD_MESSAGE:
// browserAccessEnabled defaults to true unless explicitly false
if (_config.browserAccessEnabled !== false) {
receiveChannelData(
message.getChannelNumber(), message.getPayload());
}
break;
default:
logger.error('unknown message type from push server',
{ messageType: message.getType() });
}
});
}
function addOverlayMessageListener(callback) {
assert(typeof callback === 'function');
overlayMessageListener = callback;
}
function startPushConnectionHealthCheckServer(cb) {
const server = http.createServer((req, res) => {
if (req.url !== '/_/healthcheck') {
res.writeHead(404);
res.write('Not Found');
} else if (connected) {
res.writeHead(200);
res.write('Connected');
} else {
res.writeHead(503);
res.write('Not Connected');
}
res.end();
});
server.listen(_config.port, cb);
}
module.exports = {
createWSAgent,
startWSManagementClient,
startPushConnectionHealthCheckServer,
addOverlayMessageListener,
};
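
Finally, a minimal sketch of wiring the push channel and its health check together; the require path, push endpoint, and token are assumptions, while the /_/healthcheck route and the 10-second reconnect come from the code above.

const {
    startWSManagementClient,
    startPushConnectionHealthCheckServer,
} = require('./management/push'); // assumed path

const pushEndpoint = 'wss://push.example.com/api/v1/instance/ws'; // placeholder
const instanceToken = 'example-instance-token';                   // placeholder

// Expose /_/healthcheck (200 when the WebSocket is connected, 503 otherwise),
// then open the push channel; it reconnects itself 10s after any close.
startPushConnectionHealthCheckServer(() => {
    startWSManagementClient(pushEndpoint, instanceToken);
});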

Some files were not shown because too many files have changed in this diff.