Compare commits


497 Commits

Author SHA1 Message Date
Vitaliy Filippov b5711e9cbf Use fs.readFileSync to read config file instead of require 2024-08-13 11:19:38 +03:00
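A minimal sketch of the pattern this commit points at (the path below is illustrative): reading the config with fs.readFileSync plus JSON.parse instead of require avoids Node's module cache, so the file is parsed fresh on every load.

```js
const fs = require('fs');

// require('./config.json') would parse the file once and cache the
// result in Node's module cache; readFileSync re-reads it every call.
function loadConfig(path) {
    const raw = fs.readFileSync(path, { encoding: 'utf8' });
    return JSON.parse(raw);
}

const config = loadConfig('./config.json'); // illustrative path
```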
Vitaliy Filippov 36dc6298d2 Use webpack to pack 2024-08-13 02:20:08 +03:00
Vitaliy Filippov bc2d637578 Add installation instructions for Vitastor backend 2024-08-12 01:36:42 +03:00
Vitaliy Filippov b543695048 Add example Vitastor backend configs 2024-08-11 17:24:05 +03:00
Vitaliy Filippov 90024d044d Configure "legacy" werelogs because otherwise MultipleBackendGateway was skipping messages 2024-08-04 01:22:48 +03:00
Vitaliy Filippov 451ab33f68 Use config.workers instead of config.clusters 2024-08-03 14:10:39 +03:00
Vitaliy Filippov c86107e912 Add authdata config file reference to config.json 2024-08-03 01:36:01 +03:00
Vitaliy Filippov 0a5962f256 Require scality kms only if kms backend is scality 2024-08-03 01:29:04 +03:00
Vitaliy Filippov 0e292791c6 Setup backends in config.json 2024-08-02 01:45:38 +03:00
Vitaliy Filippov fc07729bd0 Use ^versions 2024-08-02 01:44:13 +03:00
Vitaliy Filippov 4527dd6795 Do not store actual configs in git 2024-08-01 15:52:02 +03:00
Vitaliy Filippov 05fb581023 Use x-amz-storage-class instead of x-amz-meta-scal-location-constraint
FIXME: Ideally, both locations and storage classes should be supported
2024-07-28 02:00:38 +03:00
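For illustration, with the AWS SDK for JavaScript the storage class travels as the standard x-amz-storage-class header via the StorageClass parameter; the endpoint and class value below are placeholders, not names taken from this repo.

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000', // illustrative cloudserver endpoint
    s3ForcePathStyle: true,
});

// StorageClass is sent as the x-amz-storage-class header, replacing the
// proprietary x-amz-meta-scal-location-constraint metadata key.
s3.putObject({
    Bucket: 'mybucket',
    Key: 'mykey',
    Body: 'data',
    StorageClass: 'STANDARD', // placeholder for a backend location name
}, (err, data) => {
    if (err) console.error(err);
    else console.log(data.ETag);
});
```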
Vitaliy Filippov 956739a04e Use internal vaultclient for utapi server 2024-07-23 16:32:48 +03:00
Vitaliy Filippov 7ad0888a66 Change git dependency URLs 2024-07-21 17:36:47 +03:00
Vitaliy Filippov bf01ba4ed1 Change git dependency URLs 2024-07-21 15:26:06 +03:00
Vitaliy Filippov ab019e7e50 Make vaultclient dependency optional 2024-07-21 14:19:54 +03:00
Vitaliy Filippov 3797695e74 Make bucketclient dependency optional 2024-07-18 11:17:05 +03:00
Vitaliy Filippov c8084196c4 Remove remote management 2024-07-16 20:34:11 +03:00
bert-e b72e918ff9 Merge branch 'w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.8/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 22887f47d8 Merge branch 'w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 0cd10a73f3 Merge branch 'w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
bert-e e139406612 Merge branch 'bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
Maha Benzekri d91853a38b
processBucketPolicy fixup for objectDelete
Introduced by https://github.com/scality/cloudserver/pull/5580:
we now send a requestContext with no specific resource instead
of "null", which results in a policy evaluation error.
Since we get an implicit deny for the requestType "objectDelete",
the processed result becomes false, sending an empty array of
objects to vault and resulting in a deny even when the policy
allows the action on specific objects.

Linked Issue : https://scality.atlassian.net/browse/CLDSRV-555
2024-07-15 14:20:08 +02:00
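A hypothetical sketch of the shape of the fix (helper and field names are invented for illustration, not cloudserver's actual code): build one request context per object key so the policy evaluator sees a concrete resource instead of null.

```js
// Hypothetical names; the real change lives in cloudserver's
// multi-object-delete policy evaluation path.
function buildDeleteRequestContexts(bucketName, objectKeys) {
    return objectKeys.map(key => ({
        action: 'objectDelete',
        // A concrete resource per object, never null: policies that
        // allow objectDelete on specific keys can now match instead
        // of falling through to an implicit deny.
        resource: `arn:aws:s3:::${bucketName}/${key}`,
    }));
}
```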
Mickael Bourgois a7e798f909
CLDSRV-544: bump version 8.8.27 2024-07-03 19:08:02 +02:00
Mickael Bourgois 3a1ba29869
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-544-stderr' into w/8.8/improvement/CLDSRV-544-stderr 2024-07-03 19:07:41 +02:00
Mickael Bourgois dbb9b6d787
CLDSRV-544: bump version 8.7.48 2024-07-03 18:52:35 +02:00
Mickael Bourgois fce76f0934
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-544-stderr' into w/8.7/improvement/CLDSRV-544-stderr 2024-07-03 18:52:20 +02:00
Mickael Bourgois 0e39aaac09
CLDSRV: bump version 8.6.27 2024-07-03 18:48:28 +02:00
Mickael Bourgois 0b14c93fac
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-544-stderr' into w/8.6/improvement/CLDSRV-544-stderr 2024-07-03 18:48:12 +02:00
Mickael Bourgois ab2960bbf4
CLDSRV-544: bump version 2024-07-01 12:28:23 +02:00
Mickael Bourgois 7305b112e2
Merge remote-tracking branch 'origin/improvement/CLDSRV-544-stderr' into w/7.70/improvement/CLDSRV-544-stderr 2024-07-01 12:28:07 +02:00
Mickael Bourgois cd9e2e757b
CLDSRV-544: bump version 2024-06-30 21:15:52 +02:00
Mickael Bourgois ca0904f584
CLDSRV-544: Add timestamp on stderr for utapi v1 2024-06-30 21:15:52 +02:00
Mickael Bourgois 0dd3dd35e6
CLDSRV-544: Add timestamp on stderr
The previous version would not exit the master of the cluster.
Now it exits as it should.
2024-06-30 21:15:52 +02:00
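An illustrative way to get the effect described here (the actual change lives in the startup scripts): prefix every stderr write with an ISO timestamp.

```js
// Assumes string chunks, which is what console.error produces.
const originalWrite = process.stderr.write.bind(process.stderr);
process.stderr.write = (chunk, ...args) =>
    originalWrite(`${new Date().toISOString()} ${chunk}`, ...args);

console.error('worker died'); // e.g. "2024-06-30T19:15:52.000Z worker died"
```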
bert-e bf7e4b7e23 Merge branch 'w/8.7/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:30 +00:00
bert-e 92f4794727 Merge branch 'w/8.6/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:29 +00:00
Jonathan Gramain c6ef85e3a1 Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-fixup-version' into w/8.6/bugfix/CLDSRV-547-fixup-version 2024-06-27 14:05:27 -07:00
Jonathan Gramain c0fe0cfbcf CLDSRV-547 [fixup] bump version to 7.70.49
Fixup the version, as 7.70.48 was already tagged
2024-06-27 11:42:37 -07:00
bert-e 9c936f2b83 Merge branch 'w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
bert-e d26bac2ebc Merge branch 'w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
Jonathan Gramain cfb9db5178 Merge branch 'w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:53:41 -07:00
Jonathan Gramain 2ce004751a Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:32:45 -07:00
Jonathan Gramain 539219e046 CLDSRV-547 bump cloudserver version 2024-06-27 10:27:45 -07:00
Jonathan Gramain be49e55db5 bf: CLDSRV-547 update redis config for utapi reindex
Update the redis configuration of utapi reindex to include a list of
sentinels, rather than a single sentinel (previously set to
"localhost" in Federation).

I took this opportunity to clean up tech debt related to parsing redis
configuration, using "joi" for validation instead and making it common
across the three different places where redis config is parsed. Not
doing so would have required yet another copy-paste of dumb and
error-prone validation code. Added unit tests for the new validation.
2024-06-27 10:25:10 -07:00
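A sketch of what the shared joi validation could look like (key names are assumptions about the config shape, not the exact schema):

```js
const Joi = require('joi');

// One shared schema instead of three hand-rolled validators: either a
// direct host/port pair, or a sentinel-based config with a *list* of
// sentinels rather than a single hardcoded "localhost" one.
const redisSchema = Joi.object({
    host: Joi.string(),
    port: Joi.number().port(),
    name: Joi.string(), // master group name, used in sentinel mode
    sentinels: Joi.array().items(Joi.object({
        host: Joi.string().required(),
        port: Joi.number().port().required(),
    })),
}).xor('host', 'sentinels'); // exactly one of the two modes

const { error, value } = redisSchema.validate({
    name: 'mymaster',
    sentinels: [{ host: 'sentinel-1', port: 26379 }],
});
if (error) throw new Error(`bad redis config: ${error.message}`);
```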
bert-e e6b240421b Merge branch 'w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.8/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
bert-e 81739e3ecf Merge branch 'w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
Jonathan Gramain c475503248 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-25 18:40:18 -07:00
bert-e 7acbd5d2fb Merge branch 'bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:39:02 +00:00
Jonathan Gramain 8d726322e5 CLDSRV-549 restore 'git.commit-sha' and 'git.repository' labels
Add back the 'git.commit-sha' and 'git.repository' labels to pushed
images, which were not attached anymore after the change of registry.
2024-06-25 18:26:54 -07:00
williamlardier 4f7aa54886 CLDSRV-541: bump project version 2024-06-13 13:58:54 +02:00
williamlardier 0117a5b0b4 CLDSRV-541: add unit test for deleteobjects authz 2024-06-13 13:58:54 +02:00
williamlardier f679831ba2 CLDSRV-541: update unit tests 2024-06-13 13:56:18 +02:00
williamlardier bb162ca7d3 CLDSRV-541: send request context in deleteobjects to get quota information 2024-06-13 11:58:33 +02:00
williamlardier 0c6dfc7b6e CLDSRV-537: bump project version 2024-05-31 13:47:26 +02:00
williamlardier d608d849df CLDSRV-537: bump checkout version for alerts 2024-05-31 13:47:26 +02:00
williamlardier 2cb63f58d4 CLDSRV-537: bump action-prom-render-test version 2024-05-31 13:44:05 +02:00
williamlardier 51585712f4 CLDSRV-537: do not raise quota error if no quota is defined
This ensures that fresh installs, or buckets that get emptied, do
not trigger the alert by mistake.
2024-05-31 13:44:05 +02:00
bert-e 61eb24e46f Merge branch 'w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a34b162782 Merge branch 'w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.8/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a9e50fe046 Merge branch 'w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
bert-e 4150a8432e Merge branch 'bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
Taylor McKinnon 7e70ff9cbc Disable git clone protection to work around git bug affecting git-lfs 2024-05-22 10:05:17 -07:00
bert-e 09dc45289c Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:31 +00:00
bert-e 47c628e0e1 Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:30 +00:00
Nicolas Humbert a1f4d3fe8a CLDSRV-529 use shorthand utapi dependency format 2024-05-17 15:10:40 +02:00
williamlardier 926242b077 CLDSRV-553: bump project version 2024-05-17 12:35:59 +02:00
williamlardier aa2aac5db3 CLDSRV-553: functional restore test to simulate cold backend calls 2024-05-17 12:35:59 +02:00
williamlardier f2e2d82e51 CLDSRV-553: unit test the onlyCheckQuota flag 2024-05-17 12:35:59 +02:00
williamlardier 88ad86b0c6 CLDSRV-553: adapt calls to quota evaluation
When the API is being called by a cold backend, the
x-scal-s3-version-id header is set. In this case, the quotas must
be evaluated with 0 inflight.
2024-05-17 12:35:59 +02:00
bert-e 8f25892247 Merge branch 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:32 +00:00
bert-e 9ac207187b Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:31 +00:00
Anurag Mittal 624a04805f
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-529/bump_utapi' into w/8.6/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:40:00 +02:00
Anurag Mittal ba99933765
Merge remote-tracking branch 'origin/bugfix/CLDSRV-529/bump_utapi' into w/7.70/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:36:36 +02:00
williamlardier 38d1ac1d2c CLDSRV-553: conditionally force evaluating quotas with 0 inflight
A corner case was found where any PUT from the cold backend would
fail if the quota was already exceeded: the storage was reserved
for the restore, but the restore itself requires some more bytes
as inflights when evaluating quotas. By passing a flag to the quota
evaluation function, we ensure that we can, in these cases,
evaluate the quotas with 0 inflight.
2024-05-17 08:06:35 +02:00
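A hedged sketch of that corner case (the onlyCheckQuota flag name comes from the CLDSRV-553 commits above; everything else is illustrative): a cold-backend call, detected via the x-scal-s3-version-id header, is checked against the quota without counting its bytes as new inflights.

```js
// Illustrative only; the real logic lives in cloudserver's quota helper.
const isColdBackendCall = headers => 'x-scal-s3-version-id' in headers;

function fitsQuota(quotaBytes, currentBytes, inflightBytes, onlyCheckQuota) {
    // A restore PUT already had its bytes reserved when the restore was
    // requested, so evaluate it with 0 inflight to avoid double counting.
    const effectiveInflight = onlyCheckQuota ? 0 : inflightBytes;
    return currentBytes + effectiveInflight <= quotaBytes;
}
```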
Taylor McKinnon 4f34a34a11 bf(CLDSRV-529): Bump version 2024-05-16 12:19:45 -07:00
Taylor McKinnon 53f2a159fa bf(CLDSRV-529): Bump utapi 2024-05-16 12:18:24 -07:00
Maha Benzekri 63f6a75a86
CLDSRV-530: bump project version 2024-05-10 18:36:01 +02:00
Maha Benzekri 41acc7968e
CLDSRV-530: from accountwithQuota to accountWithQuotaCount 2024-05-10 18:32:07 +02:00
williamlardier c98c5207fc CLDSRV-520: bump project version 2024-05-10 09:51:02 +02:00
williamlardier 615ee393a4 CLDSRV-520: fix federation image with tsc 2024-05-10 09:51:02 +02:00
williamlardier 97dfc699aa CLDSRV-520: bump cloudserver version 2024-05-10 08:12:35 +02:00
williamlardier 76786282d1 CLDSRV-520: deduplicate quota logic 2024-05-10 08:12:35 +02:00
williamlardier a19d6524be CLDSRV-520: generic quota retrieval latency dashboard 2024-05-10 08:12:35 +02:00
williamlardier bbf6dfba22 CLDSRV-520: monitor quota cleanup
The finalization of the quota logic will always be executed. Some
tests are added to ensure the inflights are only cleaned when they
are enabled and an error happened in the API.
In any case, this ensures we monitor quotas in a single place,
for each executed action, and correctly compute the total
duration of the quota impact on the API.
2024-05-10 08:11:27 +02:00
williamlardier f0663fd507 CLDSRV-520: add dashboards 2024-05-10 08:11:27 +02:00
williamlardier d4decbbd6c CLDSRV-520: add alerts 2024-05-10 08:11:27 +02:00
williamlardier 288b2b7b87 CLDSRV-520: observe number of buckets and accounts with quota 2024-05-10 08:11:27 +02:00
williamlardier ccf9b62e59 CLDSRV-520: observe metrics during quota evaluations 2024-05-10 08:11:27 +02:00
williamlardier 9fc2d552ae CLDSRV-520: add metrics for quota 2024-05-07 17:56:24 +02:00
williamlardier d7cc4cf7d5 CLDSRV-515: adapt dockerfile for scubaclient 2024-05-07 16:24:25 +02:00
williamlardier 334d33ef44 CLDSRV-515: unit testing 2024-05-07 16:24:25 +02:00
williamlardier 989b0214d9 CLDSRV-515: functional testing 2024-05-07 16:21:13 +02:00
williamlardier 04d0730f97 CLDSRV-515: clear inflights in case of quota exceeded
- If the quotas are evaluated with success and inflights are
  enabled, it means the quota service will store the information
  and persist it till the next update of the utilization metrics.
  In this case, any API that fails after authorization would
  still have its bytes counted, even if nothing was written. To
  overcome that, we call a function from the quota evaluation
  logic to erase anything that we wrote during authorization.
2024-05-07 16:21:13 +02:00
williamlardier fbc642c022 CLDSRV-515: evaluate quotas
Quotas are evaluated:
- As part of the authorization process, after both the bucket and
  the object are authorized. The checks are skipped if the API does
  not need any quota evaluation, or if the inflight bytes are 0
  (i.e., no data added, so no need to check the quota).
- The Copy APIs will evaluate the quotas when the source object is
  checked. In this particular case, the action is objectGet, so a
  flag is passed to force the quota evaluation logic. A subsequent
  check is done in the logic.
- The restoreObject API has a special case where the extension of
  the restoration duration would still cause the evaluation of the
  quotas, causing a potential increase in the inflights stored. We
  detect this case and remove any added inflight.
2024-05-07 16:21:13 +02:00
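A simplified sketch of that evaluation order (stubs and names are illustrative, not cloudserver's code):

```js
const authorizeBucket = async bucket => true;            // stub
const authorizeObject = async object => true;            // stub
const evaluateQuota = async (bucket, inflight) => true;  // stub

async function authorizeWithQuota(api, bucket, object, inflightBytes) {
    if (!await authorizeBucket(bucket) || !await authorizeObject(object)) {
        return false; // quotas are only checked once both are authorized
    }
    // Skipped when the API adds no data: nothing to count against quota.
    if (!api.needsQuotaEvaluation || inflightBytes === 0) {
        return true;
    }
    return evaluateQuota(bucket, inflightBytes);
}
```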
williamlardier 104435f0b6 CLDSRV-515: implement the quota logic as a helper file 2024-05-07 16:21:13 +02:00
williamlardier a362ac202e CLDSRV-515: bootstrap scuba on startup 2024-05-07 16:21:13 +02:00
williamlardier 1277e58150 CLDSRV-515: create a wrapper for scubaclient and quota service 2024-05-07 16:21:13 +02:00
williamlardier 7727ccf5f0 CLDSRV-515: add configuration for quotas
- The quota service is generic. We only support the scuba backend
  for now, but we can add others later, if needed, as long as they
  expose the same interface as the scuba client.
- The scuba configuration is passed to the scubaclient tool.
- The ability to disable the inflights is provided. This changes
  the behavior of the quota checks, so that the inflights won't be
  part of the request to the utilization metrics services. This
  reduces the complexity of the quota evaluation logic in case
  of error, as no cleanup will be needed. This, however, requires
  a backend that can provide up-to-date metrics (i.e., <2s).
2024-05-05 15:31:34 +02:00
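A hypothetical config.json excerpt matching that description (key names are assumptions, not the exact schema):

```json
{
    "scuba": {
        "host": "localhost",
        "port": 8100
    },
    "quota": {
        "backend": "scuba",
        "enableInflights": true
    }
}
```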
williamlardier 71860fc90c CLDSRV-515: do not recreate variable at every authz 2024-05-05 15:31:04 +02:00
williamlardier e504b52de7 CLDSRV-515: bump arsenal and vaultclient, introduce scubaclient 2024-05-02 15:09:23 +02:00
Maha Benzekri b369a47c4d CLDSRV-516: add tests 2024-05-02 14:44:31 +02:00
Maha Benzekri b4fa81e832 CLDSRV-516: implement BucketDeleteQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 1e03d53879 CLDSRV-516: implement BucketGetQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 63e502d419 CLDSRV-516: implement UpdateBucketQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri d2a31dc20a CLDSRV-516: specify the signature version of old auth tests
This is unrelated to the quotas, but blocks the CI.
2024-05-02 14:44:28 +02:00
Maha Benzekri f24411875f CLDSRV-516: introduce quota APIs in router 2024-05-02 14:28:56 +02:00
Maha Benzekri 4fd7faa6a3 CLDSRV-516: bump arsenal version 2024-05-02 14:27:44 +02:00
Francois Ferrand 118aaba702
Use sproxyd from ghcr
Issue: CLDSRV-524
2024-04-18 20:38:37 +02:00
Francois Ferrand e4442fdc52
Merge branch 'w/8.7/improvement/CLDSRV-524' into w/8.8/improvement/CLDSRV-524 2024-04-16 18:36:03 +02:00
Francois Ferrand 7fa199741f
Merge branch 'w/8.6/improvement/CLDSRV-524' into w/8.7/improvement/CLDSRV-524 2024-04-16 18:35:32 +02:00
Francois Ferrand f7f95af78f
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 18:34:49 +02:00
Francois Ferrand 2dc053a784
Merge branch 'w/7.70/improvement/CLDSRV-524' into w/8.6/improvement/CLDSRV-524 2024-04-16 17:57:54 +02:00
Francois Ferrand cc9bb9047e
Merge branch 'improvement/CLDSRV-524' into w/7.70/improvement/CLDSRV-524 2024-04-16 16:58:57 +02:00
Francois Ferrand b824fc0828
Use official docker build steps
The docker-build step from `scality/workflows/` fails to log in to
ghcr, as it picks up the old registry creds.

Issue: CLDSRV-524
2024-04-16 16:54:51 +02:00
Francois Ferrand a2e6d91cf2
Build pykmip image
Issue: CLDSRV-524
2024-04-16 16:54:41 +02:00
Francois Ferrand c1060853dd
Upgrade actions
- artifacts@v4
- cache@v4
- checkout@v4
- codeql@v3
- dependency-review@v4
- login@v3
- setup-buildx@v3
- setup-node@v4
- setup-python@v5

Issue: CLDSRV-524
2024-04-16 16:54:23 +02:00
Francois Ferrand 227d6edd09
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 16:54:02 +02:00
bert-e b4754c68ea Merge branches 'w/8.8/bugfix/CLDSRV-518/duplication' and 'q/5548/8.7/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.8 2024-03-25 12:56:17 +00:00
bert-e 11aea5d93b Merge branches 'w/8.7/bugfix/CLDSRV-518/duplication' and 'q/5548/8.6/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.7 2024-03-25 12:56:17 +00:00
Nicolas Humbert a22719ed47 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-518/duplication' into w/8.8/bugfix/CLDSRV-518/duplication 2024-03-20 08:48:00 +01:00
Nicolas Humbert 41975d539d Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-518/duplication' into w/8.7/bugfix/CLDSRV-518/duplication 2024-03-19 18:12:42 +01:00
bert-e 8796bf0f44 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
bert-e 735fcd04ef Merge branch 'w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
bert-e 1dee707eb8 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 17:36:39 +00:00
Jonathan Gramain 2c8d69c20a Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 10:18:39 -07:00
bert-e 9dc34f2155 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:14 +00:00
bert-e 08a4c3ade3 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:13 +00:00
bert-e 5435c14116 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:40 +00:00
bert-e 38c44ea874 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:39 +00:00
bert-e 5472d0da59 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
bert-e cdc0bb1128 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
KillianG 39cba3ee6c
Merge remote-tracking branch 'origin/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust' into w/8.8/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust 2024-02-27 11:31:55 +01:00
KillianG a00952712f
Bump 8.7.47
Issue: CLDSRV-512
2024-02-27 10:41:34 +01:00
KillianG a246e18e17
Update test for startRestore
Issue: CLDSRV-512
2024-02-27 10:26:19 +01:00
KillianG 3bb3a4d161
Use scaledMsPerDay when restore-adjust
Use scaledMsPerDay when restoring an object that has already been restored, to make time go faster for testing purposes.

Issue: CLDSRV-512
2024-02-27 10:26:11 +01:00
bert-e c6ba7f981e Merge branches 'w/8.8/bugfix/CLDSRV-498/null' and 'q/5526/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.8 2024-02-21 13:57:14 +00:00
bert-e 762ae5a0ff Merge branches 'w/8.7/bugfix/CLDSRV-498/null' and 'q/5526/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.7 2024-02-21 13:57:13 +00:00
bert-e 3205d117f5 Merge branches 'w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.8 2024-02-20 13:05:07 +00:00
bert-e 4cab3c84f3 Merge branches 'w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.7 2024-02-20 13:05:06 +00:00
williamlardier 0dcc93cdbe Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:49:56 +01:00
williamlardier 2f2f91d6e8 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:48:05 +01:00
bert-e 1433973e5c Merge branch 'w/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e 201170b1ed Merge branch 'w/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e 242b2ec85a Merge branches 'w/8.8/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.7/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.8 2024-02-19 15:00:59 +00:00
bert-e 3186a97113 Merge branches 'w/8.7/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.6/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.7 2024-02-19 15:00:59 +00:00
Will Toozs 0118dfabbb
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-505-ip-handling-fix' into w/8.8/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:40:58 +01:00
Will Toozs ff40dfaadf
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-505-ip-handling-fix' into w/8.7/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:25:18 +01:00
bert-e 9c99a6980f Merge branches 'w/8.8/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.8 2024-02-19 10:16:26 +00:00
bert-e d4e255781b Merge branches 'w/8.7/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.7 2024-02-19 10:16:26 +00:00
bert-e 1afaaec0ac Merge branch 'w/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.8/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:24 +00:00
bert-e e20e458971 Merge branch 'w/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.7/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:23 +00:00
bert-e bef9220032 Merge branches 'w/8.8/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.8 2024-02-15 18:43:31 +00:00
bert-e de20f1efdc Merge branches 'w/8.7/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.6/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.7 2024-02-15 18:43:31 +00:00
bert-e b89d19c9f8 Merge branch 'w/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:58:27 +00:00
Nicolas Humbert 4dc9788629 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-497/putmetadata' into w/8.7/bugfix/CLDSRV-497/putmetadata 2024-02-15 18:43:28 +01:00
bert-e 06dc042154 Merge branches 'w/8.8/improvement/CLDSRV-502' and 'q/5528/8.7/improvement/CLDSRV-502' into tmp/octopus/q/8.8 2024-02-08 13:49:18 +00:00
bert-e aa4643644a Merge branches 'w/8.7/improvement/CLDSRV-502' and 'q/5528/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.7 2024-02-08 13:49:18 +00:00
Francois Ferrand 4c7d3ae4bc
Merge branch 'w/8.7/improvement/CLDSRV-502' into w/8.8/improvement/CLDSRV-502 2024-02-05 18:50:27 +01:00
Francois Ferrand 23883dae8b
Merge branch 'w/8.6/improvement/CLDSRV-502' into w/8.7/improvement/CLDSRV-502 2024-02-05 18:50:12 +01:00
Francois Ferrand 531c83a359
Release 8.8.17
Issue: CLDSRV-500
2024-02-05 17:35:43 +01:00
Francois Ferrand b84fa851f7
Merge branch 'w/8.7/bugfix/CLDSRV-500' into w/8.8/bugfix/CLDSRV-500 2024-02-05 17:35:20 +01:00
Francois Ferrand 4cb1a879f7
Release 8.7.44
Issue: CLDSRV-500
2024-02-05 17:34:45 +01:00
Francois Ferrand 7ae55b20e7
Merge branch 'bugfix/CLDSRV-500' into w/8.7/bugfix/CLDSRV-500 2024-02-05 17:32:53 +01:00
Hervé Dombya 363afcd17f CLDSRV-473: fix cors issues in getVeeamFile 2024-01-26 15:59:10 +01:00
Frédéric Meinnel 1cf0250ce9 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.8/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:06:05 +01:00
Frédéric Meinnel 20d0b38d0b Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:05:39 +01:00
Frédéric Meinnel 601619f200 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.8/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:24:05 +01:00
Frédéric Meinnel a92e71fd50 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:22:55 +01:00
bert-e 43f62b847c Merge branch 'w/8.7/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.8/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e a031905bba Merge branch 'w/8.6/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.7/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e cd2406b827 Merge branches 'w/8.8/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.8 2024-01-15 09:47:24 +00:00
bert-e 62f707caff Merge branches 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.7 2024-01-15 09:47:23 +00:00
bert-e 848bf318fe Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:46 +00:00
bert-e 0beb48a1fd Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:45 +00:00
Will Toozs d274acd8ed
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-436-bump-version' into w/8.8/improvement/CLDSRV-436-bump-version 2024-01-11 13:10:57 +01:00
Will Toozs e6d9e8fc35
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-436-bump-version' into w/8.7/improvement/CLDSRV-436-bump-version 2024-01-11 11:50:25 +01:00
bert-e 7bb004586d Merge branch 'w/8.7/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.8/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:49 +00:00
bert-e d48de67723 Merge branch 'w/8.6/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.7/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:48 +00:00
bert-e b141c59bb7 Merge branch 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 0b79ecd942 Merge branch 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 10ca6b98fa Merge branch 'w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.8/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
bert-e 171925732f Merge branch 'w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
bert-e 70e8b20af9 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 0ec5f4fee5 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e e600677545 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
bert-e 72e5da10b7 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
bert-e 759817c5a0 Merge branch 'w/8.7/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
bert-e 035c7e8d7f Merge branch 'w/8.6/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
bert-e de27a5b88e Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e a4cc5e45f3 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e 9a8b707e82 Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:32 +00:00
bert-e 002dbe0019 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e d803bdcadc Merge branch 'w/8.7/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.8/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:57 +00:00
bert-e 4f1b8f25b7 Merge branch 'w/8.6/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.7/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e e969eeaa20 Merge branches 'w/8.8/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.8 2024-01-05 11:24:59 +00:00
bert-e 2ee78bcf6a Merge branches 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.7 2024-01-05 11:24:58 +00:00
bert-e f31fe2f2bf Merge branch 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.8/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
bert-e ee47cece90 Merge branch 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.7/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
Mickael Bourgois 2d50a76923
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-486-object-redirect-root' into w/8.8/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:59:20 +01:00
Mickael Bourgois 6b4f10ae56
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-486-object-redirect-root' into w/8.7/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:57:36 +01:00
williamlardier dbda5f16a6 CLDSRV-407: bump mongodb to v5.0 in CI 2024-01-04 14:04:20 +01:00
Maha Benzekri 2959c950dd
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.8/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:36:20 +01:00
Maha Benzekri 462ddf7ef1
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:34:44 +01:00
Jonathan Gramain ea7b69e313 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:26:27 -08:00
Jonathan Gramain 8ec1c2f2db Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:08:40 -08:00
bert-e 43f9606598 Merge branch 'w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:02 +00:00
bert-e be34e5ad59 Merge branch 'w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:01 +00:00
Mickael Bourgois 3ce869cea3
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-479-website-fqdn-index' into w/8.8/bugfix/CLDSRV-479-website-fqdn-index
# Conflicts:
#	package.json
2024-01-02 11:40:28 +01:00
Mickael Bourgois b7960784db
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-479-website-fqdn-index' into w/8.7/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:35:36 +01:00
bert-e bf235f3335 Merge branch 'w/8.7/bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.8/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:53 +00:00
bert-e 569c9f4368 Merge branch 'bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:52 +00:00
bert-e 1a3cb8108c Merge branch 'q/5495/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 042120b17e Merge branch 'q/5495/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e ba4593592d Merge branch 'w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 6efdb627da Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e e5b692f3db Merge branch 'w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.8/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:49 +00:00
bert-e 548ae8cd12 Merge branch 'w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:48 +00:00
bert-e 2a919af071 Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:47 +00:00
bert-e 5c300b8b6c Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:46 +00:00
Maha Benzekri 99068e7265
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:36:17 +01:00
Maha Benzekri cd039d8133
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update
In this commit, the only API change compared to 8.6 is
routeVeeam.
2023-12-14 17:33:03 +01:00
Maha Benzekri 75b293df8d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.8/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:37:14 +01:00
Maha Benzekri a855e38998
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:35:02 +01:00
Maha Benzekri ffe4ea4afe
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.8/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 14:47:24 +01:00
Maha Benzekri a16cfad0fc
CLDSRV-474: mongodb_image on all jobs 2023-12-12 14:06:02 +01:00
bert-e 556163e3e9 Merge branch 'w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:55:56 +00:00
Maha Benzekri 869d554e43
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.8/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:42:25 +01:00
Maha Benzekri 2f8b228595
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:39:20 +01:00
Maha Benzekri e44b7ed918
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 12:00:50 +01:00
Maha Benzekri 3cb29f7f8e
CLDSRV-429: version bump for version release 2023-12-05 12:00:09 +01:00
Maha Benzekri 4f08a4dff2
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 11:58:27 +01:00
Maha Benzekri 15a1aa7965
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 10:58:04 +01:00
Maha Benzekri 4470ee9125
CLDSRV-429: version bump for release 2023-12-05 10:55:31 +01:00
Francois Ferrand d8c12597ea
Release cloudserver 8.8.7
Issue: CLDSRV-471
2023-12-01 19:03:38 +01:00
Francois Ferrand c8eb9025fa
Merge remote-tracking branch 'origin/improvement/CLDSRV-471' into w/8.8/improvement/CLDSRV-471 2023-12-01 19:03:17 +01:00
Francois Ferrand 57e0f71e6a
Release cloudserver 8.7.33
Issue: CLDSRV-471
2023-12-01 19:01:30 +01:00
Francois Ferrand f22f920ee2
Bump arsenal 8.1.115
Issue: CLDSRV-471
2023-12-01 18:42:26 +01:00
Maha Benzekri ed1bb6301d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:31:50 +01:00
Maha Benzekri 70dfa5b11b
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:29:14 +01:00
Francois Ferrand a4e6f9d034
Add lifecycle restore duration metrics
Issue: CLDSRV-471
2023-11-30 14:55:01 +01:00
Maha Benzekri cf94b9de6a
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:23:08 +01:00
Maha Benzekri da0492d2bb
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:22:32 +01:00
Maha Benzekri 979b9065ed
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:19:27 +01:00
Maha Benzekri d5a3923f74
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:18:06 +01:00
bert-e bc291fe3a7 Merge branches 'w/8.8/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.8 2023-11-27 17:16:14 +00:00
bert-e 8dc7432c51 Merge branches 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.7 2023-11-27 17:16:14 +00:00
bert-e 6f963bdcd9 Merge branch 'w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:56 +00:00
bert-e cd9024fd32 Merge branch 'w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:55 +00:00
bert-e dff7610060 Merge branch 'w/8.7/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.8/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:07 +00:00
bert-e 757c2537ef Merge branch 'w/8.6/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.7/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:06 +00:00
bert-e 4515b2adbf Merge branch 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
bert-e 50ffdd260b Merge branch 'w/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
bert-e b5f22d8c68 Merge branches 'w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.8 2023-11-16 19:43:14 +00:00
bert-e 68ff54d49a Merge branches 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.7 2023-11-16 19:43:13 +00:00
bert-e 3fe5579c80 Merge branch 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:08 +00:00
bert-e 3fdd2bce21 Merge branch 'w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:07 +00:00
bert-e c9b512174f Merge branches 'w/8.8/bugfix/CLDSRV-460-forward-system-signals' and 'q/5431/8.7/bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/q/8.8 2023-11-15 10:14:18 +00:00
bert-e 7b48624cf7 Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into q/8.7 2023-11-15 10:14:17 +00:00
bert-e 55b07def2e Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/w/8.8/bugfix/CLDSRV-460-forward-system-signals 2023-11-15 09:43:35 +00:00
bert-e fcc9468b63 Merge branch 'w/8.6/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.7/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e efc44a620d Merge branch 'w/8.7/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.8/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e 1bc19b39d7 Merge branches 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.7 2023-11-13 17:20:17 +00:00
bert-e b5fa3a1fd3 Merge branches 'w/8.8/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.8 2023-11-13 17:20:17 +00:00
bert-e c0fc958365 Merge branch 'w/8.7/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.8/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:26 +00:00
bert-e d3c74d2c16 Merge branch 'w/8.6/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.7/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:25 +00:00
Kerkesni 07eda89a3f
forward system signals to the node process using tini
npm run doesn’t handle signal forwarding and crashes
on the SIGTERM signal sent by Kubernetes.

Tini runs as PID 1 and forwards system signals to all of
its child processes.

Issue: CLDSRV-460
2023-11-13 12:07:29 +01:00
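For illustration, the usual Dockerfile shape for this (the command is a placeholder, not the image's actual entrypoint): tini runs as PID 1 and forwards signals such as SIGTERM to its child tree, which `npm run` does not do.

```dockerfile
# tini becomes PID 1 and forwards SIGTERM & co. to its children,
# so Kubernetes can stop the pod cleanly.
ENTRYPOINT ["tini", "--"]
CMD ["node", "index.js"]
```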
bert-e 27b4066ca4 Merge branch 'w/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.7/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e 2ee5b356fa Merge branch 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.8/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e f5d3433413 Merge branches 'w/8.8/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.8 2023-11-09 17:31:36 +00:00
bert-e 62b4b9bc25 Merge branches 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.7 2023-11-09 17:31:35 +00:00
bert-e ec56c77881 Merge branch 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.8/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e d0abde3962 Merge branch 'w/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.7/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e fdc682f2db Merge branches 'w/8.8/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.7/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.8 2023-11-07 09:32:43 +00:00
bert-e b184606dc2 Merge branches 'w/8.7/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.6/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.7 2023-11-07 09:32:43 +00:00
Maha Benzekri 9ce0f2c2b6
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-424-apicall-auth-update' into w/8.8/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:20:41 +01:00
Maha Benzekri 43b4e0c713
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-424-apicall-auth-update' into w/8.7/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:18:48 +01:00
bert-e 9185f16554 Merge branch 'w/8.7/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.8/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
bert-e 2df9a57f9c Merge branch 'w/8.6/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.7/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
bert-e 68535f83d6 Merge branches 'w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.8 2023-10-24 18:40:33 +00:00
bert-e 41d63650be Merge branches 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.7 2023-10-24 18:40:32 +00:00
bert-e 12185f7c3b Merge branches 'w/8.8/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.7/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.8 2023-10-19 20:36:18 +00:00
bert-e 5f82ee2d0e Merge branches 'w/8.7/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.6/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.7 2023-10-19 20:36:18 +00:00
Taylor McKinnon d72bc5c6b9 Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-449/pass_overhead_fields' into w/8.8/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 13:16:26 -07:00
Taylor McKinnon 0e47810963 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-449/pass_overhead_fields' into w/8.7/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 12:40:23 -07:00
bert-e 3b36cef85f Merge branch 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 18:57:46 +00:00
Jonathan Gramain 114b885c7f Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 11:35:23 -07:00
williamlardier 3b95c033d2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal' into w/8.8/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal 2023-10-06 17:59:22 +02:00
williamlardier 04091dc316 CLDSRV-457: bump project version 2023-10-06 14:54:35 +02:00
williamlardier 56023a80ed CLDSRV-457: bump arsenal 2023-10-06 14:54:34 +02:00
bert-e 2deaebd89a Merge branch 'w/8.7/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.8/bugfix/CLDSRV-455/skip 2023-10-05 16:41:46 +00:00
bert-e c706ccf9c6 Merge branch 'w/8.6/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.7/bugfix/CLDSRV-455/skip 2023-10-05 16:41:45 +00:00
Francois Ferrand 583ea8490f
Bump 8.8.3
Issue: CLDSRV-454
2023-10-04 11:18:25 +02:00
bert-e 85a9480793 Merge branch 'w/8.8/improvement/CLDSRV-446/bump' into tmp/octopus/q/8.8 2023-10-03 10:44:50 +00:00
bert-e be2f65b69e Merge branch 'bugfix/CLDSRV-423-test-sproxyd' into q/8.8 2023-10-03 10:12:16 +00:00
bert-e 1ee6d0a87d Merge branch 'w/8.7/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.8/improvement/CLDSRV-446/bump 2023-10-02 15:25:13 +00:00
bert-e 224af9a5d2 Merge branch 'w/8.6/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-446/bump 2023-10-02 15:25:12 +00:00
bert-e 74f05377f0 Merge branch 'w/8.7/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.8/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:08 +00:00
bert-e 111e14cc89 Merge branch 'w/8.6/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.7/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:07 +00:00
Florent Monjalet 00b20f00d1 Merge remote-tracking branch 'origin/development/8.8' into bugfix/CLDSRV-423-test-sproxyd 2023-10-02 13:45:58 +02:00
Florent Monjalet a91d53a12c CLDSRV-423: test distinct and overwriting PUTs 2023-09-27 11:58:20 +02:00
Florent Monjalet 63d2637046 CLDSRV-423: improve async series usage in test 2023-09-27 11:50:44 +02:00
Maha Benzekri 5d416ad190
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-444-id-resource-policy' into w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:49:03 +02:00
Maha Benzekri ff29cda03f
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-444-id-resource-policy' into w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:47:33 +02:00
Florent Monjalet cb8baf2dab CLDSRV-423: provide a proper dockerfile for test sproxyd 2023-09-27 11:36:49 +02:00
bert-e 22f470c6eb Merge branch 'w/8.7/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:17 +00:00
bert-e e510473116 Merge branch 'w/8.6/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:16 +00:00
Florent Monjalet 17a6808fe4 CLDSRV-423: bump arsenal and sproxydclient to fix SPRXCLT-12 2023-08-31 19:07:44 +02:00
Florent Monjalet df646e4802 CLDSRV-423: disable failing tests that have just been reenabled
They had been disabled for a long while and cannot be reenabled yet
because they don't pass, so keep on skipping them for now.

Tickets have been created to take care of them:

- CLDSRV-440
- CLDSRV-441
- CLDSRV-442
- CLDSRV-443
2023-08-31 19:06:34 +02:00
Florent Monjalet 267770d256 CLDSRV-423: reproduce SPRXCLT-12 more often 2023-08-31 19:06:34 +02:00
Florent Monjalet 1b92dc2c05 CLDSRV-423: perform two successive puts in multiple backend tests
This tests for the SPRXCLT-12 issue
2023-08-31 19:06:34 +02:00
Florent Monjalet f80bb2f34b CLDSRV-423: don't run sproxyd test when testing Ceph 2023-08-31 19:06:34 +02:00
Florent Monjalet 4f89b67bb9 CLDSRV-423: Add missing mock logger method 2023-08-31 19:06:34 +02:00
Florent Monjalet 8b5630923c CLDSRV-423: refactor multiple backend put tests to avoid duplication 2023-08-31 19:06:34 +02:00
Florent Monjalet 9ff5e376e5 CLDSRV-423: reenable a good chunk of multiple backend tests 2023-08-31 19:06:34 +02:00
Florent Monjalet a9b5a2e3a4 CLDSRV-423: add put test for sproxyd 2023-08-31 19:06:34 +02:00
Florent Monjalet 7e9ec22ae3 CLDSRV-423: deploy sproxyd for multiple backend tests 2023-08-31 19:06:34 +02:00
bert-e 9d4664ae06 Merge branch 'w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.8/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
bert-e 662265ba2e Merge branch 'w/8.6/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
Taylor McKinnon 17e4f14f9c Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-413/bump_version' into w/8.8/bugfix/CLDSRV-413/bump_version 2023-08-18 10:10:01 -07:00
Taylor McKinnon 014b071536 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-413/bump_version' into w/8.7/bugfix/CLDSRV-413/bump_version 2023-08-18 10:07:14 -07:00
bert-e 2d45f92ae1 Merge branches 'w/8.8/feature/CLDSRV-420/backport' and 'q/5268/8.7/feature/CLDSRV-420/backport' into tmp/octopus/q/8.8 2023-08-18 14:53:18 +00:00
bert-e 48452496fa Merge branches 'w/8.7/feature/CLDSRV-420/backport' and 'q/5268/8.6/feature/CLDSRV-420/backport' into tmp/octopus/q/8.7 2023-08-18 14:53:18 +00:00
bert-e 18bf6b8d4a Merge branch 'w/8.7/feature/CLDSRV-420/backport' into tmp/octopus/w/8.8/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
bert-e 858c31a542 Merge branch 'w/8.6/feature/CLDSRV-420/backport' into tmp/octopus/w/8.7/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
bert-e 19d3e0bc9d Merge branch 'w/8.7/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.8/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:03:00 +00:00
bert-e bac044dc8f Merge branch 'w/8.6/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.7/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:02:59 +00:00
bert-e 8c0f709014 Merge branch 'bugfix/CLDSRV-422' into tmp/octopus/w/8.8/bugfix/CLDSRV-422 2023-08-16 11:46:43 +00:00
Francois Ferrand ce92d33a5d
Fix use of http_requests_total metrics
It was missed when metric names were updated. In addition, the dashboard
was not up-to-date with the Python source, and needed to be regenerated.

Issue: CLDSRV-422
2023-08-14 14:36:14 +02:00
Kerkesni 0381cce85c
Merge remote-tracking branch 'origin/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring' into w/8.8/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring 2023-08-10 16:07:42 +02:00
Kerkesni 20a08a2a4e
bump version to 8.7.26 2023-08-10 16:04:25 +02:00
Kerkesni ff73d8ab12
add tests for keeping object properties after restore
Issue: CLDSRV-408
2023-08-10 16:03:58 +02:00
Kerkesni 1ee44bc6d3
keep same object properties after a restore of a cold object
Object properties such as ACLs and custom user metadata should
not be removed after the restore of a cold object.

Issue: CLDSRV-408
2023-08-10 12:58:18 +02:00
bert-e 614e876536 Merge branches 'w/8.8/improvement/CLDSRV-400' and 'q/5191/8.7/improvement/CLDSRV-400' into tmp/octopus/q/8.8 2023-08-09 16:42:42 +00:00
bert-e b40a77d94b Merge branch 'improvement/CLDSRV-400' into q/8.7 2023-08-09 16:42:42 +00:00
bert-e 3a3a73b756 Merge branch 'improvement/CLDSRV-400' into tmp/octopus/w/8.8/improvement/CLDSRV-400 2023-08-09 16:19:33 +00:00
bert-e 3f6e85590d Merge branches 'w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.8 2023-08-07 17:27:19 +00:00
bert-e bc009945d2 Merge branches 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.7 2023-08-07 17:27:18 +00:00
bert-e 3ac30d9bab Merge branch 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e 32204fbfbf Merge branch 'w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e 5a26e1a80d Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 507a2d4ff5 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 1207a6fb70 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e 5883286864 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e 2a37e809d9 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:22 +00:00
bert-e 86ce7691cd Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:21 +00:00
bert-e e466b5e92a Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:16 +00:00
bert-e a4bc10f730 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:15 +00:00
Nicolas Humbert c480301e95 Merge remote-tracking branch 'origin/improvement/CLDSRV-414/bump' into w/8.8/improvement/CLDSRV-414/bump 2023-07-14 15:52:57 -04:00
Nicolas Humbert 276be285cc CLDSRV-414 bump version 2023-07-14 15:47:13 -04:00
bert-e 897d41392a Merge branch 'w/8.7/bugfix/CLDSRV-412/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-412/null 2023-07-14 14:08:38 +00:00
bert-e f4e3a19d61 Merge branch 'bugfix/CLDSRV-412/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-412/null 2023-07-14 14:08:37 +00:00
williamlardier 7c52fcbbb0
CLDSRV-402: bump project version 2023-07-13 17:45:06 +02:00
bert-e da52688a39 Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:26 +00:00
bert-e 1cb54a66f8 Merge branch 'w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:25 +00:00
bert-e d9fffdad9e Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 13:08:26 +00:00
williamlardier 389c32f819
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:06:34 +02:00
Kerkesni d26b8bcfcc
test keeping same storage class when restoring a cold object
Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
Kerkesni e4634621ee
keep storage class as cold for restored objects
To be compliant with the AWS S3 standard, the storage class
of restored objects should be left as the cold location.

Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
williamlardier 0b58b3ad2a
CLDSRV390: bump mongodb to 4.4 2023-06-22 16:56:53 +02:00
bert-e 652bf92536 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:43 +00:00
bert-e 344ee8a014 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:44:35 +00:00
bert-e b7e7f65d52 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:37:53 +00:00
bert-e c5b7450a4d Merge branches 'w/8.7/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.7 2023-06-12 22:01:22 +00:00
Nicolas Humbert 18c8d4ecac CLDSRV-404 bump version 2023-06-09 11:48:56 -04:00
Nicolas Humbert c8150c6857 CLDSRV-397 Introduce the time-progression-factor flag
The "time-progression-factor" variable serves as a testing-specific feature that accelerates the progression of time within a system.
By reducing the significance of each day, it enables the swift execution of specific actions, such as expiration, transition, and object locking, which are typically associated with longer timeframes.

This capability allows for efficient testing and evaluation of outcomes, optimizing the observation of processes that would normally take days or even years.
It's important to note that this variable is intended exclusively for testing purposes and is not employed in live production environments, where real-time progression is crucial for accurate results.
2023-06-08 12:14:36 -04:00
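
A minimal sketch of how such a factor could be applied, assuming a hypothetical environment variable and helper names (illustrative only, not the actual CloudServer code):

```js
// Hypothetical sketch: shrink the length of one "day" by the
// time-progression factor, so day-based lifecycle rules elapse
// quickly under test. All names here are illustrative.
const timeProgressionFactor =
    Number(process.env.TIME_PROGRESSION_FACTOR) || 1;

const msPerDay = (24 * 60 * 60 * 1000) / timeProgressionFactor;

// e.g. a 30-day expiration rule, evaluated against scaled days
function isExpired(lastModifiedMs, days, nowMs = Date.now()) {
    return nowMs - lastModifiedMs >= days * msPerDay;
}
```

With a factor of 86400, one "day" becomes one second, so a 30-day rule can be observed in about half a minute.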
bert-e 399a2a53ab Merge branch 'improvement/CLDSRV-399/addWorkflowDispatch' into q/8.7 2023-06-05 20:39:18 +00:00
Alexander Chan bbad049b5f CLDSRV-399: add workflow_dispatch 2023-06-05 11:30:35 -07:00
bert-e 2a4e2e1584 Merge branch 'w/8.6/improvement/CLDSRV-398/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-398/bump 2023-06-02 20:19:28 +00:00
bert-e b304d05614 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:27 +00:00
bert-e 004bd63368 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 16:12:33 +00:00
Nicolas Humbert 960d736962 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-396/put-metadata-null' into w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 11:24:34 -04:00
KillianG 32401c9a83
bump 8.7.23 2023-05-30 09:40:36 +00:00
KillianG 5f05b676cc
Merge remote-tracking branch 'origin/development/8.7' into HEAD 2023-05-26 09:46:21 +00:00
KillianG fd662a8c2c
Bump arsenal 8.1.101 and test that delete markers are not listed when bucket versioning is suspended
Issue: CLDSRV-347
2023-05-26 08:46:42 +00:00
bert-e 5d54dd58be Merge branch 'bugfix/CLDSRV-393' into q/8.7 2023-05-25 19:47:24 +00:00
Nicolas Humbert 1bd0deafcf CLDSRV-395 bump to 8.7.21 2023-05-25 14:02:47 -04:00
Francois Ferrand 7c788d3dbf Bump github actions
Issue: CLDSRV-393
2023-05-25 14:02:47 -04:00
Nicolas Humbert 50cb6a2bf1 CLDSRV-374 putMetadata API route is not updating null version properly
Instead of using the provided "null" value, the metadata "null version id" is now used when updating the metadata of a null version.
2023-05-25 09:40:20 -04:00
bert-e 58f7bb2877 Merge branch 'w/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.7/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:13 +00:00
Francois Ferrand ea284508d7
Update x-amz-restore when updating the expiry date
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 0981fa42f3
Add version name in release runs
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 7e63064a52
Bump github actions
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 71b21e40ca
Add eslint rule to prevent exclusive tests
Lint will fail if it finds any `describe.only` or `it.only`.

Issue: CLDSRV-393
2023-05-24 17:14:24 +02:00
Francois Ferrand ff894bb545
Remove describe.only
This should never have been committed, as it disables most unit tests in
CI.

This caused some tests to actually fail:
* bad import of refactored `objectDelete` api
* getting an object while transitioning (archiving) is allowed

Issue: CLDSRV-393
2023-05-24 17:09:33 +02:00
Francois Ferrand ae9f24e1bb
Update expiry date on s3:restore on restored object
If the object is already restored, we simply need to update the expiry
date, as per AWS docs:
> After restoring an archived object, you can update the restoration
> period by reissuing the request with a new period. Amazon S3 updates
> the restoration period relative to the current time.

Issue: CLDSRV-393
2023-05-24 16:52:45 +02:00
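
A minimal sketch of that behavior, assuming illustrative metadata field names (the actual `x-amz-restore` handling in CloudServer may differ):

```js
// Hypothetical sketch: a repeated restore request on an already
// restored object only recomputes the expiry, relative to "now".
function refreshRestoreExpiry(objectMD, days, now = new Date()) {
    const expiry = new Date(now.getTime() + days * 24 * 60 * 60 * 1000);
    objectMD['x-amz-restore'] = {
        'ongoing-request': false,            // already restored
        'expiry-date': expiry.toUTCString(), // updated restoration period
    };
    return objectMD;
}
```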
bert-e 2dc01ce3ed Merge branch 'w/8.7/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/q/8.7 2023-05-15 16:39:05 +00:00
Kerkesni 9bd9bef6c7
bump version in package.json to 8.7.20
Issue: CLDSRV-386
2023-05-11 10:34:27 +02:00
bert-e a6a5c273d5 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:27:25 +00:00
Kerkesni 6479076fec
bump node version to 16.20 in Dockerfile
Issue: CLDSRV-386
2023-05-10 13:35:54 +02:00
bert-e df45f481d0 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:22:48 +00:00
bert-e cd8c589eba Merge branch 'improvement/CLDSRV-375/exclude-keys' into tmp/octopus/w/8.7/improvement/CLDSRV-375/exclude-keys 2023-04-28 18:20:48 +00:00
williamlardier daec2661ae
CLDSRV-385: use mongodb v4.2 for the CI 2023-04-21 15:03:01 +02:00
Francois Ferrand 0f266371a0
Bump version 8.7.18
Issue: CLDSRV-383
2023-04-17 23:36:28 +02:00
Francois Ferrand 73e56963bf
Fix originOp when deleting a version
DeleteMarkerCreated was sent instead of the expected Delete, which breaks
bucket notifications.

Issue: CLDSRV-383
2023-04-17 23:14:49 +02:00
Alexander Chan fb11d0f42e Merge remote-tracking branch 'origin/feature/CLDSRV-368/addBackbeatRouteForIndexingOps' into w/8.7/feature/CLDSRV-368/addBackbeatRouteForIndexingOps 2023-04-14 18:35:38 -07:00
williamlardier 9cbd9f7be8
CLDSRV-381: bump project version 2023-04-14 22:29:03 +02:00
williamlardier c2fc8873cb
CLDSRV-381: bump arsenal 2023-04-14 22:28:47 +02:00
Francois Ferrand bee1ae04bf
Bump version 8.7.15
Issue: CLDSRV-380
2023-04-14 09:06:04 +02:00
Francois Ferrand eb86552a57
Allow reading transition-in-progress objects
This “transition in progress” state does not exist in AWS S3 (so we have no reference), and we need to access the data for the cold storage framework.

When the transition has been performed, the archive id and storage class will be updated first (as well as clearing the ‘transitioning’ flag) before triggering the “GC” to remove the (local) data.

So we are sure that data is available in this state, and that simply checking that the object is in cold storage is enough.

Issue: CLDSRV-380
2023-04-14 09:02:32 +02:00
bert-e f5d8f2fac5 Merge branch 'w/8.6/feature/CLDSRV-359-passGetDeleteMarkerFlag' into tmp/octopus/w/8.7/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 19:07:07 +00:00
bert-e 36e841b542 Merge branches 'w/8.7/feature/CLDSRV-355-activateNullKeys' and 'q/5069/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/q/8.7 2023-04-13 18:35:42 +00:00
williamlardier 1d12a430a0
CLDSRV-376: bump to 8.7.13 2023-04-13 16:59:28 +02:00
williamlardier bea27b4fb4
CLDSRV-376: update mongoClient used in functional tests 2023-04-13 16:59:13 +02:00
williamlardier 76405d9179
CLDSRV-376: bump mongodb driver 2023-04-13 16:59:12 +02:00
Alexander Chan 31b7f1e71c bump version 2023-04-12 15:36:00 -07:00
Alexander Chan 8674cac9f8 CLDSRV-379: bump arsenal 2023-04-12 15:35:27 -07:00
KillianG d5b666a246
Better indentation; use the bool isExpiration only in the first function, then use the originOp string directly afterwards
Issue: CLDSRV-367
2023-04-11 14:59:02 +00:00
KillianG 4360772971
Improve the way we pass originOp to make it clearer
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 6e152e33d5
Use a boolean parameter instead of a hardcoded originOp
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 94f34979a5
add origin op to all delete object calls
Issue: CLDSRV-367
2023-04-11 13:43:36 +00:00
bert-e 4b0f165b46 Merge branches 'w/8.7/improvement/CLDSRV-372/vid' and 'q/5109/8.6/improvement/CLDSRV-372/vid' into tmp/octopus/q/8.7 2023-04-07 18:35:02 +00:00
Nicolas Humbert 3590377554 Merge remote-tracking branch 'origin/improvement/CLDSRV-372/vid' into w/8.7/improvement/CLDSRV-372/vid 2023-04-07 07:58:01 -04:00
bert-e 8a08f97492 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 18:16:48 +00:00
bert-e 448afa50e3 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:47 +00:00
bert-e 50b738cfff Merge branch 'w/8.6/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 951a98fcaf Merge branch 'w/8.6/feature/CLDSRV-378-forceEnableNullCompatMode' into tmp/octopus/w/8.7/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 22:27:20 +00:00
bert-e 8ca770dcb7 Merge branch 'w/8.6/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into tmp/octopus/w/8.7/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 21:28:15 +00:00
bert-e 3585b8d5eb Merge branch 'w/8.6/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
bert-e 0a1489ee46 Merge branch 'w/8.6/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.7/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:59 +00:00
Xin LI de5b4331e2 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 11:00:24 +02:00
bert-e 46dff0321d Merge branch 'w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:07 +00:00
bert-e ddc6ea72be Merge branch 'improvement/CLDSRV-371/etag' into tmp/octopus/w/8.7/improvement/CLDSRV-371/etag 2023-03-29 20:22:38 +00:00
bert-e d266ff4e9f Merge branch 'w/8.6/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.7/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e 7dc2f07cb6 Merge branch 'w/8.7/improvement/CLDSRV-366/clear' into tmp/octopus/q/8.7 2023-03-28 13:25:15 +00:00
Kerkesni 6c22d87c55
bump version to 8.7.11
Issue: CLDSRV-362
2023-03-28 12:25:11 +02:00
Kerkesni 310f67d3a7
throw error when getting a transitioning object
Issue: CLDSRV-362
2023-03-28 12:24:50 +02:00
Kerkesni 49841c5e0e
throw error when copying parts from a cold object
A cold object should not be allowed to get copied as the data
is not accessible.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
Kerkesni b5334baca8
throw error when copying a cold or transitioning object
A cold object should not be allowed to get copied as the data
is not accessible.

The same issue happens when copying an object that is transitioning:
the data might get deleted while the copy is still in progress.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
Kerkesni e592671b54
add helper to check if object is in cold storage
Issue: CLDSRV-362
2023-03-28 12:24:48 +02:00
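
Such a helper could look like the following sketch; the field names are modeled on the archive section of Zenko object metadata but are illustrative here:

```js
// Hypothetical sketch of such a helper.
function isObjectInColdStorage(objectMD) {
    const archive = objectMD.archive;
    if (!archive || !archive.archiveInfo) {
        return false; // never archived: data is local
    }
    // restored, and the restore has not expired yet: data is readable
    if (archive.restoreCompletedAt &&
        new Date(archive.restoreWillExpireAt) > new Date()) {
        return false;
    }
    return true; // archived and not (or no longer) restored
}
```

The object-copy and part-copy paths above can then reject the request whenever this returns true.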
bert-e 6e0b66849d Merge branch 'improvement/CLDSRV-366/clear' into tmp/octopus/w/8.7/improvement/CLDSRV-366/clear 2023-03-28 03:45:02 +00:00
bert-e 18a1bfd325 Merge branch 'w/8.6/improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.7/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 23:39:13 +00:00
bert-e 2c999f4c10 Merge branch 'w/8.6/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:54 +00:00
bert-e bf7a643d45 Merge branch 'w/8.6/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into tmp/octopus/w/8.7/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 19:07:15 +00:00
bert-e 3f3bf0fdf0 Merge branches 'w/8.7/bugfix/CLDSRV-363/etag' and 'q/5078/8.6/bugfix/CLDSRV-363/etag' into tmp/octopus/q/8.7 2023-03-24 18:01:38 +00:00
bert-e 2a44949048 Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:06 +00:00
bert-e 6660626190 Merge branch 'bugfix/CLDSRV-363/etag' into tmp/octopus/w/8.7/bugfix/CLDSRV-363/etag 2023-03-24 13:23:06 +00:00
williamlardier 58fc0b7146
CLDSRV-350: bump to 8.7.10 2023-03-21 13:52:26 +01:00
williamlardier 11e3d7ecb2
CLDSRV-350: update veeam put and delete routes with new arsenal methods
We must ensure that concurrent updates of the bucket metadata won't conflict
with each other, by separately updating the capabilities fields. This change
ensures that two files can be uploaded at the same time without any problem,
regardless of the number of cloudserver instances.
2023-03-21 13:52:25 +01:00
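
The idea behind those Arsenal methods, sketched here with the plain MongoDB driver (the `__metastore` collection name and field path are assumptions, not the actual Arsenal API):

```js
// Hypothetical sketch: $set on one nested capability field updates
// only that file's entry, so two concurrent uploads of different
// files do not overwrite each other's read-modify-write of the
// whole bucket metadata document.
async function putCapabilityField(db, bucketName, fileName, value) {
    await db.collection('__metastore').updateOne(
        { _id: bucketName },
        { $set: { [`value.capabilities.VeeamSOSApi.${fileName}`]: value } },
    );
}
```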
williamlardier 1bab851ce3
CLDSRV-350: bump arsenal version 2023-03-21 13:52:25 +01:00
bert-e 0bc0341f33 Merge branch 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:37 +00:00
bert-e b5b0f6482b Merge branch 'feature/CLDSRV-317/listLifecycleOrphans' into tmp/octopus/w/8.7/feature/CLDSRV-317/listLifecycleOrphans 2023-03-20 13:53:09 +00:00
bert-e 755f282f8e Merge branch 'feature/CLDSRV-316/listLifecycleNonCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-316/listLifecycleNonCurrents 2023-03-17 18:00:21 +00:00
bert-e c4dc928de2 Merge branch 'feature/CLDSRV-314/listLifecycleCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-314/listLifecycleCurrents 2023-03-17 16:20:16 +00:00
Killian Gardahaut a0087e8d77
Bump 8.7.9
Issue: ZKOP-219
2023-03-17 09:58:21 +01:00
KillianG 8e5bea56b6
Refactor tests for more readability
Issue: CLDSRV-337
2023-03-17 09:58:21 +01:00
KillianG 976e349036
Add tests
Add tests for the azureArchiveLocationConstraintAssert function

Issue: CLDSRV-337
2023-03-17 09:58:16 +01:00
KillianG de1c23ac1b
Add test on location constraints to ensure the location is well configured
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 0b4d04a2a3
Add location azure archive to cold storage locations
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 049d396c8d
Add azure_archive location type
ISSUE: CLDSRV-337
2023-03-17 09:56:35 +01:00
Naren 5c04cbe6d1 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-327-cloudserver-metrics' into w/8.7/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 22:36:59 -07:00
bert-e 5cb63991a8 Merge branch 'w/8.6/improvement/CLDSRV-328-adapt-prescribed-metric-names' into tmp/octopus/w/8.7/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 16:30:18 +00:00
Alexander Chan c310cb3dd1 Merge remote-tracking branch 'origin/w/8.6/feature/CLDSRV-336/supportNewerNoncurrentVersions' into w/8.7/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-26 18:47:15 -08:00
bert-e 22cda51944 Merge branch 'w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/q/8.7 2023-02-22 16:39:53 +00:00
williamlardier 408d0de732
CLDSRV-343: bump cloudserver to the next version 2023-02-22 09:59:09 +01:00
williamlardier 83916c91fb
CLDSRV-343: enable back some CEPH backend tests
These tests also cover the ObjectTagging API with multiple backends.
Re-enabling them will help us avoid similar issues
in the future.
2023-02-17 14:24:59 +01:00
bert-e 110b2a35ed Merge branch 'w/8.6/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:38 +00:00
williamlardier a8117ca037
CLDSRV-343: use bucket name for backend tagging operations 2023-02-16 15:51:49 +01:00
bert-e 9145d1cf79 Merge branches 'w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.7 2023-02-15 20:43:46 +00:00
bert-e ae1b6dc3d1 Merge branch 'w/8.6/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.7/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
bert-e b1304b5f7f Merge branches 'w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.7 2023-02-10 12:57:22 +00:00
bert-e 6b1f8c61ec Merge branch 'w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:09 +00:00
bert-e 335bfabed1 Merge branch 'w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e 3398db3c0f Merge branch 'w/8.6/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.7/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:19 +00:00
bert-e 836e9fb22d Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:46 +00:00
bert-e ead7f5f7c2 Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:42 +00:00
bert-e c17059dc77 Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:51 +00:00
bert-e 8ace5b24a5 Merge branches 'development/8.7' and 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:48 +00:00
bert-e 39f7035dbd Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 14:13:42 +00:00
williamlardier bb62ed4fa7
CLDSRV-334: bump cloudserver to 8.7.7 2023-01-24 12:33:37 +01:00
williamlardier c95368858d
CLDSRV-334: bump arsenal to 8.1.82 2023-01-24 12:33:17 +01:00
bert-e d8ff1377fc Merge branch 'w/8.6/feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.7/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:31 +00:00
Jonathan Gramain 28f4c5baee Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.7/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:49:44 -08:00
bert-e 0a8f846f4b Merge branch 'w/8.6/feature/CLDSRV-244/migrateToGithubActions' into tmp/octopus/w/8.7/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 22:54:04 +00:00
Jonathan Gramain ac5de47ca1 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-312-bumpArsenal' into w/8.7/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 16:03:10 -08:00
williamlardier c147785464
CLDSRV-322: bump cloudserver version 2023-01-06 09:04:04 +01:00
williamlardier ca8c788757
CLDSRV-322: code improvements 2023-01-06 09:04:03 +01:00
williamlardier cb2af364bb
CLDSRV-322: Implement test for custom routes
Unit and functional tests are implemented to test the custom routes.
The LISTing is not yet tested, as it requires more changes to
generate a valid signature from Mocha.
2023-01-05 15:31:33 +01:00
williamlardier 1eb27d610b
CLDSRV-322: Support custom files for MultiObjectDelete
MultiObjectDelete is used by the product UI to delete the
files in buckets. This method is a POST that relies on the request
body to select the objects; hence, it is not possible to filter
it with an ingress rule in nginx.

The implementation avoids adding complexity
by extending existing loops, and adds a new step when eligible
files are found.

These files are extracted from the Veeam route list of accepted files,
but this implementation might change if more custom APIs are supported
in the future.
2023-01-05 15:31:33 +01:00
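
A sketch of that filtering step, assuming the parsed MultiObjectDelete entries carry a `key` property (illustrative, not the actual implementation):

```js
// Hypothetical sketch: split the parsed MultiObjectDelete entries
// into Veeam SOSAPI files (custom deletion path) and regular keys.
const veeamFolder = '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c';
const veeamFiles = [
    `${veeamFolder}/system.xml`,
    `${veeamFolder}/capacity.xml`,
];

function splitEligibleKeys(objects) {
    const eligible = [];
    const rest = [];
    objects.forEach(entry => {
        (veeamFiles.includes(entry.key) ? eligible : rest).push(entry);
    });
    return { eligible, rest };
}
```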
williamlardier 73b295c91d
CLDSRV-322: Implement LIST for SOSAPI routes
Listing of objects is needed for consistent user experience in the
product's User Interface.

Listing is implemented as a `GET` request with a specific query parameter
`list-type` and folder `.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c`.

This API:
- Handles both versioned and non-versioned listing
- Relies on predefined templates to fill the response content
- Extracts the system.xml and capacity.xml files from the bucket
  metadata
- Computes the listing response based on the input query parameters
  and files currently in the bucket md capabilities
- Handles errors if any unsupported query parameter is used. As any GET
  request is routed to this method, we return InvalidRequest if the requested
  action is not supported (i.e., not a ListObjectsV2)
2023-01-05 15:31:32 +01:00
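
A sketch of the query gate described above, using Arsenal's exported `errors` object (the surrounding routing and response handling are omitted):

```js
// Hypothetical sketch: only ListObjectsV2 (list-type=2) is accepted
// on the system folder; any other GET query is rejected.
const { errors } = require('arsenal');

function checkListQuery(query) {
    if (query['list-type'] !== '2') {
        return errors.InvalidRequest;
    }
    return null; // supported listing request
}
```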
williamlardier 8186c84bf9
CLDSRV-322: Implement DELETE for SOSAPI routes
Deletion of Veeam SOSAPI files is required to ensure a consistent
user experience. This API is only exposed to API/CLI clients, not the
User Interface. The User Interface relies on MultiObjectDelete to
perform the deletions, which is handled in a separate commit.

This API:
- Checks that the requested file exists
- Erases the bucket metadata according to the file
- Updates the bucket metadata with the updated values
- Handles errors if the Veeam capability is not yet enabled for the bucket
2023-01-05 15:31:32 +01:00
williamlardier 93ef2d0545
CLDSRV-322: Implement HEAD for SOSAPI routes
HEAD object is not formally required by Veeam SOSAPI, but Veeam
relies on the last-modified date value of the capacity.xml file.
To support any change in a future SOSAPI standard, the HEAD method
is implemented; it is similar to the GET method, where only the metadata
is returned.
2023-01-05 15:31:31 +01:00
williamlardier d7d0a31bb1
CLDSRV-322: Implement PUT for SOSAPI routes
In the SOSAPI context, the user is requested to pre-create two files,
system.xml and capacity.xml, under the veeam folder to enable the feature.

This API:
- Extracts the XML from the provided file and converts it to JSON
- Validates the JSON against joi schemas, if applicable
- Updates the bucket metadata, including the last-modified date
- Updates the bucket metadata in the database
- Returns the standard success code response
- Handles invalid XML or XML structure, and returns an error accordingly
2023-01-05 15:31:31 +01:00
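
A sketch of that parse-and-validate flow with xml2js and joi (the two libraries named above); the schema fields are illustrative, not the actual SOSAPI scheme:

```js
// Hypothetical sketch of the PUT validation flow.
const { parseStringPromise } = require('xml2js');
const joi = require('joi');

const systemSchema = joi.object({
    SystemInfo: joi.object({
        // xml2js wraps child elements in arrays by default
        ProtocolVersion: joi.array().items(joi.string()).required(),
    }).unknown(true).required(),
});

async function validateSystemXml(xmlBody) {
    const parsed = await parseStringPromise(xmlBody);
    // returns { value, error }; an error means an invalid structure
    return systemSchema.validate(parsed);
}
```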
williamlardier 4c69b82508
CLDSRV-322: Implement GET for SOSAPI
The GET method is used to determine whether SOSAPI is enabled
on a bucket.

Two files are supported: system.xml and capacity.xml.

This API:
- Gets the bucket metadata
- Dynamically recomputes a valid XML based on the bucket md content,
  using xml2js in headless mode, to produce the same XML as the one
  from the SOSAPI standard
- Rejects the request with an error if the bucket metadata does not
  exist
- Handles the `?tagging` request, required for versioned buckets, by
  returning static content.

The output stream relies on the utils file.
2023-01-05 15:31:30 +01:00
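
A sketch of the headless XML rebuild with xml2js (the metadata shape shown is illustrative):

```js
// Hypothetical sketch: rebuild the XML from the JSON stored in the
// bucket metadata; headless mode omits the <?xml?> declaration.
const xml2js = require('xml2js');

function buildCapacityXml(capacityMD) {
    // e.g. capacityMD = { CapacityInfo: { Capacity: 0, Available: 0, Used: 0 } }
    const builder = new xml2js.Builder({ headless: true });
    return builder.buildObject(capacityMD);
}
```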
williamlardier ca13284da3
CLDSRV-322: implement common util functions
Custom SOSAPI routes might either retrieve or stream data. The utils file
re-implements, with support for this particular context, some functions
from the standard API paths in Arsenal.

These changes mostly introduce ways to compute the right HTTP headers as
well as input or output streams to handle GET or PUT request types.
2023-01-05 15:31:30 +01:00
williamlardier c6ed75a1d7
CLDSRV-322: implement SOSAPI scheme validator
SOSAPI relies on standard XML files for both the system and the capacity.
It is used by Veeam12+ to determine what capabilities and/or
configuration should be enforced for a given S3-integrated Bucket used
for backups.

The commit introduces scheme validation for JSON objects, as XML will
be first converted using xml2js.

The system.xml file includes the protocol version of SOSAPI: if the
version is not known, no validation is made, to allow for future changes
without a formal need to update the product.

Note: a maximum XML file size will be enforced, in case of an unsupported
protocol version, to avoid space issues with the database.
2023-01-05 15:31:30 +01:00
williamlardier 402d0dea1a
CLDSRV-322: Create a new route for Veeam12 SOS API.
This new route is exposed through special nginx rules
from Zenko-Operator, to redirect any call to the veeam
folder, located under .system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c
at the root of the bucket. The goal is to store files in
the bucket metadata, to ease their update by internal jobs.
To avoid impacting the standard API, we rely on custom routes
with dedicated logic to handle these files, in a generic
way.

This commit introduces a new route that will manage, in turn:
- Handling of the incoming request.
- Validity checks, including the list of supported APIs according
  to the HTTP verb and query parameters.
- Authentication and authorization with Vault, in the same
  way as for usual files.
- Checks of the targeted bucket and/or keys, to extract the
  bucket metadata.
- Routing of the request to the right API handler.
2023-01-05 15:31:29 +01:00
williamlardier 95faec1db0
CLDSRV-322: bump arsenal version 2023-01-05 15:31:29 +01:00
Jonathan Gramain ca9d53f430 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-321-version-bump' into w/8.7/bugfix/CLDSRV-321-version-bump 2022-12-26 11:19:03 -08:00
bert-e b1ee1f8ef7 Merge branch 'w/8.6/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.7/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:26 +00:00
williamlardier e882cb6781
Merge remote-tracking branch 'origin/bugfix/CLDSRV-320-bump-arsenal' into w/8.7/bugfix/CLDSRV-320-bump-arsenal 2022-12-20 17:44:48 +01:00
Francois Ferrand cb7303636c
Release bump 8.7.1
Issue: CLDSRV-306
2022-12-16 19:56:19 +01:00
Francois Ferrand 6d0f889c23
Merge remote-tracking branch 'origin/feature/CLDSRV-306' into w/8.7/feature/CLDSRV-306 2022-12-16 19:54:23 +01:00
Francois Ferrand c13f2ae6a5
Merge remote-tracking branch 'origin/improvement/CLDSRV-305' into w/8.7/improvement/CLDSRV-305 2022-12-16 18:08:52 +01:00
bert-e b6611c4711 Merge branch 'w/8.6/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.7/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:48 +00:00
bert-e ae4ece471b Merge branch 'w/8.7/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/q/8.7 2022-12-14 21:19:55 +00:00
williamlardier 15b61cd947
CLDSRV-297: bump cloudserver to 8.7.0 2022-12-14 18:16:55 +01:00
williamlardier 91536c575f
CLDSRV-297: bump projects versions 2022-12-14 18:16:52 +01:00
150 changed files with 9075 additions and 9007 deletions

View File

@ -1,5 +1,8 @@
{
"extends": "scality",
"plugins": [
"mocha"
],
"rules": {
"import/extensions": "off",
"lines-around-directive": "off",
@ -42,7 +45,8 @@
"no-restricted-properties": "off",
"new-parens": "off",
"no-multi-spaces": "off",
"quote-props": "off"
"quote-props": "off",
"mocha/no-exclusive-tests": "error",
},
"parserOptions": {
"ecmaVersion": 2020

View File

@ -16,14 +16,14 @@ runs:
run: |-
set -exu;
mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v2
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: 'yarn'
- name: install dependencies
shell: bash
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
- uses: actions/cache@v2
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
@ -35,3 +35,9 @@ runs:
run: |
sudo apt-get install -y libdigest-hmac-perl
pip install 's3cmd==2.3.0'
- name: fix sproxyd.conf permissions
shell: bash
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
- name: ensure fuse kernel module is loaded (for sproxyd)
shell: bash
run: sudo modprobe fuse

View File

@ -40,6 +40,11 @@ services:
- DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file:
- creds.env
depends_on:
@ -67,14 +72,21 @@ services:
pykmip:
network_mode: "host"
profiles: ['pykmip']
image: registry.scality.com/cloudserver-dev/pykmip
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts
mongo:
network_mode: "host"
profiles: ['mongo', 'ceph']
image: scality/ci-mongo:3.6.8
image: ${MONGODB_IMAGE}
ceph:
network_mode: "host"
profiles: ['ceph']
image: ghcr.io/scality/cloudserver/ci-ceph
sproxyd:
network_mode: "host"
profiles: ['sproxyd']
image: sproxyd-standalone
build: ./sproxyd
user: 0:0
privileged: yes

28
.github/docker/mongodb/Dockerfile vendored Normal file
View File

@ -0,0 +1,28 @@
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \
CONF_DIR=/conf \
DATA_DIR=/data
# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
mkdir /logs; \
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
chown -R ${USER} ${CONF_DIR} && \
chown -R ${USER} ${DATA_DIR}
# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js
EXPOSE 27017/tcp
EXPOSE 27018
# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]

View File

@ -0,0 +1,4 @@
rs.initiate({
_id: "rs0",
members: [{ _id: 0, host: "127.0.0.1:27018" }]
});

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -exo pipefail
init_RS() {
sleep 5
mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &
mongod --bind_ip_all --config=/conf/mongod.conf

15
.github/docker/mongodb/conf/mongod.conf vendored Normal file
View File

@ -0,0 +1,15 @@
storage:
journal:
enabled: true
engine: wiredTiger
dbPath: "/data/db"
processManagement:
fork: false
net:
port: 27018
bindIp: 0.0.0.0
replication:
replSetName: "rs0"
enableMajorityReadConcern: true
security:
authorization: disabled

3
.github/docker/sproxyd/Dockerfile vendored Normal file
View File

@ -0,0 +1,3 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@ -0,0 +1,26 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

88
.github/docker/sproxyd/conf/nginx.conf vendored Normal file
View File

@ -0,0 +1,88 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}

View File

@ -0,0 +1,12 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
},
}

View File

@ -0,0 +1,43 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root

View File

@ -20,13 +20,16 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.1
uses: scality/action-prom-render-test@1.0.3
with:
alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }}
alert_inputs: >-
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
alert_inputs: |
namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -14,12 +14,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v3
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v3

View File

@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v3
uses: actions/dependency-review-action@v4

View File

@ -10,58 +10,69 @@ on:
required: true
env:
REGISTRY_NAME: registry.scality.com
PROJECT_NAME: ${{ github.event.repository.name }}
jobs:
build-federation-image:
uses: scality/workflows/.github/workflows/docker-build.yaml@v1
secrets: inherit
with:
push: true
registry: registry.scality.com
namespace: ${{ github.event.repository.name }}
name: ${{ github.event.repository.name }}
context: .
file: images/svc-base/Dockerfile
tag: ${{ github.event.inputs.tag }}-svc-base
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
uses: docker/build-push-action@v5
with:
push: true
context: .
file: images/svc-base/Dockerfile
tags: |
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3
- name: Login to Registry
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY_NAME }}
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Push dashboards into the production namespace
run: |
oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
dashboard.json:application/grafana-dashboard+json \
alerts.yaml:application/prometheus-alerts+yaml
working-directory: monitoring
- name: Build and push
uses: docker/build-push-action@v2
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }}
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create Release
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ github.token }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}

View File

@ -2,6 +2,8 @@
name: tests
on:
workflow_dispatch:
push:
branches-ignore:
- 'development/**'
@ -65,23 +67,24 @@ env:
ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
linting-coverage:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- uses: actions/setup-node@v2
uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '16'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v4
- uses: actions/setup-python@v5
with:
python-version: '3.9'
- uses: actions/cache@v2
- uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
@ -114,7 +117,7 @@ jobs:
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always()
- name: Upload files to artifacts
uses: scality/action-artifacts@v2
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -125,61 +128,88 @@ jobs:
build:
runs-on: ubuntu-20.04
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v1.10.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Registry
uses: docker/login-action@v1
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
password: ${{ github.token }}
- name: Build and push cloudserver image
uses: docker/build-push-action@v3
uses: docker/build-push-action@v5
with:
push: true
context: .
provenance: false
tags: |
ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
uses: docker/build-push-action@v5
with:
push: true
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v5
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend:
runs-on: ubuntu-latest
needs: build
env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose up -d
run: docker compose --profile sproxyd up -d
working-directory: .github/docker
- name: Run multiple backend test
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -198,11 +228,12 @@ jobs:
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -216,7 +247,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -236,11 +267,12 @@ jobs:
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -255,7 +287,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -275,12 +307,13 @@ jobs:
env:
S3BACKEND: file
S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
@ -297,7 +330,7 @@ jobs:
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -311,13 +344,14 @@ jobs:
needs: build
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -329,7 +363,51 @@ jobs:
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -345,11 +423,13 @@ jobs:
S3BACKEND: file
S3VAULT: mem
MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy KMIP certs
@ -365,7 +445,7 @@ jobs:
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
@ -373,7 +453,7 @@ jobs:
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
ceph-backend-test:
runs-on: ubuntu-latest
needs: build
@ -384,17 +464,18 @@ jobs:
CI_CEPH: 'true'
MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Login to GitHub Registry
uses: docker/login-action@v1.10.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1
@ -442,7 +523,7 @@ jobs:
S3VAULT: mem
S3METADATA: mongodb
- name: Upload logs to artifacts
uses: scality/action-artifacts@v3
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net

View File

@ -1,4 +1,4 @@
ARG NODE_VERSION=16.17.1-bullseye-slim
ARG NODE_VERSION=16.20-bullseye-slim
FROM node:${NODE_VERSION} as builder
@ -23,6 +23,7 @@ RUN apt-get update \
ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
################################################################################
@ -42,6 +43,7 @@ EXPOSE 8002
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
tini \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app
@ -53,6 +55,6 @@ COPY --from=builder /usr/src/app/node_modules ./node_modules/
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]

175
README.md
View File

@ -1,10 +1,7 @@
# Zenko CloudServer
# Zenko CloudServer with Vitastor Backend
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -14,137 +11,71 @@ Scality's Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud.
CloudServer is useful for developers, either to run as part of a
continuous integration test environment to emulate the AWS S3 service locally,
or as an abstraction layer to develop object-storage-enabled
applications on the go.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
backend support.
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
## Quick Start with Vitastor
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
The Vitastor backend is in experimental status; however, you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs);
it works too 😊.
## Docker
Installation instructions:
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
### Install Vitastor
## Contributing
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
### Install Zenko with Vitastor Backend
## Installation
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build the Vitastor node.js binding by running `npm install` in the `node-binding` subdirectory of the Vitastor repository.
  You need `node-gyp` and `vitastor-client-dev` (the Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
### Dependencies
### Install and Configure MongoDB
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
### Clone source code
### Setup Zenko
```shell
git clone https://github.com/scality/S3.git
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
  You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
  in this file; a hypothetical example is sketched after this list.
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
### Start Zenko

Start the S3 server with: `node index.js`

If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.

Now you can access your S3 with `s3cmd` or `geesefs`:

```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```

```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```

## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)

## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)

## Docker

[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)

## Contributing

In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).

## Installation

### Dependencies

Building and running the Zenko CloudServer requires node.js 10.x and
yarn v1.17.x. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).

### Clone source code

```shell
git clone https://github.com/scality/S3.git
```

### Install js dependencies

Go to the ./S3 folder,

```shell
yarn install --frozen-lockfile
```

If you get an error regarding installation of the diskUsage module,
please install g++.

If you get an error regarding level-down bindings, try clearing your yarn cache:

```shell
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
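For example, with `s3cmd` (a sketch; the bucket, file, and constraint names are placeholders):

```shell
s3cmd --add-header='x-amz-meta-scal-location-constraint: myLocationConstraint' \
    put myFile s3://mybucket
```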
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it with Vault user management
Note: Vault is proprietary and must be accessed separately.
```shell
export S3VAULT=vault
yarn start
```
This starts a Zenko CloudServer using Vault for user management.
# Author & License
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)

View File

@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -1,46 +0,0 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -4,6 +4,7 @@
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
@ -101,6 +102,14 @@
"readPreference": "primary",
"database": "metadata"
},
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": {
"aws_s3": {
"httpAgent": {

config.json.vitastor Normal file
View File

@ -0,0 +1,71 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@ -116,7 +116,7 @@ const constants = {
],
// user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
objectLocationConstraintHeader: 'x-amz-storage-class',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties
@ -130,7 +130,7 @@ const constants = {
},
},
/* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true },
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
@ -205,9 +205,6 @@ const constants = {
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'],
validStorageClasses: [
'STANDARD',
],
lifecycleListing: {
CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',
@ -243,6 +240,9 @@ const constants = {
'objectPutPart',
'completeMultipartUpload',
],
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
};
module.exports = constants;

View File

@ -2,11 +2,12 @@
## Docker Image Generation
Docker images are hosted on [registry.scality.com](registry.scality.com).
CloudServer has two namespaces there:
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:
* Production Namespace: registry.scality.com/cloudserver
* Dev Namespace: registry.scality.com/cloudserver-dev
* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudserver-policies
With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
@ -18,8 +19,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
docker pull registry.scality.com/cloudserver/cloudserver:<tag>
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```
## Release Process

View File

@ -1,4 +1,4 @@
FROM registry.scality.com/federation/nodesvc-base:7.10.6.0
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
@ -14,8 +14,10 @@ RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \
git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all
yarn cache clean --all && \
yarn global remove typescript
# run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed!

View File

@ -1,10 +1,10 @@
'use strict'; // eslint-disable-line strict
/**
* Catch uncaught exceptions and add timestamp to aid debugging
*/
process.on('uncaughtException', err => {
process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
});
require('werelogs').stderrUtils.catchAndTimestampStderr(
undefined,
// Do not exit as workers have their own listener that will exit
// But primary don't have another listener
require('cluster').isPrimary ? 1 : null,
);
require('./lib/server.js')();

View File

@ -8,15 +8,17 @@ const crypto = require('crypto');
const { v4: uuidv4 } = require('uuid');
const cronParser = require('cron-parser');
const joi = require('@hapi/joi');
const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
const { s3routes, auth: arsenalAuth, s3middleware } = require('arsenal');
const { isValidBucketName } = s3routes.routesUtils;
const validateAuthConfig = arsenalAuth.inMemory.validateAuthConfig;
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
const validExternalBackends = require('../constants').externalBackends;
const { azureAccountNameRegex, base64Regex,
allowedUtapiEventFilterFields, allowedUtapiEventFilterStates,
} = require('../constants');
const { utapiVersion } = require('utapi');
const { scaleMsPerDay } = s3middleware.objectUtils;
const constants = require('../constants');
// config paths
@ -105,6 +107,47 @@ function parseSproxydConfig(configSproxyd) {
return joi.attempt(configSproxyd, joiSchema, 'bad config');
}
function parseRedisConfig(redisConfig) {
const joiSchema = joi.object({
password: joi.string().allow(''),
host: joi.string(),
port: joi.number(),
retry: joi.object({
connectBackoff: joi.object({
min: joi.number().required(),
max: joi.number().required(),
jitter: joi.number().required(),
factor: joi.number().required(),
deadline: joi.number().required(),
}),
}),
// sentinel config
sentinels: joi.alternatives().try(
joi.string()
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
.custom(hosts => hosts.split(',').map(item => {
const [host, port] = item.split(':');
return { host, port: Number.parseInt(port, 10) };
})),
joi.array().items(
joi.object({
host: joi.string().required(),
port: joi.number().required(),
})
).min(1),
),
name: joi.string(),
sentinelPassword: joi.string().allow(''),
})
.and('host', 'port')
.and('sentinels', 'name')
.xor('host', 'sentinels')
.without('sentinels', ['host', 'port'])
.without('host', ['sentinels', 'sentinelPassword']);
return joi.attempt(redisConfig, joiSchema, 'bad config');
}
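// Illustrative usage, not part of the change itself: the schema above accepts
// either a standalone instance or a sentinel setup, never both at once, e.g.:
//   parseRedisConfig({ host: '127.0.0.1', port: 6379, password: '' });
//   parseRedisConfig({ name: 'mymaster', sentinels: '10.0.0.1:16379,10.0.0.2:16379' });
// Mixing the two shapes (host/port together with sentinels/name) fails the
// xor/without rules above with 'bad config'.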
function restEndpointsAssert(restEndpoints, locationConstraints) {
assert(typeof restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints');
@ -237,6 +280,60 @@ function hdClientLocationConstraintAssert(configHd) {
return hdclientFields;
}
function azureArchiveLocationConstraintAssert(locationObj) {
const checkedFields = [
'azureContainerName',
'azureStorageEndpoint',
];
if (Object.keys(locationObj.details).length === 0 ||
!checkedFields.every(field => field in locationObj.details)) {
return;
}
const {
azureContainerName,
azureStorageEndpoint,
} = locationObj.details;
const stringFields = [
azureContainerName,
azureStorageEndpoint,
];
stringFields.forEach(field => {
assert(typeof field === 'string',
`bad config: ${field} must be a string`);
});
let hasAuthMethod = false;
if (locationObj.details.sasToken !== undefined) {
assert(typeof locationObj.details.sasToken === 'string',
`bad config: ${locationObj.details.sasToken} must be a string`);
hasAuthMethod = true;
}
if (locationObj.details.azureStorageAccountName !== undefined &&
locationObj.details.azureStorageAccessKey !== undefined) {
assert(typeof locationObj.details.azureStorageAccountName === 'string',
`bad config: ${locationObj.details.azureStorageAccountName} must be a string`);
assert(typeof locationObj.details.azureStorageAccessKey === 'string',
`bad config: ${locationObj.details.azureStorageAccessKey} must be a string`);
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
hasAuthMethod = true;
}
if (locationObj.details.tenantId !== undefined &&
locationObj.details.clientId !== undefined &&
locationObj.details.clientKey !== undefined) {
assert(typeof locationObj.details.tenantId === 'string',
`bad config: ${locationObj.details.tenantId} must be a string`);
assert(typeof locationObj.details.clientId === 'string',
`bad config: ${locationObj.details.clientId} must be a string`);
assert(typeof locationObj.details.clientKey === 'string',
`bad config: ${locationObj.details.clientKey} must be a string`);
assert(!hasAuthMethod, 'Multiple authentication methods are not allowed');
hasAuthMethod = true;
}
assert(hasAuthMethod, 'Missing authentication method');
}
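// Illustrative example, not part of the change itself: a minimal azure_archive
// location that passes this assert, using the SAS-token auth method (exactly one
// of sasToken, account name/key, or tenant/client credentials is allowed):
//   { type: 'azure_archive', details: {
//       azureContainerName: 'archive',
//       azureStorageEndpoint: 'https://myaccount.blob.core.windows.net/',
//       sasToken: 'sv=...' } }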
function dmfLocationConstraintAssert(locationObj) {
const checkedFields = [
'endpoint',
@ -280,7 +377,7 @@ function dmfLocationConstraintAssert(locationObj) {
function locationConstraintAssert(locationConstraints) {
const supportedBackends =
['mem', 'file', 'scality',
'mongodb', 'dmf'].concat(Object.keys(validExternalBackends));
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => {
@ -391,6 +488,9 @@ function locationConstraintAssert(locationConstraints) {
if (locationConstraints[l].type === 'dmf') {
dmfLocationConstraintAssert(locationConstraints[l]);
}
if (locationConstraints[l].type === 'azure_archive') {
azureArchiveLocationConstraintAssert(locationConstraints[l]);
}
if (locationConstraints[l].type === 'pfs') {
assert(typeof details.pfsDaemonEndpoint === 'object',
'bad config: pfsDaemonEndpoint is mandatory and must be an object');
@ -402,27 +502,23 @@ function locationConstraintAssert(locationConstraints) {
locationConstraints[l].details.connector.hdclient);
}
});
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
}
function parseUtapiReindex(config) {
const {
enabled,
schedule,
sentinel,
redis,
bucketd,
onlyCountLatestWhenObjectLocked,
} = config;
assert(typeof enabled === 'boolean',
'bad config: utapi.reindex.enabled must be a boolean');
assert(typeof sentinel === 'object',
'bad config: utapi.reindex.sentinel must be an object');
assert(typeof sentinel.port === 'number',
'bad config: utapi.reindex.sentinel.port must be a number');
assert(typeof sentinel.name === 'string',
'bad config: utapi.reindex.sentinel.name must be a string');
'bad config: utapi.reindex.enabled must be a boolean');
const parsedRedis = parseRedisConfig(redis);
assert(Array.isArray(parsedRedis.sentinels),
'bad config: utapi reindex redis config requires a list of sentinels');
assert(typeof bucketd === 'object',
'bad config: utapi.reindex.bucketd must be an object');
assert(typeof bucketd.port === 'number',
@ -440,6 +536,13 @@ function parseUtapiReindex(config) {
'bad config: utapi.reindex.schedule must be a valid ' +
`cron schedule. ${e.message}.`);
}
return {
enabled,
schedule,
redis: parsedRedis,
bucketd,
onlyCountLatestWhenObjectLocked,
};
}
function requestsConfigAssert(requestsConfig) {
@ -527,7 +630,6 @@ class Config extends EventEmitter {
// Read config automatically
this._getLocationConfig();
this._getConfig();
this._configureBackends();
}
_getLocationConfig() {
@ -739,11 +841,11 @@ class Config extends EventEmitter {
this.websiteEndpoints = config.websiteEndpoints;
}
this.clusters = false;
if (config.clusters !== undefined) {
assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: clusters must be a positive integer');
this.clusters = config.clusters;
this.workers = false;
if (config.workers !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0,
'bad config: workers must be a positive integer');
this.workers = config.workers;
}
if (config.usEastBehavior !== undefined) {
@ -981,8 +1083,7 @@ class Config extends EventEmitter {
assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) {
assert(
this._verifyRedisPassword(config.localCache.password),
assert(typeof config.localCache.password === 'string',
'config: bad password for localCache. password must' +
' be a string');
}
@ -1008,56 +1109,46 @@ class Config extends EventEmitter {
}
if (config.redis) {
if (config.redis.sentinels) {
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');
if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.redis.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.redis.sentinels)) {
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
}
if (config.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.redis.sentinelPassword));
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {
// check for standalone configuration
this.redis = {};
assert(typeof config.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
this.redis = parseRedisConfig(config.redis);
}
if (config.scuba) {
this.scuba = {};
if (config.scuba.host) {
assert(typeof config.scuba.host === 'string',
'bad config: scuba host must be a string');
this.scuba.host = config.scuba.host;
}
if (config.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.redis.password),
'bad config: invalid password for redis. password must ' +
'be a string');
this.redis.password = config.redis.password;
if (config.scuba.port) {
assert(Number.isInteger(config.scuba.port)
&& config.scuba.port > 0,
'bad config: scuba port must be a positive integer');
this.scuba.port = config.scuba.port;
}
}
if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
assert(typeof process.env.SCUBA_HOST === 'string',
'bad config: scuba host must be a string');
assert(Number.isInteger(Number(process.env.SCUBA_PORT))
&& Number(process.env.SCUBA_PORT) > 0,
'bad config: scuba port must be a positive integer');
this.scuba = {
host: process.env.SCUBA_HOST,
port: Number(process.env.SCUBA_PORT),
};
}
if (this.scuba) {
this.quotaEnabled = true;
}
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
config.quota?.maxStatenessMS ||
24 * 60 * 60 * 1000;
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
config.quota?.enableInflights || false;
this.quota = {
maxStaleness,
enableInflights,
};
if (config.utapi) {
this.utapi = { component: 's3' };
if (config.utapi.host) {
@ -1086,50 +1177,8 @@ class Config extends EventEmitter {
assert(config.redis, 'missing required property of utapi ' +
'configuration: redis');
if (config.utapi.redis) {
if (config.utapi.redis.sentinels) {
this.utapi.redis = { sentinels: [], name: null };
assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port });
});
} else {
// check for standalone configuration
this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port;
}
if (config.utapi.redis.retry !== undefined) {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}
this.utapi.redis.retry = config.utapi.redis.retry;
} else {
this.utapi.redis = parseRedisConfig(config.utapi.redis);
if (this.utapi.redis.retry === undefined) {
this.utapi.redis.retry = {
connectBackoff: {
min: 10,
@ -1140,22 +1189,6 @@ class Config extends EventEmitter {
},
};
}
if (config.utapi.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.password = config.utapi.redis.password;
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(
config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
}
if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics;
@ -1225,8 +1258,7 @@ class Config extends EventEmitter {
}
if (config.utapi && config.utapi.reindex) {
parseUtapiReindex(config.utapi.reindex);
this.utapi.reindex = config.utapi.reindex;
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
}
}
@ -1271,6 +1303,8 @@ class Config extends EventEmitter {
}
}
this.authdata = config.authdata || 'authdata.json';
this.kms = {};
if (config.kms) {
assert(typeof config.kms.userName === 'string');
@ -1490,25 +1524,6 @@ class Config extends EventEmitter {
this.outboundProxy.certs = certObj.certs;
}
this.managementAgent = {};
this.managementAgent.port = 8010;
this.managementAgent.host = 'localhost';
if (config.managementAgent !== undefined) {
if (config.managementAgent.port !== undefined) {
assert(Number.isInteger(config.managementAgent.port)
&& config.managementAgent.port > 0,
'bad config: managementAgent port must be a positive ' +
'integer');
this.managementAgent.port = config.managementAgent.port;
}
if (config.managementAgent.host !== undefined) {
assert.strictEqual(typeof config.managementAgent.host, 'string',
'bad config: management agent host must ' +
'be a string');
this.managementAgent.host = config.managementAgent.host;
}
}
// Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file,
// then create a fresh one as last resort.
@ -1574,6 +1589,7 @@ class Config extends EventEmitter {
// Version of the configuration we're running under
this.overlayVersion = config.overlayVersion || 0;
this._setTimeOptions();
this.multiObjectDeleteConcurrency = constants.multiObjectDeleteConcurrency;
const extractedNumber = Number.parseInt(config.multiObjectDeleteConcurrency, 10);
if (!isNaN(extractedNumber) && extractedNumber > 0 && extractedNumber < 1000) {
@ -1597,43 +1613,83 @@ class Config extends EventEmitter {
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
}
this._configureBackends(config);
}
_setTimeOptions() {
// NOTE: EXPIRE_ONE_DAY_EARLIER and TRANSITION_ONE_DAY_EARLIER are deprecated in favor of
// TIME_PROGRESSION_FACTOR which decreases the weight attributed to a day in order to among other things
// expedite the lifecycle of objects.
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
// decreases the weight attributed to a day in order to expedite the lifecycle of objects.
const timeProgressionFactor = Number.parseInt(process.env.TIME_PROGRESSION_FACTOR, 10) || 1;
const isIncompatible = (expireOneDayEarlier || transitionOneDayEarlier) && (timeProgressionFactor > 1);
assert(!isIncompatible, 'The environment variables "EXPIRE_ONE_DAY_EARLIER" or ' +
'"TRANSITION_ONE_DAY_EARLIER" are not compatible with the "TIME_PROGRESSION_FACTOR" variable.');
// The scaledMsPerDay value is initially set to the number of milliseconds per day
// (24 * 60 * 60 * 1000) as the default value.
// However, during testing, if the timeProgressionFactor is defined and greater than 1,
// the scaledMsPerDay value is decreased. This adjustment allows for simulating actions occurring
// earlier in time.
const scaledMsPerDay = scaleMsPerDay(timeProgressionFactor);
this.timeOptions = {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
};
}
getTimeOptions() {
return this.timeOptions;
}
_getAuthData() {
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
}
_configureBackends() {
_configureBackends(config) {
const backends = config.backends || {};
/**
* Configure the backends for Authentication, Data and Metadata.
*/
let auth = 'mem';
let data = 'multiple';
let metadata = 'file';
let kms = 'file';
let auth = backends.auth || 'mem';
let data = backends.data || 'multiple';
let metadata = backends.metadata || 'file';
let kms = backends.kms || 'file';
let quota = backends.quota || 'none';
if (process.env.S3BACKEND) {
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' +
'should be one of mem/file/scality/cdmi'
);
auth = process.env.S3BACKEND;
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
data = process.env.S3BACKEND;
metadata = process.env.S3BACKEND;
kms = process.env.S3BACKEND;
}
if (process.env.S3VAULT) {
auth = process.env.S3VAULT;
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
}
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
// Auth only checks for 'mem' since mem === file
auth = 'mem';
let authData;
if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) {
process.env.SCALITY_SECRET_ACCESS_KEY) {
authData = buildAuthDataAccount(
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
} else {
authData = this._getAuthData();
}
@ -1641,7 +1697,7 @@ class Config extends EventEmitter {
throw new Error('bad config: invalid auth config file.');
}
this.authData = authData;
} else if (auth === 'multiple') {
} else if (auth === 'multiple') {
const authData = this._getAuthData();
if (validateAuthConfig(authData)) {
throw new Error('bad config: invalid auth config file.');
@ -1656,9 +1712,9 @@ class Config extends EventEmitter {
'should be one of mem/file/scality/multiple'
);
data = process.env.S3DATA;
}
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
}
}
assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined,
@ -1671,18 +1727,18 @@ class Config extends EventEmitter {
if (process.env.S3KMS) {
kms = process.env.S3KMS;
}
if (process.env.S3QUOTA) {
quota = process.env.S3QUOTA;
}
this.backends = {
auth,
data,
metadata,
kms,
quota,
};
}
_verifyRedisPassword(password) {
return typeof password === 'string';
}
setAuthDataAccounts(accounts) {
this.authData.accounts = accounts;
this.emit('authdata-update');
@ -1805,10 +1861,19 @@ class Config extends EventEmitter {
.update(instanceId)
.digest('hex');
}
isQuotaEnabled() {
return !!this.quotaEnabled;
}
isQuotaInflightEnabled() {
return this.quota.enableInflights;
}
}
module.exports = {
parseSproxydConfig,
parseRedisConfig,
locationConstraintAssert,
ConfigObject: Config,
config: new Config(),
@ -1816,4 +1881,5 @@ module.exports = {
bucketNotifAssert,
azureGetStorageAccountName,
azureGetLocationCredentials,
azureArchiveLocationConstraintAssert,
};

View File

@ -7,6 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors');
@ -17,6 +18,7 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut');
@ -33,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight');
@ -44,7 +47,7 @@ const metadataSearch = require('./metadataSearch');
const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy');
const objectDelete = require('./objectDelete');
const { objectDelete } = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL');
@ -82,6 +85,10 @@ const api = {
// Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod];
if (!actionLog &&
@ -190,14 +197,17 @@ const api = {
return async.waterfall([
next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err });
return next(err);
return next(arsenalError);
}
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, next) => {
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
@ -207,7 +217,7 @@ const api = {
}
log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}
// issue 100 Continue to the client
writeContinue(request, response);
@ -238,12 +248,12 @@ const api = {
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
});
return undefined;
},
// Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
authorizationResults,
request,
requestContexts,
@ -254,13 +264,14 @@ const api = {
log.trace('tag authentication error', { error: err });
return next(err);
}
return next(null, userInfo, authResultsWithTags, streamingV4Params);
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
},
),
], (err, userInfo, authorizationResults, streamingV4Params) => {
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
return callback(err);
}
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) {
@ -277,19 +288,23 @@ const api = {
return acc;
}, {});
}
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params,
log, callback, authorizationResults);
log, methodCallback, authorizationResults);
}
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
sourceObject, sourceVersionId, log, methodCallback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
return this[apiMethod](userInfo, request, log, methodCallback);
});
},
bucketDelete,
@ -316,11 +331,14 @@ const api = {
bucketPutReplication,
bucketGetReplication,
bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle,
bucketDeleteLifecycle,
bucketPutPolicy,
bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy,
bucketPutObjectLock,
bucketPutNotification,

View File

@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3');
}
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
if (apiMethod === 'bucketPut') {
return null;
}
@ -65,7 +65,17 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = [];
if (apiMethodAfterVersionCheck === 'objectCopy'
if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';

View File

@ -2,11 +2,13 @@
* Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020
*/
const ObjectMDArchive = require('arsenal').models.ObjectMDArchive;
const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Get response header "x-amz-restore"
* Be called by objectHead.js
@ -32,7 +34,6 @@ function getAmzRestoreResHeader(objMD) {
return undefined;
}
/**
* Check if restore can be done.
*
@ -41,6 +42,23 @@ function getAmzRestoreResHeader(objMD) {
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/
function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage,
@ -52,18 +70,7 @@ function _validateStartRestore(objectMD, log) {
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreCompletedAt
&& new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt && !objectMD.archive?.restoreCompletedAt) {
if (objectMD.archive?.restoreRequestedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored
// check if archive.restoreRequestAt exists and archive.restoreCompletedAt not yet exists
log.debug('The object is currently being restored.',
@ -120,22 +127,36 @@ function validatePutVersionId(objMD, versionId, log) {
}
/**
* Check if the object is already restored
* Check if the object is already restored, and update the expiration date accordingly:
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored
*/
function isObjectAlreadyRestored(objectMD, log) {
// check if restoreCompletedAt field exists
// and archive.restoreWillExpireAt > current time
const isObjectAlreadyRestored = objectMD.archive?.restoreCompletedAt
&& new Date(objectMD.archive?.restoreWillExpireAt) >= new Date(Date.now());
log.debug('The restore status of the object.',
{
isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored'
});
function _updateObjectExpirationDate(objectMD, log) {
// Check if restoreCompletedAt field exists
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
// checked earlier in the process, so checking again here would create weird states
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
log.debug('The restore status of the object.', {
isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored'
});
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored;
}
@ -195,12 +216,32 @@ function startRestore(objectMD, restoreParam, log, cb) {
if (updateResultError) {
return cb(updateResultError);
}
return cb(null, isObjectAlreadyRestored(objectMD, log));
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
return cb(null, isObjectAlreadyRestored);
}
/**
* checks if object data is available or if it's in cold storage
* @param {ObjectMD} objMD Object metadata
* @returns {ArsenalError|null} error if object data is not available
*/
function verifyColdObjectAvailable(objMD) {
// return error when object is cold
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
const err = errors.InvalidObjectState
.customizeDescription('The operation is not valid for the object\'s storage class');
return err;
}
return null;
}
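// Illustrative outcomes, not part of the change itself:
//   objMD.archive absent                               -> null (data available)
//   archive set, restoreRequestedAt missing            -> InvalidObjectState (object in cold backend)
//   restoreRequestedAt set, restoreCompletedAt missing -> InvalidObjectState (restore in progress)
//   restoreCompletedAt set                             -> null (restored copy is readable)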
module.exports = {
startRestore,
getAmzRestoreResHeader,
validatePutVersionId,
verifyColdObjectAvailable,
};

View File

@ -52,6 +52,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version
@ -59,7 +60,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
overheadField, log, callback) {
overheadField, log, originOp, callback) {
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
@ -142,7 +143,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
metadataStoreParams.originOp = 's3:ObjectCreated:Put';
metadataStoreParams.originOp = originOp;
const defaultObjectLockConfiguration
= bucketMD.getObjectLockConfiguration();
if (defaultObjectLockConfiguration) {
@ -157,7 +158,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
// eslint-disable-next-line no-param-reassign
request.headers[constants.objectLocationConstraintHeader] =
objMD[constants.objectLocationConstraintHeader];
metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated';
metadataStoreParams.originOp = originOp;
}
const backendInfoObj =

View File

@ -4,23 +4,25 @@ const {
LifecycleDateTime,
LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
const {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier,
expireOneDayEarlier,
timeProgressionFactor,
});
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + expDays * oneDay);
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
}
function formatExpirationHeader(date, id) {

View File

@ -5,6 +5,7 @@ const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period
@ -20,8 +21,9 @@ function calculateRetainUntilDate(retention) {
const date = moment();
// Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate
= date.add(retainUntilDays, 'days');
= date.add(retainUntilDaysInMs, 'ms');
return retainUntilDate.toISOString();
}
/**

View File

@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/**
* Check if tier is supported
@ -59,6 +59,14 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
objectKey,
versionId: decodedVidResult,
requestType: request.apiMethods || 'restoreObject',
/**
* Restoring an object might not cause any impact on
* the storage, if the object is already restored: in
* this case, the duration is extended. We disable the
* quota evaluation and trigger it manually.
*/
checkQuota: false,
request,
};
return async.waterfall([
@ -116,6 +124,16 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
return next(err, bucketMD, objectMD);
});
},
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,

View File

@ -4,7 +4,7 @@ const async = require('async');
const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config');
const oneDay = 24 * 60 * 60 * 1000;
const { scaledMsPerDay } = config.getTimeOptions();
const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata
@ -460,6 +460,47 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
return options;
}
/**
* Keep metadatas when the object is restored from cold storage
* but remove the specific ones we don't want to keep
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {undefined}
*/
function restoreMetadata(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
// We need to keep user metadata and tags
Object.keys(objMD).forEach(key => {
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
metadataStoreParams.metaHeaders[key] = objMD[key];
}
});
if (objMD['x-amz-website-redirect-location']) {
if (!metadataStoreParams.headers) {
metadataStoreParams.headers = {};
}
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
}
if (objMD.replicationInfo) {
metadataStoreParams.replicationInfo = objMD.replicationInfo;
}
if (objMD.legalHold) {
metadataStoreParams.legalHold = objMD.legalHold;
}
if (objMD.acl) {
metadataStoreParams.acl = objMD.acl;
}
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.taggingCopy = objMD.tags;
}
/** overwritingVersioning - return versioning information for S3 to handle
* storing version metadata with a specific version id.
* @param {object} objMD - obj metadata
@ -471,10 +512,8 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
* version id of the null version
*/
function overwritingVersioning(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.updateMicroVersionId = true;
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
// set correct originOp
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
@ -487,7 +526,7 @@ function overwritingVersioning(objMD, metadataStoreParams) {
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
restoreCompletedAt: new Date(now),
restoreWillExpireAt: new Date(now + (days * oneDay)),
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
};
/* eslint-enable no-param-reassign */
@ -503,6 +542,8 @@ function overwritingVersioning(objMD, metadataStoreParams) {
};
}
restoreMetadata(objMD, metadataStoreParams);
return options;
}

View File

@ -0,0 +1,314 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
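// Illustrative outcomes, not part of the change itself, for a 100-byte object:
//   objectRestore                                     -> +100 (the restored copy is written)
//   objectDelete in a non-versioned bucket            -> -100 (bytes are reclaimed)
//   objectPut over an existing key, non-versioned     -> newLength - 100 (only the delta counts)
//   objectPut of a new version, versioning enabled    -> newLength (the previous version is kept)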
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
/**
* Evaluates quotas for a bucket and an account and update inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
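// Worked example with assumed numbers: a bucket quota of 100 bytes, 90 bytes
// already settled in the quota service, and a 20-byte incoming write (see
// validateQuotas below for how inflight/inflightForCheck are derived):
//   inflights enabled:  the 20 bytes are sent with the metrics request, so
//                       bytesTotal = 110 and inflightForCheck = 0;
//                       110 + 0 > 100 -> bucketQuotaExceeded = true
//   inflights disabled: the service only knows the settled 90 bytes, so
//                       bytesTotal = 90 and inflightForCheck = 20;
//                       90 + 20 > 100 -> bucketQuotaExceeded = true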
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
/**
* Validates the bucket and account quotas for an API call and updates the
* inflight bytes count.
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
* the incoming bytes, are under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {undefined} - calls back with errors.QuotaExceeded if a quota is exceeded
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
// is checked second. This logic handles these API calls by
// ensuring the bytes are positive (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes are compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// the sum of the current utilization metrics only are compared with the
// quota. They include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// The inflight bytes are negated so that, in case of an API
// error, re-issuing the same evaluation cancels the reservation.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};
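As a rough usage sketch (a hypothetical call site: callback and proceedWithWrite are placeholders, not names from the codebase), an API handler invokes validateQuotas before a write and relies on the finalizer hook to undo the inflight reservation if the API call later fails:

validateQuotas(request, bucketMD, request.accountQuotas, ['objectPut'],
    'objectPut', request.parsedContentLength, false, log, err => {
        if (err) {
            // errors.QuotaExceeded when a bucket or account quota is exhausted
            return callback(err);
        }
        // proceed with the write; on API failure, the hook pushed onto
        // request.finalizerHooks re-issues the evaluation with negated inflights
        return proceedWithWrite();
    });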


@ -0,0 +1,58 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
* bucketDeleteQuota - Delete the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketDeleteQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketDeleteQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
request,
};
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket)),
(bucket, next) => {
bucket.setQuota(0);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketDeleteQuota'
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}
module.exports = bucketDeleteQuota;

lib/api/bucketGetQuota.js Normal file

@ -0,0 +1,58 @@
const { errors } = require('arsenal');
const { pushMetric } = require('../utapi/utilities');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
/**
* bucketGetQuota - Get the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketGetQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketGetQuota' });
const { bucketName, headers, method } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketGetQuota',
request,
};
const xml = [];
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketGetQuota',
});
return callback(err, null, corsHeaders);
}
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<GetBucketQuota>',
'<Name>', bucket.getName(), '</Name>',
);
const bucketQuota = bucket.getQuota();
if (!bucketQuota) {
log.debug('bucket has no quota', {
method: 'bucketGetQuota',
});
return callback(errors.NoSuchQuota, null,
corsHeaders);
}
xml.push('<Quota>', bucketQuota, '</Quota>',
'</GetBucketQuota>');
pushMetric('getBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, xml.join(''), corsHeaders);
});
}
module.exports = bucketGetQuota;
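For a bucket named example-bucket with a 10000-byte quota (illustrative values), the handler above produces a response body of the form:

<?xml version="1.0" encoding="UTF-8"?>
<GetBucketQuota><Name>example-bucket</Name><Quota>10000</Quota></GetBucketQuota>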


@ -45,9 +45,8 @@ function checkLocationConstraint(request, locationConstraint, log) {
} else if (parsedHost && restEndpoints[parsedHost]) {
locationConstraintChecked = restEndpoints[parsedHost];
} else {
log.trace('no location constraint provided on bucket put;' +
'setting us-east-1');
locationConstraintChecked = 'us-east-1';
locationConstraintChecked = Object.keys(locationConstraints)[0];
log.trace('no location constraint provided on bucket put; setting ' +
locationConstraintChecked);
}
if (!locationConstraints[locationConstraintChecked]) {


@ -0,0 +1,85 @@
const { waterfall } = require('async');
const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const { parseString } = require('xml2js');
function validateBucketQuotaProperty(requestBody, next) {
const quota = requestBody.quota;
const quotaValue = parseInt(quota, 10);
if (Number.isNaN(quotaValue)) {
return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
}
if (quotaValue <= 0) {
return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
}
return next(null, quotaValue);
}
function parseRequestBody(requestBody, next) {
try {
const jsonData = JSON.parse(requestBody);
if (typeof jsonData !== 'object') {
throw new Error('Invalid JSON');
}
return next(null, jsonData);
} catch (jsonError) {
return parseString(requestBody, (xmlError, xmlData) => {
if (xmlError) {
return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
}
return next(null, xmlData);
});
}
}
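/**
* bucketUpdateQuota - Update the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/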
function bucketUpdateQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketUpdateQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || 'bucketUpdateQuota',
request,
};
let bucket = null;
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
bucket = b;
return next(err, bucket);
}),
(bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
(bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
next(err, bucket, quotaValue)),
(bucket, quotaValue, next) => {
bucket.setQuota(quotaValue);
return metadata.updateBucket(bucket.getName(), bucket, log, next);
},
], err => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketUpdateQuota'
});
monitoring.promMetrics('PUT', bucketName, err.code,
'updateBucketQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'PUT', bucketName, '200', 'updateBucketQuota');
pushMetric('updateBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, corsHeaders);
});
}
module.exports = bucketUpdateQuota;
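Given parseRequestBody above, both of the following request bodies are accepted (illustrative values); anything else is rejected with InvalidArgument:

{"quota": 10000}

<quota>10000</quota>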


@ -6,6 +6,7 @@ const convertToXml = s3middleware.convertToXml;
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { hasNonPrintables } = require('../utilities/stringChecks');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants');
const services = require('../services');
@ -65,7 +66,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (request.headers['x-amz-storage-class'] &&
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', bucketName,
errors.InvalidStorageClass.code, 'initiateMultipartUpload');


@ -23,13 +23,15 @@ const { isRequesterNonAccountUser } = require('./apiUtils/authorization/permissi
const { hasGovernanceBypassHeader, checkUserGovernanceBypass, ObjectLockInfo }
= require('./apiUtils/object/objectLockHelpers');
const requestUtils = policies.requestUtils;
const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const { validObjectKeys } = require('../routes/routeVeeam');
const { deleteVeeamCapabilities } = require('../routes/veeam/delete');
const { _bucketRequiresOplogUpdate } = require('./apiUtils/object/deleteObject');
const { overheadField } = require('../../constants');
const versionIdUtils = versioning.VersionID;
const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
/*
Format of xml request:
@ -331,6 +333,9 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
return callback(null, objMD, versionId);
},
(objMD, versionId, callback) => validateQuotas(
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
(objMD, versionId, callback) => {
const options = preprocessingVersioningDelete(
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
@ -346,7 +351,8 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
options.replayId = objMD.uploadId;
}
return services.deleteObject(bucketName, objMD,
entry.key, options, config.multiObjectDeleteEnableOptimizations, log, (err, toDelete) => {
entry.key, options, config.multiObjectDeleteEnableOptimizations, log,
's3:ObjectRemoved:Delete', (err, toDelete) => {
if (err) {
return callback(err);
}
@ -360,8 +366,9 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
// This call will create a delete-marker
return createAndStoreObject(bucketName, bucket, entry.key,
objMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, overheadField, log, (err, result) =>
callback(err, objMD, deleteInfo, result.versionId));
deleteInfo.newDeleteMarker, null, overheadField, log,
's3:ObjectRemoved:DeleteMarkerCreated', (err, result) =>
callback(err, objMD, deleteInfo, result.versionId));
},
], (err, objMD, deleteInfo, versionId) => {
if (err === skipError) {
@ -475,6 +482,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
return callback(errors.BadDigest);
}
const inPlayInternal = [];
const bucketName = request.bucketName;
const canonicalID = authInfo.getCanonicalID();
@ -500,8 +508,9 @@ function multiObjectDelete(authInfo, request, log, callback) {
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request,
request.actionImplicitDenies)) {
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
// affects the objects.
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results
@ -631,7 +640,11 @@ function multiObjectDelete(authInfo, request, log, callback) {
request);
if (areAllActionsAllowed) {
inPlay.push(entry);
if (validObjectKeys.includes(entry.key)) {
inPlayInternal.push(entry.key);
} else {
inPlay.push(entry);
}
} else {
errorResults.push({
entry,
@ -642,6 +655,11 @@ function multiObjectDelete(authInfo, request, log, callback) {
return next(null, quietSetting, errorResults, inPlay, bucketMD);
});
},
function handleInternalFiles(quietSetting, errorResults, inPlay, bucketMD, next) {
return async.each(inPlayInternal,
(localInPlay, next) => deleteVeeamCapabilities(bucketName, localInPlay, bucketMD, log, next),
err => next(err, quietSetting, errorResults, inPlay, bucketMD));
},
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
bucket, next) {
return getObjMetadataAndDelete(authInfo, canonicalID, request,


@ -23,6 +23,7 @@ const monitoring = require('../utilities/monitoringHandler');
const applyZenkoUserMD = require('./apiUtils/object/applyZenkoUserMD');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const versionIdUtils = versioning.VersionID;
const locationHeader = constants.objectLocationConstraintHeader;
@ -219,6 +220,14 @@ function objectCopy(authInfo, request, sourceBucket,
versionId: sourceVersionId,
getDeleteMarker: true,
requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request,
};
const valPutParams = {
@ -226,6 +235,7 @@ function objectCopy(authInfo, request, sourceBucket,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPut',
checkQuota: false,
request,
};
const dataStoreContext = {
@ -239,7 +249,7 @@ function objectCopy(authInfo, request, sourceBucket,
const responseHeaders = {};
if (request.headers['x-amz-storage-class'] &&
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', destBucketName,
errors.InvalidStorageClass.code, 'copyObject');
@ -277,7 +287,10 @@ function objectCopy(authInfo, request, sourceBucket,
});
},
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
return standardMetadataValidateBucketAndObj(valGetParams, request.actionImplicitDenies, log,
return standardMetadataValidateBucketAndObj({
...valGetParams,
destObjMD,
}, request.actionImplicitDenies, log,
(err, sourceBucketMD, sourceObjMD) => {
if (err) {
log.debug('error validating get part of request',
@ -290,6 +303,11 @@ function objectCopy(authInfo, request, sourceBucket,
log.debug('no source object', { sourceObject });
return next(err, null, destBucketMD);
}
// check if object data is in a cold storage
const coldErr = verifyColdObjectAvailable(sourceObjMD);
if (coldErr) {
return next(coldErr, null);
}
if (sourceObjMD.isDeleteMarker) {
log.debug('delete marker on source object',
{ sourceObject });


@ -21,16 +21,17 @@ const objectLockedError = new Error('object locked');
const { overheadField } = require('../../constants');
/**
* objectDelete - DELETE an object from a bucket
* objectDeleteInternal - DELETE an object from a bucket
* @param {AuthInfo} authInfo - requester's infos
* @param {object} request - request object given by router,
* includes normalized headers
* @param {Logger} log - werelogs request instance
* @param {boolean} isExpiration - true if the call comes from LifecycleExpiration
* @param {function} cb - final cb to call with the result and response headers
* @return {undefined}
*/
function objectDelete(authInfo, request, log, cb) {
log.debug('processing request', { method: 'objectDelete' });
function objectDeleteInternal(authInfo, request, log, isExpiration, cb) {
log.debug('processing request', { method: 'objectDeleteInternal' });
if (authInfo.isRequesterPublicUser()) {
log.debug('operation not available for public user');
monitoring.promMetrics(
@ -166,7 +167,10 @@ function objectDelete(authInfo, request, log, cb) {
// source does not have versioning.
return createAndStoreObject(bucketName, bucketMD, objectKey,
objectMD, authInfo, canonicalID, null, request, true, null,
log, err => {
log, isExpiration ?
's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated',
err => {
if (err) {
return next(err);
}
@ -176,9 +180,11 @@ function objectDelete(authInfo, request, log, cb) {
deleteInfo.removeDeleteMarker = true;
}
return services.deleteObject(bucketName, objectMD,
objectKey, delOptions, log, (err, delResult) =>
next(err, bucketMD, objectMD, delResult,
deleteInfo));
objectKey, delOptions, false, log, isExpiration ?
's3:LifecycleExpiration:Delete' :
's3:ObjectRemoved:Delete',
(err, delResult) =>
next(err, bucketMD, objectMD, delResult, deleteInfo));
});
}
if (delOptions && delOptions.deleteData) {
@ -199,14 +205,20 @@ function objectDelete(authInfo, request, log, cb) {
}
return services.deleteObject(bucketName, objectMD, objectKey,
delOptions, false, log, (err, delResult) => next(err, bucketMD,
objectMD, delResult, deleteInfo));
delOptions, false, log, isExpiration ?
's3:LifecycleExpiration:Delete' :
's3:ObjectRemoved:Delete',
(err, delResult) => next(err, bucketMD,
objectMD, delResult, deleteInfo));
}
// putting a new delete marker
deleteInfo.newDeleteMarker = true;
return createAndStoreObject(bucketName, bucketMD,
objectKey, objectMD, authInfo, canonicalID, null, request,
deleteInfo.newDeleteMarker, null, overheadField, log, (err, newDelMarkerRes) => {
deleteInfo.newDeleteMarker, null, overheadField, log, isExpiration ?
's3:LifecycleExpiration:DeleteMarkerCreated' :
's3:ObjectRemoved:DeleteMarkerCreated',
(err, newDelMarkerRes) => {
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo);
});
},
@ -295,4 +307,21 @@ function objectDelete(authInfo, request, log, cb) {
});
}
module.exports = objectDelete;
/**
* This function is used to delete an object from a bucket. The bucket must
* already exist and the user must have permission to delete the object.
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {werelogs.Logger} log - Logger object
* @param {function} cb - callback to server
* @return {undefined}
*/
function objectDelete(authInfo, request, log, cb) {
log.debug('processing request', { method: 'objectDelete' });
return objectDeleteInternal(authInfo, request, log, false, cb);
}
module.exports = {
objectDelete,
objectDeleteInternal,
};
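The split lets internal callers pick the notification family; a lifecycle-driven deletion would look like this sketch (authInfo, request, and log are assumed to be in scope):

// Passing isExpiration = true emits s3:LifecycleExpiration:* events
// instead of the s3:ObjectRemoved:* family used for user deletions.
objectDeleteInternal(authInfo, request, log, true, (err, resHeaders) => {
    // handle the result as a regular objectDelete would
});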


@ -91,7 +91,7 @@ function objectDeleteTagging(authInfo, request, log, callback) {
},
(bucket, objectMD, next) =>
// if external backends handles tagging
data.objectTagging('Delete', objectKey, bucket, objectMD,
data.objectTagging('Delete', objectKey, bucket.getName(), objectMD,
log, err => next(err, bucket, objectMD)),
], (err, bucket, objectMD) => {
const additionalResHeaders = collectCorsHeaders(request.headers.origin,


@ -21,6 +21,7 @@ const { locationConstraints } = config;
const monitoring = require('../utilities/monitoringHandler');
const { getPartCountFromMd5 } = require('./apiUtils/object/partInfo');
const { setExpirationHeaders } = require('./apiUtils/object/expirationHeaders');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const validateHeaders = s3middleware.validateConditionalHeaders;
@ -89,16 +90,12 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
return callback(err, null, corsHeaders);
}
const verCfg = bucket.getVersioningConfiguration();
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt &&
!objMD.archive.restoreCompletedAt))) {
const error = errors.InvalidObjectState;
// check if object data is in a cold storage
const coldErr = verifyColdObjectAvailable(objMD);
if (coldErr) {
monitoring.promMetrics(
'GET', bucketName, error.code, 'getObject');
return callback(error, null, corsHeaders);
'GET', bucketName, coldErr.code, 'getObject');
return callback(coldErr, null, corsHeaders);
}
if (objMD.isDeleteMarker) {
const responseMetaHeaders = Object.assign({},


@ -3,6 +3,7 @@ const { errors, versioning } = require('arsenal');
const constants = require('../../constants');
const aclUtils = require('../utilities/aclUtils');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -71,7 +72,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
query,
} = request;
if (headers['x-amz-storage-class'] &&
!constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
!config.locationConstraints[headers['x-amz-storage-class']]) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', request.bucketName,
errors.InvalidStorageClass.code, 'putObject');
@ -98,7 +99,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
'The encryption method specified is not supported');
const requestType = request.apiMethods || 'objectPut';
const valParams = { authInfo, bucketName, objectKey, versionId,
requestType, request };
requestType, request, withVersionId: isPutVersion };
const canonicalID = authInfo.getCanonicalID();
if (hasNonPrintables(objectKey)) {
@ -174,7 +175,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
writeContinue(request, request._response);
return createAndStoreObject(bucketName,
bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
request, false, streamingV4Params, overheadField, log, next);
request, false, streamingV4Params, overheadField, log, 's3:ObjectCreated:Put', next);
},
], (err, storingResult) => {
if (err) {
@ -242,6 +243,14 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
monitoring.promMetrics('PUT', bucketName, '200',
'putObject', newByteLength, oldByteLength, isVersionedObj,
null, ingestSize);
if (isPutVersion) {
const durationMs = Date.now() - new Date(objMD.archive.restoreRequestedAt);
monitoring.lifecycleDuration.observe(
{ type: 'restore', location: objMD.dataStoreName },
durationMs / 1000);
}
return callback(null, responseHeaders);
});
});


@ -13,6 +13,8 @@ const services = require('../services');
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const versionIdUtils = versioning.VersionID;
@ -44,6 +46,14 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
versionId: reqVersionId,
getDeleteMarker: true,
requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request,
};
@ -67,6 +77,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPutPart',
checkQuota: false,
request,
};
@ -87,6 +98,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
objectKey: destObjectKey,
partNumber: paddedPartNumber,
uploadId,
enableQuota: true,
};
return async.waterfall([
@ -133,6 +145,11 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
sourceLocationConstraintName =
sourceObjMD.location[0].dataStoreName;
}
// check if object data is in a cold storage
const coldErr = verifyColdObjectAvailable(sourceObjMD);
if (coldErr) {
return next(coldErr, null);
}
if (sourceObjMD.isDeleteMarker) {
log.debug('delete marker on source object',
{ sourceObject });
@ -175,9 +192,16 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
return next(null, copyLocator.dataLocator, destBucketMD,
copyLocator.copyObjectSize, sourceVerId,
sourceLocationConstraintName);
sourceLocationConstraintName, sourceObjMD);
});
},
function _validateQuotas(dataLocator, destBucketMD,
copyObjectSize, sourceVerId,
sourceLocationConstraintName, sourceObjMD, next) {
return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType,
request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err =>
next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName));
},
// get MPU shadow bucket to get splitter based on MD version
function getMpuShadowBucket(dataLocator, destBucketMD,
copyObjectSize, sourceVerId,


@ -21,6 +21,7 @@ const { BackendInfo } = models;
const writeContinue = require('../utilities/writeContinue');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const skipError = new Error('skip');
@ -60,6 +61,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
log.debug('processing request', { method: 'objectPutPart' });
const size = request.parsedContentLength;
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
log.debug('put part size too large', { size });
monitoring.promMetrics('PUT', request.bucketName, 400,
@ -103,6 +107,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
const { objectKey } = request;
const originalIdentityAuthzResults = request.actionImplicitDenies;
// For validating the request at the destinationBucket level the
// `requestType` is the general 'objectPut'.
const requestType = request.apiMethods || 'objectPutPart';
return async.waterfall([
// Get the destination bucket.
@ -122,9 +129,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}),
// Check the bucket authorization.
(destinationBucket, next) => {
// For validating the request at the destinationBucket level the
// `requestType` is the general 'objectPut'.
const requestType = request.apiMethods || 'objectPutPart';
if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo,
log, request, request.actionImplicitDenies)) {
log.debug('access denied for user on bucket', { requestType });
@ -132,6 +136,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}
return next(null, destinationBucket);
},
(destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas,
requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)),
// Get bucket server-side encryption, if it exists.
(destinationBucket, next) => getObjectSSEConfiguration(
request.headers, destinationBucket, log,


@ -96,7 +96,7 @@ function objectPutTagging(authInfo, request, log, callback) {
},
(bucket, objectMD, next) =>
// if external backend handles tagging
data.objectTagging('Put', objectKey, bucket, objectMD,
data.objectTagging('Put', objectKey, bucket.getName(), objectMD,
log, err => next(err, bucket, objectMD)),
], (err, bucket, objectMD) => {
const additionalResHeaders = collectCorsHeaders(request.headers.origin,


@ -1,4 +1,3 @@
const vaultclient = require('vaultclient');
const { auth } = require('arsenal');
const { config } = require('../Config');
@ -21,6 +20,7 @@ function getVaultClient(config) {
port,
https: true,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
} else {
logger.info('vaultclient configuration', {
@ -28,6 +28,7 @@ function getVaultClient(config) {
port,
https: false,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port);
}
@ -49,10 +50,6 @@ function getMemBackend(config) {
}
switch (config.backends.auth) {
case 'mem':
implName = 'vaultMem';
client = getMemBackend(config);
break;
case 'multiple':
implName = 'vaultChain';
client = new ChainBackend('s3', [
@ -60,9 +57,14 @@ case 'multiple':
getVaultClient(config),
]);
break;
default: // vault
case 'vault':
implName = 'vault';
client = getVaultClient(config);
break;
default: // mem
implName = 'vaultMem';
client = getMemBackend(config);
break;
}
module.exports = new Vault(client, implName);

View File

@ -8,20 +8,6 @@ const inMemory = require('./in_memory/backend').backend;
const file = require('./file/backend');
const KMIPClient = require('arsenal').network.kmipClient;
const Common = require('./common');
let scalityKMS;
let scalityKMSImpl;
try {
// eslint-disable-next-line import/no-unresolved
const ScalityKMS = require('scality-kms');
scalityKMS = new ScalityKMS(config.kms);
scalityKMSImpl = 'scalityKms';
} catch (error) {
logger.warn('scality kms unavailable. ' +
'Using file kms backend unless mem specified.',
{ error });
scalityKMS = file;
scalityKMSImpl = 'fileKms';
}
let client;
let implName;
@ -33,8 +19,9 @@ if (config.backends.kms === 'mem') {
client = file;
implName = 'fileKms';
} else if (config.backends.kms === 'scality') {
client = scalityKMS;
implName = scalityKMSImpl;
const ScalityKMS = require('scality-kms');
client = new ScalityKMS(config.kms);
implName = 'scalityKms';
} else if (config.backends.kms === 'kmip') {
const kmipConfig = { kmip: config.kmip };
if (!kmipConfig.kmip) {


@ -1,131 +0,0 @@
/**
* Type of a message exchanged on the secure channel
* @readonly
* @enum {number}
*/
const MessageType = {
/** Message that contains a configuration overlay */
CONFIG_OVERLAY_MESSAGE: 1,
/** Message that requests a metrics report */
METRICS_REQUEST_MESSAGE: 2,
/** Message that contains a metrics report */
METRICS_REPORT_MESSAGE: 3,
/** Close the virtual TCP socket associated to the channel */
CHANNEL_CLOSE_MESSAGE: 4,
/** Write data to the virtual TCP socket associated to the channel */
CHANNEL_PAYLOAD_MESSAGE: 5,
};
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const TargetType = {
/** Let the dispatcher choose the most appropriate message */
TARGET_ANY: 0,
};
const headerSize = 3;
class ChannelMessageV0 {
/**
* @param {Buffer} buffer Message bytes
*/
constructor(buffer) {
this.messageType = buffer.readUInt8(0);
this.channelNumber = buffer.readUInt8(1);
this.target = buffer.readUInt8(2);
this.payload = buffer.slice(headerSize);
}
/**
* @returns {number} Message type
*/
getType() {
return this.messageType;
}
/**
* @returns {number} Channel number if applicable
*/
getChannelNumber() {
return this.channelNumber;
}
/**
* @returns {number} Target service, or 0 to choose automatically
*/
getTarget() {
return this.target;
}
/**
* @returns {Buffer} Message payload if applicable
*/
getPayload() {
return this.payload;
}
/**
* Creates a wire representation of a channel close message
*
* @param {number} channelId Channel number
*
* @returns {Buffer} wire representation
*/
static encodeChannelCloseMessage(channelId) {
const buf = Buffer.alloc(headerSize);
buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
return buf;
}
/**
* Creates a wire representation of a channel data message
*
* @param {number} channelId Channel number
* @param {Buffer} data Payload
*
* @returns {Buffer} wire representation
*/
static encodeChannelDataMessage(channelId, data) {
const buf = Buffer.alloc(data.length + headerSize);
buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
data.copy(buf, headerSize);
return buf;
}
/**
* Creates a wire representation of a metrics message
*
* @param {object} body Metrics report
*
* @returns {Buffer} wire representation
*/
static encodeMetricsReportMessage(body) {
const report = JSON.stringify(body);
const buf = Buffer.alloc(report.length + headerSize);
buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
buf.writeUInt8(0, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
buf.write(report, headerSize);
return buf;
}
/**
* Protocol name used for subprotocol negotiation
*/
static get protocolName() {
return 'zenko-secure-channel-v0';
}
}
module.exports = {
ChannelMessageV0,
MessageType,
TargetType,
};
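A small round trip over the V0 framing (a [type, channel, target] header followed by the payload), illustrative only:

const wire = ChannelMessageV0.encodeChannelDataMessage(7, Buffer.from('hello'));
const msg = new ChannelMessageV0(wire);
// msg.getType() === MessageType.CHANNEL_PAYLOAD_MESSAGE
// msg.getChannelNumber() === 7
// msg.getPayload().toString() === 'hello'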


@ -1,94 +0,0 @@
const WebSocket = require('ws');
const arsenal = require('arsenal');
const logger = require('../utilities/logger');
const _config = require('../Config').config;
const { patchConfiguration } = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const managementAgentMessageType = {
/** Message that contains the loaded overlay */
NEW_OVERLAY: 1,
};
const CONNECTION_RETRY_TIMEOUT_MS = 5000;
function initManagementClient() {
const { host, port } = _config.managementAgent;
const ws = new WebSocket(`ws://${host}:${port}/watch`);
ws.on('open', () => {
logger.info('connected with management agent');
});
ws.on('close', (code, reason) => {
logger.info('disconnected from management agent', { reason });
setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
});
ws.on('error', error => {
logger.error('error on connection with management agent', { error });
});
ws.on('message', data => {
const method = 'initManagementclient::onMessage';
const log = logger.newRequestLogger();
let msg;
if (!data) {
log.error('message without data', { method });
return;
}
try {
msg = JSON.parse(data);
} catch (err) {
log.error('data is an invalid json', { method, err, data });
return;
}
if (msg.payload === undefined) {
log.error('message without payload', { method });
return;
}
if (typeof msg.messageType !== 'number') {
log.error('messageType is not an integer', {
type: typeof msg.messageType,
method,
});
return;
}
switch (msg.messageType) {
case managementAgentMessageType.NEW_OVERLAY:
patchConfiguration(msg.payload, log, err => {
if (err) {
log.error('failed to patch overlay', {
error: reshapeExceptionError(err),
method,
});
}
});
return;
default:
log.error('new overlay message with unmanaged message type', {
method,
type: msg.messageType,
});
return;
}
});
}
function isManagementAgentUsed() {
return process.env.MANAGEMENT_USE_AGENT === '1';
}
module.exports = {
managementAgentMessageType,
initManagementClient,
isManagementAgentUsed,
};


@ -1,240 +0,0 @@
const arsenal = require('arsenal');
const { buildAuthDataAccount } = require('../auth/in_memory/builder');
const _config = require('../Config').config;
const metadata = require('../metadata/wrapper');
const { getStoredCredentials } = require('./credentials');
const latestOverlayVersionKey = 'configuration/overlay-version';
const managementDatabaseName = 'PENSIEVE';
const replicatorEndpoint = 'zenko-cloudserver-replicator';
const { decryptSecret } = arsenal.pensieve.credentialUtils;
const { patchLocations } = arsenal.patches.locationConstraints;
const { reshapeExceptionError } = arsenal.errorUtils;
const { replicationBackends } = require('arsenal').constants;
function overlayHasVersion(overlay) {
return overlay && overlay.version !== undefined;
}
function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
return (overlayHasVersion(remoteOverlay) &&
(!overlayHasVersion(cachedOverlay) ||
remoteOverlay.version > cachedOverlay.version));
}
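// e.g.: remoteOverlayIsNewer({ version: 3 }, { version: 4 }) -> true
//       remoteOverlayIsNewer({ version: 3 }, { version: 3 }) -> false
//       remoteOverlayIsNewer({}, { version: 1 })             -> true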
/**
* Updates the live {Config} object with the new overlay configuration.
*
* No-op if this version was already applied to the live {Config}.
*
* @param {object} newConf Overlay configuration to apply
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, newConf)
*
* @returns {undefined}
*/
function patchConfiguration(newConf, log, cb) {
if (newConf.version === undefined) {
log.debug('no remote configuration created yet');
return process.nextTick(cb, null, newConf);
}
if (_config.overlayVersion !== undefined &&
newConf.version <= _config.overlayVersion) {
log.debug('configuration version already applied',
{ configurationVersion: newConf.version });
return process.nextTick(cb, null, newConf);
}
return getStoredCredentials(log, (err, creds) => {
if (err) {
return cb(err);
}
const accounts = [];
if (newConf.users) {
newConf.users.forEach(u => {
if (u.secretKey && u.secretKey.length > 0) {
const secretKey = decryptSecret(creds, u.secretKey);
// accountType will be service-replication or service-clueso
let serviceName;
if (u.accountType && u.accountType.startsWith('service-')) {
serviceName = u.accountType.split('-')[1];
}
const newAccount = buildAuthDataAccount(
u.accessKey, secretKey, u.canonicalId, serviceName,
u.userName);
accounts.push(newAccount.accounts[0]);
}
});
}
const restEndpoints = Object.assign({}, _config.restEndpoints);
if (newConf.endpoints) {
newConf.endpoints.forEach(e => {
restEndpoints[e.hostname] = e.locationName;
});
}
if (!restEndpoints[replicatorEndpoint]) {
restEndpoints[replicatorEndpoint] = 'us-east-1';
}
const locations = patchLocations(newConf.locations, creds, log);
if (Object.keys(locations).length !== 0) {
try {
_config.setLocationConstraints(locations);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply configuration version location ' +
'constraints', { error: exceptionError,
method: 'patchConfiguration' });
return cb(exceptionError);
}
try {
const locationsWithReplicationBackend = Object.keys(locations)
// NOTE: In Orbit, we don't need to have Scality location in our
// replication endpoint config, since we do not replicate to
// any Scality Instance yet.
.filter(key => replicationBackends
[locations[key].type])
.reduce((obj, key) => {
/* eslint no-param-reassign:0 */
obj[key] = locations[key];
return obj;
}, {});
_config.setReplicationEndpoints(
locationsWithReplicationBackend);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply replication endpoints',
{ error: exceptionError, method: 'patchConfiguration' });
return cb(exceptionError);
}
}
_config.setAuthDataAccounts(accounts);
_config.setRestEndpoints(restEndpoints);
_config.setPublicInstanceId(newConf.instanceId);
if (newConf.browserAccess) {
if (Boolean(_config.browserAccessEnabled) !==
Boolean(newConf.browserAccess.enabled)) {
_config.browserAccessEnabled =
Boolean(newConf.browserAccess.enabled);
_config.emit('browser-access-enabled-change');
}
}
_config.overlayVersion = newConf.version;
log.info('applied configuration version',
{ configurationVersion: _config.overlayVersion });
return cb(null, newConf);
});
}
/**
* Writes configuration version to the management database
*
* @param {object} cachedOverlay Latest stored configuration version
* for freshness comparison purposes
* @param {object} remoteOverlay New configuration version
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, remoteOverlay)
*
* @returns {undefined}
*/
function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
const objName = `configuration/overlay/${remoteOverlay.version}`;
metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
{}, log, error => {
if (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not save configuration',
{ error: exceptionError,
method: 'saveConfigurationVersion',
configurationVersion: remoteOverlay.version });
cb(exceptionError);
return;
}
metadata.putObjectMD(managementDatabaseName,
latestOverlayVersionKey, remoteOverlay.version, {}, log,
error => {
if (error) {
log.error('could not save configuration version', {
configurationVersion: remoteOverlay.version,
});
}
cb(error, remoteOverlay);
});
});
} else {
log.debug('no remote configuration to cache yet');
process.nextTick(cb, null, remoteOverlay);
}
}
/**
* Loads the latest cached configuration overlay from the management
* database, without contacting the Orbit API.
*
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} callback Function called with (error, cachedOverlay)
*
* @returns {undefined}
*/
function loadCachedOverlay(log, callback) {
return metadata.getObjectMD(managementDatabaseName,
latestOverlayVersionKey, {}, log, (err, version) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return metadata.getObjectMD(managementDatabaseName,
`configuration/overlay/${version}`, {}, log, (err, conf) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return callback(null, conf);
});
});
}
function applyAndSaveOverlay(overlay, log) {
patchConfiguration(overlay, log, err => {
if (err) {
log.error('could not apply pushed overlay', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
saveConfigurationVersion(null, overlay, log, err => {
if (err) {
log.error('could not cache overlay version', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
log.info('overlay push processed');
});
});
}
module.exports = {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
saveConfigurationVersion,
remoteOverlayIsNewer,
applyAndSaveOverlay,
};


@ -1,145 +0,0 @@
const arsenal = require('arsenal');
const forge = require('node-forge');
const request = require('../utilities/request');
const metadata = require('../metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
const { reshapeExceptionError } = arsenal.errorUtils;
/**
* Retrieves Orbit API token from the management database.
*
* The token is used to authenticate stat posting and configuration overlay retrieval.
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function getStoredCredentials(log, callback) {
metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
log, callback);
}
function issueCredentials(managementEndpoint, instanceId, log, callback) {
log.info('registering with API to get token');
const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
const postData = {
publicKey,
};
request.post(`${managementEndpoint}/${instanceId}/register`,
{ body: postData, json: true }, (error, response, body) => {
if (error) {
return callback(error);
}
if (response.statusCode !== 201) {
log.error('could not register instance', {
statusCode: response.statusCode,
});
return callback(arsenal.errors.InternalError);
}
/* eslint-disable no-param-reassign */
body.privateKey = privateKey;
/* eslint-enable no-param-reassign */
return callback(null, body);
});
}
function confirmInstanceCredentials(
managementEndpoint, instanceId, creds, log, callback) {
const postData = {
serial: creds.serial || 0,
publicKey: creds.publicKey,
};
const opts = {
headers: {
'x-instance-authentication-token': creds.token,
},
body: postData,
};
request.post(`${managementEndpoint}/${instanceId}/confirm`,
opts, (error, response) => {
if (error) {
return callback(error);
}
if (response.statusCode === 200) {
return callback(null, instanceId, creds.token);
}
return callback(arsenal.errors.InternalError);
});
}
/**
* Initializes credentials and PKI in the management database.
*
* In case the management database is new and empty, the instance
* is registered as new against the Orbit API with newly-generated
* RSA key pair.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function initManagementCredentials(
managementEndpoint, instanceId, log, callback) {
getStoredCredentials(log, (error, value) => {
if (error) {
if (error.is.NoSuchKey) {
return issueCredentials(managementEndpoint, instanceId, log,
(error, value) => {
if (error) {
log.error('could not issue token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials' });
return callback(error);
}
log.debug('saving token');
return metadata.putObjectMD(managementDatabaseName,
tokenConfigurationKey, value, {}, log, error => {
if (error) {
log.error('could not save token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials',
});
return callback(error);
}
log.info('saved token locally, ' +
'confirming instance');
return confirmInstanceCredentials(
managementEndpoint, instanceId, value, log,
callback);
});
});
}
log.debug('could not get token', { error });
return callback(error);
}
log.info('returning existing token');
if (Date.now() - value.issueDate > tokenRotationDelay) {
log.warn('management API token is too old, should re-issue');
}
return callback(null, instanceId, value.token);
});
}
module.exports = {
getStoredCredentials,
initManagementCredentials,
};


@ -1,138 +0,0 @@
const arsenal = require('arsenal');
const async = require('async');
const metadata = require('../metadata/wrapper');
const logger = require('../utilities/logger');
const {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
} = require('./configuration');
const { initManagementCredentials } = require('./credentials');
const { startWSManagementClient } = require('./push');
const { startPollingManagementClient } = require('./poll');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const initRemoteManagementRetryDelay = 10000;
const managementEndpointRoot =
process.env.MANAGEMENT_ENDPOINT ||
'https://api.zenko.io';
const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
const pushEndpointRoot =
process.env.PUSH_ENDPOINT ||
'https://push.api.zenko.io';
const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;
function initManagementDatabase(log, callback) {
// XXX choose proper owner names
const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
'owner display name', new Date().toJSON());
metadata.createBucket(managementDatabaseName, md, log, error => {
if (error) {
if (error.is.BucketAlreadyExists) {
log.info('management database already exists');
return callback();
}
log.error('could not initialize management database',
{ error: reshapeExceptionError(error),
method: 'initManagementDatabase' });
return callback(error);
}
log.info('initialized management database');
return callback();
});
}
function startManagementListeners(instanceId, token) {
const mode = process.env.MANAGEMENT_MODE || 'push';
if (mode === 'push') {
const url = `${pushEndpoint}/${instanceId}/ws`;
startWSManagementClient(url, token);
} else {
startPollingManagementClient(managementEndpoint, instanceId, token);
}
}
/**
* Initializes Orbit-based management by:
* - creating the management database in metadata
* - generating a key pair for credentials encryption
* - generating an instance-unique ID
* - getting an authentication token for the API
* - loading and applying the latest cached overlay configuration
* - starting a configuration update and metrics push background task
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function to call once the overlay is loaded
* (overlay)
*
* @returns {undefined}
*/
function initManagement(log, callback) {
if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
|| process.env.S3BACKEND === 'mem') {
log.info('remote management disabled');
return;
}
/* Temporary check before to fully move to the process management agent. */
if (isManagementAgentUsed() ^ typeof callback === 'function') {
let msg = 'misuse of initManagement function: ';
msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
msg += `, callback type: ${typeof callback}`;
throw new Error(msg);
}
async.waterfall([
// eslint-disable-next-line arrow-body-style
cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
cb => initManagementDatabase(log, cb),
cb => metadata.getUUID(log, cb),
(instanceId, cb) => initManagementCredentials(
managementEndpoint, instanceId, log, cb),
(instanceId, token, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, {});
return;
}
loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
token, overlay));
},
(instanceId, token, overlay, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, overlay);
return;
}
patchConfiguration(overlay, log,
err => cb(err, instanceId, token, overlay));
},
], (error, instanceId, token, overlay) => {
if (error) {
log.error('could not initialize remote management, retrying later',
{ error: reshapeExceptionError(error),
method: 'initManagement' });
setTimeout(initManagement,
initRemoteManagementRetryDelay,
logger.newRequestLogger());
} else {
log.info(`this deployment's Instance ID is ${instanceId}`);
log.end('management init done');
startManagementListeners(instanceId, token);
if (callback) {
callback(overlay);
}
}
});
}
module.exports = {
initManagement,
initManagementDatabase,
};


@ -1,157 +0,0 @@
const arsenal = require('arsenal');
const async = require('async');
const request = require('../utilities/request');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const {
loadCachedOverlay,
patchConfiguration,
saveConfigurationVersion,
} = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const pushReportDelay = 30000;
const pullConfigurationOverlayDelay = 60000;
function loadRemoteOverlay(
managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
log.debug('loading remote overlay');
const opts = {
headers: {
'x-instance-authentication-token': remoteToken,
'x-scal-request-id': log.getSerializedUids(),
},
json: true,
};
request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
(error, response, body) => {
if (error) {
return cb(error);
}
if (response.statusCode === 200) {
return cb(null, cachedOverlay, body);
}
if (response.statusCode === 404) {
return cb(null, cachedOverlay, {});
}
return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
});
}
// TODO save only after successful patch
function applyConfigurationOverlay(
managementEndpoint, instanceId, remoteToken, log) {
async.waterfall([
wcb => loadCachedOverlay(log, wcb),
(cachedOverlay, wcb) => patchConfiguration(cachedOverlay,
log, wcb),
(cachedOverlay, wcb) =>
loadRemoteOverlay(managementEndpoint, instanceId, remoteToken,
cachedOverlay, log, wcb),
(cachedOverlay, remoteOverlay, wcb) =>
saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
(remoteOverlay, wcb) => patchConfiguration(remoteOverlay,
log, wcb),
], error => {
if (error) {
log.error('could not apply managed configuration',
{ error: reshapeExceptionError(error),
method: 'applyConfigurationOverlay' });
}
setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
});
}
function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
const toURL = `${managementEndpoint}/${instanceId}/stats`;
const toOptions = {
json: true,
headers: {
'content-type': 'application/json',
'x-instance-authentication-token': remoteToken,
},
body: report,
};
const toCallback = (err, response, body) => {
if (err) {
logger.info('could not post stats', { error: err });
}
if (response && response.statusCode !== 201) {
logger.info('could not post stats', {
body,
statusCode: response.statusCode,
});
}
if (next) {
next(null, instanceId, remoteToken);
}
};
return request.post(toURL, toOptions, toCallback);
}
function getStats(next) {
const fromURL = `http://localhost:${_config.port}/_/report`;
const fromOptions = {
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
},
};
return request.get(fromURL, fromOptions, next);
}
function pushStats(managementEndpoint, instanceId, remoteToken, next) {
if (process.env.PUSH_STATS === 'false') {
return;
}
getStats((err, res, report) => {
if (err) {
logger.info('could not retrieve stats', { error: err });
return;
}
logger.debug('report', { report });
postStats(
managementEndpoint,
instanceId,
remoteToken,
report,
next
);
return;
});
setTimeout(pushStats, pushReportDelay,
managementEndpoint, instanceId, remoteToken);
}
/**
* Starts background task that updates configuration and pushes stats.
*
* Periodically polls for configuration updates, and pushes stats at
* a fixed interval.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {string} remoteToken API authentication token
*
* @returns {undefined}
*/
function startPollingManagementClient(
managementEndpoint, instanceId, remoteToken) {
metadata.notifyBucketChange(() => {
pushStats(managementEndpoint, instanceId, remoteToken);
});
pushStats(managementEndpoint, instanceId, remoteToken);
applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
}
module.exports = {
startPollingManagementClient,
};


@ -1,301 +0,0 @@
const arsenal = require('arsenal');
const HttpsProxyAgent = require('https-proxy-agent');
const net = require('net');
const request = require('../utilities/request');
const { URL } = require('url');
const WebSocket = require('ws');
const assert = require('assert');
const http = require('http');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const { applyAndSaveOverlay } = require('./configuration');
const {
ChannelMessageV0,
MessageType,
} = require('./ChannelMessageV0');
const {
CONFIG_OVERLAY_MESSAGE,
METRICS_REQUEST_MESSAGE,
CHANNEL_CLOSE_MESSAGE,
CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;
const PING_INTERVAL_MS = 10000;
const subprotocols = [ChannelMessageV0.protocolName];
const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
|| 'localhost';
const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
|| _config.port;
let overlayMessageListener = null;
let connected = false;
// No wildcard or CIDR/mask matching for now
function createWSAgent(pushEndpoint, env, log) {
const url = new URL(pushEndpoint);
const noProxy = (env.NO_PROXY || env.no_proxy
|| '').split(',');
if (noProxy.includes(url.hostname)) {
log.info('push server ws has proxy exclusion', { noProxy });
return null;
}
if (url.protocol === 'https:' || url.protocol === 'wss:') {
const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
if (httpsProxy) {
log.info('push server ws using https proxy', { httpsProxy });
return new HttpsProxyAgent(httpsProxy);
}
} else if (url.protocol === 'http:' || url.protocol === 'ws:') {
const httpProxy = (env.HTTP_PROXY || env.http_proxy);
if (httpProxy) {
log.info('push server ws using http proxy', { httpProxy });
return new HttpsProxyAgent(httpProxy);
}
}
const allProxy = (env.ALL_PROXY || env.all_proxy);
if (allProxy) {
log.info('push server ws using wildcard proxy', { allProxy });
return new HttpsProxyAgent(allProxy);
}
log.info('push server ws not using proxy');
return null;
}
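A short usage sketch of the proxy selection above (the endpoint URL and proxy address are illustrative):

// Returns null when the hostname is excluded via NO_PROXY, otherwise an
// HttpsProxyAgent built from HTTPS_PROXY, HTTP_PROXY, or ALL_PROXY.
const agent = createWSAgent('wss://push.example.com/channel',
    { NO_PROXY: 'localhost', HTTPS_PROXY: 'http://proxy.example:3128' },
    logger);
// here agent is an HttpsProxyAgent: the hostname is not in NO_PROXY and
// the wss: protocol selects the HTTPS proxy branch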
/**
* Starts background task that updates configuration and pushes stats.
*
* Receives pushed Websocket messages on configuration updates, and
* sends stat messages in response to API solicitations.
*
* @param {string} url API endpoint
* @param {string} token API authentication token
* @param {function} cb end-of-connection callback
*
* @returns {undefined}
*/
function startWSManagementClient(url, token, cb) {
logger.info('connecting to push server', { url });
function _logError(error, errorMessage, method) {
if (error) {
logger.error(`management client error: ${errorMessage}`,
{ error: reshapeExceptionError(error), method });
}
}
const socketsByChannelId = [];
const headers = {
'x-instance-authentication-token': token,
};
const agent = createWSAgent(url, process.env, logger);
const ws = new WebSocket(url, subprotocols, { headers, agent });
let pingTimeout = null;
function sendPing() {
if (ws.readyState === ws.OPEN) {
ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
}
pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
}
function initiatePing() {
clearTimeout(pingTimeout);
setTimeout(sendPing, PING_INTERVAL_MS);
}
function pushStats(options) {
if (process.env.PUSH_STATS === 'false') {
return;
}
const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
const fromOptions = {
json: true,
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
'x-scal-report-skip-cache': Boolean(options && options.noCache),
},
};
request.get(fromURL, fromOptions, (err, response, body) => {
if (err) {
_logError(err, 'failed to get metrics report', 'pushStats');
return;
}
ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
err => _logError(err, 'failed to send metrics report message',
'pushStats'));
});
}
function closeChannel(channelId) {
const socket = socketsByChannelId[channelId];
if (socket) {
socket.destroy();
delete socketsByChannelId[channelId];
}
}
function receiveChannelData(channelId, payload) {
let socket = socketsByChannelId[channelId];
if (!socket) {
socket = net.createConnection(cloudServerPort, cloudServerHost);
socket.on('data', data => {
ws.send(ChannelMessageV0.
encodeChannelDataMessage(channelId, data), err =>
_logError(err, 'failed to send channel data message',
'receiveChannelData'));
});
socket.on('connect', () => {
});
socket.on('drain', () => {
});
socket.on('error', error => {
logger.error('failed to connect to S3', {
code: error.code,
host: error.address,
port: error.port,
});
});
socket.on('end', () => {
socket.destroy();
socketsByChannelId[channelId] = null;
ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
err => _logError(err,
'failed to send channel close message',
'receiveChannelData'));
});
socketsByChannelId[channelId] = socket;
}
socket.write(payload);
}
function browserAccessChangeHandler() {
if (!_config.browserAccessEnabled) {
// net.Socket has no close(): destroy() is the correct teardown here
socketsByChannelId.forEach(s => s.destroy());
}
}
ws.on('open', () => {
connected = true;
logger.info('connected to push server');
metadata.notifyBucketChange(() => {
pushStats({ noCache: true });
});
_config.on('browser-access-enabled-change', browserAccessChangeHandler);
initiatePing();
});
const cbOnce = cb ? arsenal.jsutil.once(cb) : null;
ws.on('close', () => {
logger.info('disconnected from push server, reconnecting in 10s');
metadata.notifyBucketChange(null);
_config.removeListener('browser-access-enabled-change',
browserAccessChangeHandler);
setTimeout(startWSManagementClient, 10000, url, token);
connected = false;
if (cbOnce) {
process.nextTick(cbOnce);
}
});
ws.on('error', err => {
connected = false;
logger.error('error from push server connection', {
error: err,
errorMessage: err.message,
});
if (cbOnce) {
process.nextTick(cbOnce, err);
}
});
ws.on('ping', () => {
ws.pong(err => _logError(err, 'failed to send a pong', 'ws.on(ping)'));
});
ws.on('pong', () => {
initiatePing();
});
ws.on('message', data => {
const log = logger.newRequestLogger();
const message = new ChannelMessageV0(data);
switch (message.getType()) {
case CONFIG_OVERLAY_MESSAGE:
if (!isManagementAgentUsed()) {
applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
} else {
if (overlayMessageListener) {
overlayMessageListener(message.getPayload().toString());
}
}
break;
case METRICS_REQUEST_MESSAGE:
pushStats();
break;
case CHANNEL_CLOSE_MESSAGE:
closeChannel(message.getChannelNumber());
break;
case CHANNEL_PAYLOAD_MESSAGE:
// browserAccessEnabled defaults to true unless explicitly false
if (_config.browserAccessEnabled !== false) {
receiveChannelData(
message.getChannelNumber(), message.getPayload());
}
break;
default:
logger.error('unknown message type from push server',
{ messageType: message.getType() });
}
});
}
function addOverlayMessageListener(callback) {
assert(typeof callback === 'function');
overlayMessageListener = callback;
}
function startPushConnectionHealthCheckServer(cb) {
const server = http.createServer((req, res) => {
if (req.url !== '/_/healthcheck') {
res.writeHead(404);
res.write('Not Found');
} else if (connected) {
res.writeHead(200);
res.write('Connected');
} else {
res.writeHead(503);
res.write('Not Connected');
}
res.end();
});
server.listen(_config.port, cb);
}
module.exports = {
createWSAgent,
startWSManagementClient,
startPushConnectionHealthCheckServer,
addOverlayMessageListener,
};
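A quick probe of the health-check server above, as a sketch (the port is an assumption taken from _config.port):

// Sketch: 200 'Connected' while the push websocket is up, 503 otherwise,
// 404 for any other path.
const http = require('http');
http.get('http://localhost:8000/_/healthcheck', res => {
    console.log(res.statusCode); // 200, 503, or 404
});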


@ -6,6 +6,9 @@ const BucketInfo = require('arsenal').models.BucketInfo;
const { isBucketAuthorized, isObjAuthorized } =
require('../api/apiUtils/authorization/permissionChecks');
const bucketShield = require('../api/apiUtils/bucket/bucketShield');
const { onlyOwnerAllowed } = require('../../constants');
const { actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal/build/lib/policyEvaluator/RequestContext');
const { processBytesToWrite, validateQuotas } = require('../api/apiUtils/quotas/quotaUtils');
/** getNullVersionFromMaster - retrieves the null version
* metadata via retrieving the master key
@ -152,9 +155,6 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
});
return errors.NoSuchBucket;
}
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
const onlyOwnerAllowed = ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'];
const canonicalID = authInfo.getCanonicalID();
if (!Array.isArray(requestType)) {
requestType = [requestType];
@ -184,7 +184,7 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
* @return {undefined} - and call callback with params err, bucket md
*/
function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) {
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request } = params;
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request, withVersionId } = params;
let requestType = params.requestType;
if (!Array.isArray(requestType)) {
requestType = [requestType];
@ -238,6 +238,21 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
}
return next(null, bucket, objMD);
},
(bucket, objMD, next) => {
const needQuotaCheck = requestType => requestType.some(type => actionNeedQuotaCheck[type] ||
actionWithDataDeletion[type]);
const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota;
// withVersionId cover cases when an object is being restored with a specific version ID.
// In this case, the storage space was already accounted for when the RestoreObject API call
// was made, so we don't need to add any inflight, but quota must be evaluated.
if (!checkQuota) {
return next(null, bucket, objMD);
}
const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId,
request?.parsedContentLength || 0, objMD, params.destObjMD);
return validateQuotas(request, bucket, request.accountQuotas, requestType, request.apiMethod,
contentLength, withVersionId, log, err => next(err, bucket, objMD));
},
], (err, bucket, objMD) => {
if (err) {
// still return bucket for cors headers
@ -279,6 +294,7 @@ module.exports = {
validateBucket,
metadataGetObject,
metadataGetObjects,
processBytesToWrite,
standardMetadataValidateBucketAndObj,
standardMetadataValidateBucket,
};
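To make the quota gating added above concrete, a small sketch (the flag tables come from the arsenal import in the diff; which actions they flag is an assumption):

// Sketch: quota evaluation is skipped unless at least one requested
// action either consumes storage or deletes data.
const { actionNeedQuotaCheck, actionWithDataDeletion } =
    require('arsenal/build/lib/policyEvaluator/RequestContext');

const needQuotaCheck = requestType =>
    requestType.some(type => actionNeedQuotaCheck[type]
        || actionWithDataDeletion[type]);

needQuotaCheck(['objectPut']);  // expected true: a write consumes storage
needQuotaCheck(['bucketGet']);  // expected false: reads skip quota checks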


@ -2,9 +2,9 @@ const MetadataWrapper = require('arsenal').storage.metadata.MetadataWrapper;
const { config } = require('../Config');
const logger = require('../utilities/logger');
const constants = require('../../constants');
const bucketclient = require('bucketclient');
const clientName = config.backends.metadata;
let bucketclient;
let params;
if (clientName === 'mem') {
params = {};
@ -21,6 +21,7 @@ if (clientName === 'mem') {
noDbOpen: null,
};
} else if (clientName === 'scality') {
bucketclient = require('bucketclient');
params = {
bucketdBootstrap: config.bucketd.bootstrap,
bucketdLog: config.bucketd.log,

lib/quotas/quotas.js Normal file

@ -0,0 +1,17 @@
const { config } = require('../Config');
const { ScubaClientImpl } = require('./scuba/wrapper');
let instance = null;
switch (config.backends.quota) {
case 'scuba':
instance = new ScubaClientImpl(config);
break;
default:
instance = {
enabled: false,
};
break;
}
module.exports = instance;
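The switch above means callers can always test the enabled flag, as server.js does later in this changeset; a usage sketch:

// Sketch: any backends.quota value other than 'scuba' yields the
// disabled stub, so this guard is safe for every configuration.
const QuotaService = require('./lib/quotas/quotas');
if (QuotaService.enabled) {
    QuotaService.setup(log); // starts the periodic Scuba health check
}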


@ -0,0 +1,80 @@
const util = require('util');
const { default: ScubaClient } = require('scubaclient');
const { externalBackendHealthCheckInterval } = require('../../../constants');
const monitoring = require('../../utilities/monitoringHandler');
class ScubaClientImpl extends ScubaClient {
constructor(config) {
super(config.scuba);
this.enabled = false;
this.maxStaleness = config.quota.maxStaleness;
this._healthCheckTimer = null;
this._log = null;
this._getLatestMetricsCallback = util.callbackify(this.getLatestMetrics);
if (config.scuba) {
this.enabled = true;
} else {
this.enabled = false;
}
}
setup(log) {
this._log = log;
if (this.enabled) {
this.periodicHealthCheck();
}
}
_healthCheck() {
return this.healthCheck().then(data => {
if (data?.date) {
const date = new Date(data.date);
if (Date.now() - date.getTime() > this.maxStaleness) {
throw new Error('Data is stale, disabling quotas');
}
}
if (!this.enabled) {
this._log.info('Scuba health check passed, enabling quotas');
}
monitoring.utilizationServiceAvailable.set(1);
this.enabled = true;
}).catch(err => {
if (this.enabled) {
this._log.warn('Scuba health check failed, disabling quotas', {
err: err.name,
description: err.message,
});
}
monitoring.utilizationServiceAvailable.set(0);
this.enabled = false;
});
}
periodicHealthCheck() {
if (this._healthCheckTimer) {
clearInterval(this._healthCheckTimer);
}
this._healthCheck();
this._healthCheckTimer = setInterval(async () => {
this._healthCheck();
}, Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY)
|| externalBackendHealthCheckInterval);
}
getUtilizationMetrics(metricsClass, resourceName, options, body, callback) {
const requestStartTime = process.hrtime.bigint();
return this._getLatestMetricsCallback(metricsClass, resourceName, options, body, (err, data) => {
const responseTimeInNs = Number(process.hrtime.bigint() - requestStartTime);
monitoring.utilizationMetricsRetrievalDuration.labels({
code: err ? (err.statusCode || 500) : 200,
class: metricsClass,
}).observe(responseTimeInNs / 1e9);
return callback(err, data);
});
}
}
module.exports = {
ScubaClientImpl,
};
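A usage sketch for the callback-style wrapper above (the metrics class and resource name are illustrative assumptions):

const { ScubaClientImpl } = require('./lib/quotas/scuba/wrapper');
const { config } = require('./lib/Config');
const scubaClient = new ScubaClientImpl(config);
scubaClient.getUtilizationMetrics('bucket', 'my-bucket', {}, null,
    (err, metrics) => {
        if (err) {
            // the retrieval histogram was labeled err.statusCode || 500
            return;
        }
        // `metrics` holds the latest utilization measurement, fetched
        // via the callbackified getLatestMetrics
    });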


@ -37,6 +37,7 @@ const kms = require('../kms/wrapper');
const { listLifecycleCurrents } = require('../api/backbeat/listLifecycleCurrents');
const { listLifecycleNonCurrents } = require('../api/backbeat/listLifecycleNonCurrents');
const { listLifecycleOrphanDeleteMarkers } = require('../api/backbeat/listLifecycleOrphanDeleteMarkers');
const { objectDeleteInternal } = require('../api/objectDelete');
const { CURRENT_TYPE, NON_CURRENT_TYPE, ORPHAN_DM_TYPE } = constants.lifecycleListing;
const lifecycleTypeCalls = {
@ -709,6 +710,19 @@ function putObject(request, response, log, callback) {
});
}
function deleteObjectFromExpiration(request, response, userInfo, log, callback) {
return objectDeleteInternal(userInfo, request, log, true, err => {
if (err) {
log.error('error deleting object from expiration', {
error: err,
method: 'deleteObjectFromExpiration',
});
return callback(err);
}
return _respond(response, {}, log, callback);
});
}
function deleteObject(request, response, log, callback) {
const err = _checkMultipleBackendRequest(request, log);
if (err) {
@ -1274,6 +1288,7 @@ const backbeatRoutes = {
},
},
DELETE: {
expiration: deleteObjectFromExpiration,
multiplebackenddata: {
deleteobject: deleteObject,
deleteobjecttagging: deleteObjectTagging,

lib/routes/routeVeeam.js Normal file

@ -0,0 +1,225 @@
const url = require('url');
const async = require('async');
const vault = require('../auth/vault');
const putVeeamFile = require('./veeam/put');
const getVeeamFile = require('./veeam/get');
const headVeeamFile = require('./veeam/head');
const listVeeamFiles = require('./veeam/list');
const { deleteVeeamFile } = require('./veeam/delete');
const { auth, s3routes, errors } = require('arsenal');
const { _decodeURI, validPath } = require('./veeam/utils');
const { routesUtils } = require('arsenal/build/lib/s3routes');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const prepareRequestContexts = require('../api/apiUtils/authorization/prepareRequestContexts');
const { responseXMLBody } = s3routes.routesUtils;
auth.setHandler(vault);
const validObjectKeys = [
`${validPath}system.xml`,
`${validPath}capacity.xml`,
];
const apiToAction = {
PUT: 'PutObject',
GET: 'GetObject',
HEAD: 'HeadObject',
DELETE: 'DeleteObject',
LIST: 'ListObjects',
};
const routeMap = {
GET: getVeeamFile,
PUT: putVeeamFile,
HEAD: headVeeamFile,
DELETE: deleteVeeamFile,
LIST: listVeeamFiles,
};
/**
* Validator for the Veeam12 custom routes. Ensures that bucket name and
* object name are correct, and that the bucket exists in the DB.
* @param {string} bucketName - name of the bucket
* @param {string} objectKey - key of the object
* @param {array | null} requestQueryParams - request query parameters
* @param {string} method - HTTP verb
* @param {object} log - request logger
* @returns {Error | undefined} error or undefined
*/
function checkBucketAndKey(bucketName, objectKey, requestQueryParams, method, log) {
// The bucket name is mandatory except for a GET request with no object
// key (a service-level request): reject any other request that lacks a
// bucket name.
if (!bucketName && !(method === 'GET' && !objectKey)) {
log.debug('empty bucket name', { method: 'checkBucketAndKey' });
return errors.MethodNotAllowed;
}
if (typeof bucketName !== 'string' || routesUtils.isValidBucketName(bucketName, []) === false) {
log.debug('invalid bucket name', { bucketName });
if (method === 'DELETE') {
return errors.NoSuchBucket;
}
return errors.InvalidBucketName;
}
if (method !== 'LIST') {
// Reject any unsupported request, but allow downloads and deletions
// from the UI. Downloads rely on GET calls with auth in the query
// parameters, which can be detected by the presence of
// 'X-Amz-Credential'. Deletion requires that the tags of the object
// are returned.
if (requestQueryParams && Object.keys(requestQueryParams).length > 0
&& !(method === 'GET' && (requestQueryParams['X-Amz-Credential'] || ('tagging' in requestQueryParams)))) {
return errors.InvalidRequest
.customizeDescription('The Veeam SOSAPI folder does not support this action.');
}
if (typeof objectKey !== 'string' || !validObjectKeys.includes(objectKey)) {
log.debug('invalid object name', { objectKey });
return errors.InvalidArgument;
}
}
return undefined;
}
/**
* Query the authorization service for the request, and extract the bucket
* and, if applicable, object metadata according to the request method.
*
* @param {object} request - incoming request
* @param {object} response - response object
* @param {string} api - HTTP verb
* @param {object} log - logger instance
* @param {function} callback -
* @returns {undefined}
*/
function authorizationMiddleware(request, response, api, log, callback) {
if (!api) {
return responseXMLBody(errors.AccessDenied, null, response, log);
}
const requestContexts = prepareRequestContexts(api, request);
return async.waterfall([
next => auth.server.doAuth(request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) {
log.debug('authentication error', {
error: err,
method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey,
});
}
/* eslint-disable no-param-reassign */
request.authorizationResults = authorizationResults;
request.streamingV4Params = streamingV4Params;
/* eslint-enable no-param-reassign */
return next(err, userInfo);
}, 's3', requestContexts),
(userInfo, next) => {
// Ensure only supported HTTP verbs and actions are called,
// otherwise deny access
const requestType = apiToAction[api];
if (!requestType) {
return next(errors.AccessDenied);
}
const mdValParams = {
bucketName: request.bucketName,
authInfo: userInfo,
requestType,
request,
};
return next(null, mdValParams);
},
(mdValParams, next) => standardMetadataValidateBucket(mdValParams, request.actionImplicitDenies, log, next),
], (err, bucketMd) => {
if (err || !bucketMd) {
return responseXMLBody(err, null, response, log);
}
return callback(request, response, bucketMd, log);
});
}
function _normalizeVeeamRequest(req) {
/* eslint-disable no-param-reassign */
// Rewriting the URL is needed for the V4 signature check: the initial
// request targets https://s3.subdomain/bucketName/objectKey, but the
// custom ingresses and/or nginx configuration for the UI will redirect this
// call to .../_/veeam/bucketName/objectKey. We need to revert the custom
// path, which is used only for routing, before computing the V4 signature.
req.url = req.url.replace('/_/veeam', '');
// Assign multiple common (extracted) parameters to the request object
const parsedUrl = url.parse(req.url, true);
req.path = _decodeURI(parsedUrl.pathname);
const pathArr = req.path.split('/');
req.query = parsedUrl.query;
req.bucketName = pathArr[1];
req.objectKey = pathArr.slice(2).join('/');
const contentLength = req.headers['x-amz-decoded-content-length'] ?
req.headers['x-amz-decoded-content-length'] :
req.headers['content-length'];
req.parsedContentLength =
Number.parseInt(contentLength?.toString() ?? '', 10);
/* eslint-enable no-param-reassign */
}
/**
* Ensure only supported methods are used; return an error otherwise
* @param {string} reqMethod - the HTTP verb of the request
* @param {string} reqQuery - request query
* @param {object} reqHeaders - request headers
* @returns {object} - method or error
*/
function checkUnsupportedRoutes(reqMethod, reqQuery, reqHeaders) {
const method = routeMap[reqMethod];
if (!method || (!reqQuery && !reqHeaders)) {
return { error: errors.MethodNotAllowed };
}
return { method };
}
/**
* Router for the Veeam custom files
* @param {string} clientIP - client IP address
* @param {object} request - request object
* @param {object} response - response object
* @param {object} log - request logger
* @returns {undefined}
*/
function routeVeeam(clientIP, request, response, log) {
// Attach the apiMethod to the request, so it can be used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = 'routeVeeam';
_normalizeVeeamRequest(request);
log.info('routing request', {
method: 'routeVeeam',
url: request.url,
clientIP,
resourceType: request.resourceType,
subResource: request.subResource,
});
// Rewrite action to LIST for list-objects
const requestMethod = request.method === 'GET' && !request.objectKey ? 'LIST' : request.method;
const { error, method } = checkUnsupportedRoutes(requestMethod, request.query, request.headers);
if (error) {
log.error('error validating route or uri params', { error });
return responseXMLBody(error, '', response, log);
}
const bucketOrKeyError = checkBucketAndKey(
request.bucketName, request.objectKey, request.query, requestMethod, log);
if (bucketOrKeyError) {
log.error('error with bucket or key value',
{ error: bucketOrKeyError });
return routesUtils.responseXMLBody(bucketOrKeyError, null, response, log);
}
return authorizationMiddleware(request, response, requestMethod, log, method);
}
module.exports = {
routeVeeam,
checkUnsupportedRoutes,
_normalizeVeeamRequest,
authorizationMiddleware,
checkBucketAndKey,
validObjectKeys,
};
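A small sketch of the verb gating above (recall that a GET without an object key was already rewritten to LIST before this check runs):

const { checkUnsupportedRoutes } = require('./lib/routes/routeVeeam');
const ok = checkUnsupportedRoutes('PUT', {}, { host: 'example' });
// ok.method is putVeeamFile
const bad = checkUnsupportedRoutes('PATCH', {}, { host: 'example' });
// bad.error is errors.MethodNotAllowed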


@ -0,0 +1,72 @@
const { s3routes, errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const { isSystemXML } = require('./utils');
const { responseXMLBody, responseNoBody } = s3routes.routesUtils;
/**
* Deletes system.xml or capacity.xml files for a given bucket.
*
* @param {string} bucketName - bucket name
* @param {string} objectKey - object key to delete
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @param {function} callback - callback
* @returns {undefined} -
*/
function deleteVeeamCapabilities(bucketName, objectKey, bucketMd, log, callback) {
const capabilityFieldName = isSystemXML(objectKey) ? 'SystemInfo' : 'CapacityInfo';
// Ensure file exists in metadata before deletion
if (!bucketMd._capabilities?.VeeamSOSApi
|| !bucketMd._capabilities?.VeeamSOSApi[capabilityFieldName]) {
return callback(errors.NoSuchKey);
}
// eslint-disable-next-line no-param-reassign
delete bucketMd._capabilities.VeeamSOSApi[capabilityFieldName];
// Delete the whole VeeamSOSApi capability if nothing is left in it
if (Object.keys(bucketMd._capabilities.VeeamSOSApi).length === 0) {
// eslint-disable-next-line no-param-reassign
delete bucketMd._capabilities.VeeamSOSApi;
// Delete the _capabilities object entirely if no capability is left
if (Object.keys(bucketMd._capabilities).length === 0) {
// eslint-disable-next-line no-param-reassign
delete bucketMd._capabilities;
}
}
// Update the bucket metadata
return metadata.deleteBucketCapabilities(bucketName, bucketMd, 'VeeamSOSApi', capabilityFieldName, log, err => {
if (err) {
return callback(err);
}
return callback();
});
}
/**
* Deletes system.xml or capacity.xml files for a given bucket. Handles
* the request context for custom routes.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function deleteVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
return deleteVeeamCapabilities(request.bucketName, request.objectKey, bucketMd, log, err => {
if (err) {
return responseXMLBody(err, null, response, log);
}
return responseNoBody(null, null, response, 204, log);
});
}
module.exports = {
deleteVeeamFile,
deleteVeeamCapabilities,
};

lib/routes/veeam/get.js Normal file

@ -0,0 +1,46 @@
const xml2js = require('xml2js');
const { errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const { respondWithData, buildHeadXML, getFileToBuild } = require('./utils');
const { responseXMLBody } = require('arsenal/build/lib/s3routes/routesUtils');
/**
* Returns system.xml or capacity.xml files for a given bucket.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function getVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
if ('tagging' in request.query) {
return respondWithData(request, response, log, bucketMd,
buildHeadXML('<Tagging><TagSet></TagSet></Tagging>'));
}
return metadata.getBucket(request.bucketName, log, (err, data) => {
if (err) {
return responseXMLBody(errors.InternalError, null, response, log);
}
const fileToBuild = getFileToBuild(request, data._capabilities?.VeeamSOSApi);
if (fileToBuild.error) {
return responseXMLBody(fileToBuild.error, null, response, log);
}
// Extract the last modified date, but do not include it when computing
// the file's ETag (md5)
const modified = fileToBuild.value.LastModified;
delete fileToBuild.value.LastModified;
const builder = new xml2js.Builder({
headless: true,
});
return respondWithData(request, response, log, data,
buildHeadXML(builder.buildObject(fileToBuild.value)), modified);
});
}
module.exports = getVeeamFile;

lib/routes/veeam/head.js Normal file

@ -0,0 +1,43 @@
const xml2js = require('xml2js');
const { errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const { getResponseHeader, buildHeadXML, getFileToBuild } = require('./utils');
const { responseXMLBody, responseContentHeaders } = require('arsenal/build/lib/s3routes/routesUtils');
/**
* Returns system.xml or capacity.xml files metadata for a given bucket.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function headVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
return metadata.getBucket(request.bucketName, log, (err, data) => {
if (err) {
return responseXMLBody(errors.InternalError, null, response, log);
}
const fileToBuild = getFileToBuild(request, data._capabilities?.VeeamSOSApi);
if (fileToBuild.error) {
return responseXMLBody(fileToBuild.error, null, response, log);
}
// Extract the last modified date, but do not include it when computing
// the file's ETag (md5)
const modified = fileToBuild.value.LastModified;
delete fileToBuild.value.LastModified;
// Recompute the file content to generate the appropriate content-md5 header
const builder = new xml2js.Builder({
headless: true,
});
const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(fileToBuild.value)));
return responseContentHeaders(null, {}, getResponseHeader(request, data,
dataBuffer, modified, log), response, log);
});
}
module.exports = headVeeamFile;

lib/routes/veeam/list.js Normal file

@ -0,0 +1,132 @@
const url = require('url');
const xml2js = require('xml2js');
const { errors } = require('arsenal');
const querystring = require('querystring');
const metadata = require('../../metadata/wrapper');
const { responseXMLBody } = require('arsenal/build/lib/s3routes/routesUtils');
const { respondWithData, getResponseHeader, buildHeadXML, validPath } = require('./utils');
const { processVersions, processMasterVersions } = require('../../api/bucketGet');
/**
* Utility function to build a standard response for the LIST route.
* It adds the supported path by default as a static and default file.
*
* @param {object} request - request object
* @param {object} arrayOfFiles - array of files headers
* @param {boolean} [versioned] - set to true if versioned listing is enabled
* @returns {string} - the formatted XML content to send
*/
function buildXMLResponse(request, arrayOfFiles, versioned = false) {
const parsedUrl = url.parse(request.url);
const parsedQs = querystring.parse(parsedUrl.query);
const listParams = {
prefix: validPath,
maxKeys: parsedQs['max-keys'] || 1000,
delimiter: '/',
};
const list = {
IsTruncated: false,
Versions: [],
Contents: [],
CommonPrefixes: [],
};
const entries = arrayOfFiles.map(file => ({
key: file.name,
value: {
IsDeleteMarker: false,
IsNull: true,
LastModified: file['Last-Modified'],
// Generated ETag already contains quotes, removing them here
ETag: file.ETag.substring(1, file.ETag.length - 1),
Size: file['Content-Length'],
Owner: {
ID: 0,
DisplayName: 'Veeam SOSAPI',
},
StorageClass: 'VIRTUAL',
}
}));
// Add the folder itself as the base entry
entries.push({
key: validPath,
value: {
IsDeleteMarker: false,
IsNull: true,
LastModified: new Date().toISOString(),
ETag: 'd41d8cd98f00b204e9800998ecf8427e',
Size: 0,
Owner: {
ID: 0,
DisplayName: 'Veeam SOSAPI',
},
StorageClass: 'VIRTUAL',
}
});
if (versioned) {
list.Versions = entries;
} else {
list.Contents = entries;
}
const processingXMLFunction = versioned ? processVersions : processMasterVersions;
return processingXMLFunction(request.bucketName, listParams, list);
}
/**
* List system.xml and/or capacity.xml files for a given bucket.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function listVeeamFiles(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
// Only accept list-type query parameter
if (!('list-type' in request.query) && !('versions' in request.query)) {
return responseXMLBody(errors.InvalidRequest
.customizeDescription('The Veeam folder does not support this action.'), null, response, log);
}
return metadata.getBucket(request.bucketName, log, (err, data) => {
if (err) {
return responseXMLBody(errors.InternalError, null, response, log);
}
const filesToBuild = [];
const fieldsToGenerate = [];
if (data._capabilities?.VeeamSOSApi?.SystemInfo) {
fieldsToGenerate.push({
...data._capabilities?.VeeamSOSApi?.SystemInfo,
name: `${validPath}system.xml`,
});
}
if (data._capabilities?.VeeamSOSApi?.CapacityInfo) {
fieldsToGenerate.push({
...data._capabilities?.VeeamSOSApi?.CapacityInfo,
name: `${validPath}capacity.xml`,
});
}
fieldsToGenerate.forEach(file => {
const lastModified = file.LastModified;
// eslint-disable-next-line no-param-reassign
delete file.LastModified;
const builder = new xml2js.Builder({
headless: true,
});
const dataBuffer = Buffer.from(buildHeadXML(builder.buildObject(file)));
filesToBuild.push({
...getResponseHeader(request, data,
dataBuffer, lastModified, log),
name: file.name,
});
});
// When `versions` is present, listing should return a versioned list
return respondWithData(request, response, log, data,
buildXMLResponse(request, filesToBuild, 'versions' in request.query));
});
}
module.exports = listVeeamFiles;

lib/routes/veeam/put.js Normal file

@ -0,0 +1,80 @@
const async = require('async');
const { parseString } = require('xml2js');
const { receiveData, isSystemXML, getFileToBuild } = require('./utils');
const { s3routes, errors } = require('arsenal');
const metadata = require('../../metadata/wrapper');
const parseSystemSchema = require('./schemas/system');
const parseCapacitySchema = require('./schemas/capacity');
const writeContinue = require('../../utilities/writeContinue');
const { responseNoBody, responseXMLBody } = s3routes.routesUtils;
/**
* Puts a veeam capacity or system file in the bucket metadata.
* Logic ensures consistency of the data and metadata.
*
* @param {object} request - request object
* @param {object} response - response object
* @param {object} bucketMd - bucket metadata from the db
* @param {object} log - logger object
* @returns {undefined} -
*/
function putVeeamFile(request, response, bucketMd, log) {
if (!bucketMd) {
return responseXMLBody(errors.NoSuchBucket, null, response, log);
}
return async.waterfall([
next => {
// Extract the data from the request, keep it in memory
writeContinue(request, response);
return receiveData(request, log, next);
},
(value, next) => parseString(value, { explicitArray: false }, (err, parsed) => {
// Convert the received XML to a JS object
if (err) {
return next(errors.MalformedXML);
}
return next(null, parsed);
}),
(parsedXML, next) => {
const capabilities = bucketMd._capabilities || {
VeeamSOSApi: {},
};
// Validate the JS object schema with joi and prepare the object for
// further logic
const validateFn = isSystemXML(request.objectKey) ? parseSystemSchema : parseCapacitySchema;
let validatedData = null;
try {
validatedData = validateFn(parsedXML);
} catch (err) {
log.error('xml file did not pass validation', { err });
return next(errors.MalformedXML);
}
const file = getFileToBuild(request, validatedData, true);
if (file.error) {
return next(file.error);
}
capabilities.VeeamSOSApi = {
...(capabilities.VeeamSOSApi || {}),
...file.value,
};
// Write data to bucketMD with the same (validated) format
// eslint-disable-next-line no-param-reassign
bucketMd = {
...bucketMd,
_capabilities: capabilities,
};
// Update bucket metadata
return metadata.updateBucketCapabilities(
request.bucketName, bucketMd, 'VeeamSOSApi', file.fieldName, file.value[file.fieldName], log, next);
}
], err => {
if (err) {
return responseXMLBody(err, null, response, log);
}
return responseNoBody(null, null, response, 200, log);
});
}
module.exports = putVeeamFile;


@ -0,0 +1,38 @@
const joi = require('joi');
const { errors } = require('arsenal');
/**
* Validates and parses the provided JS object converted from the
* provided XML file. XML schema example:
*
* <?xml version="1.0" encoding="utf-8" ?>
* <CapacityInfo>
* <Capacity>1099511627776</Capacity>
* <Available>0</Available>
* <Used>0</Used>
* </CapacityInfo>
*
* @param {object} parsedXML - the parsed XML from xml2js
* @returns {object | Error} the valid system.xml JS object or an error if
* validation fails
*/
function validateCapacitySchema(parsedXML) {
const schema = joi.object({
CapacityInfo: joi.object({
Capacity: joi.number().min(-1).integer().required(),
Available: joi.number().min(-1).integer().required(),
Used: joi.number().min(-1).integer().required(),
}).required(),
});
const validatedData = schema.validate(parsedXML, {
// Allow any unknown keys for future compatibility
allowUnknown: true,
convert: true,
});
if (validatedData.error) {
throw errors.MalformedXML;
}
return validatedData.value;
}
module.exports = validateCapacitySchema;
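A validation sketch for the schema above (the input mirrors xml2js output with explicitArray: false; the values are illustrative):

// Sketch: joi's convert option coerces the string values produced by
// xml2js into integers before the min(-1) checks run.
const validateCapacitySchema = require('./lib/routes/veeam/schemas/capacity');
const parsed = {
    CapacityInfo: { Capacity: '1099511627776', Available: '0', Used: '0' },
};
const { CapacityInfo } = validateCapacitySchema(parsed);
// CapacityInfo.Capacity === 1099511627776 (a number, not a string)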


@ -0,0 +1,95 @@
const joi = require('joi');
const { errors } = require('arsenal');
// Allow supporting any version of the protocol
const systemSchemasPerVersion = {
'unsupported': joi.object({}),
'"1.0"': joi.object({
SystemInfo: joi.object({
ProtocolVersion: joi.string().required(),
ModelName: joi.string().required(),
ProtocolCapabilities: joi.object({
CapacityInfo: joi.boolean().required(),
UploadSessions: joi.boolean().required(),
IAMSTS: joi.boolean().default(false),
}).required(),
APIEndpoints: joi.object({
IAMEndpoint: joi.string().required(),
STSEndpoint: joi.string().required()
}),
SystemRecommendations: joi.object({
S3ConcurrentTaskLimit: joi.number().min(0).default(64),
S3MultiObjectDeleteLimit: joi.number().min(1).default(1000),
StorageCurrentTasksLimit: joi.number().min(0).default(0),
KbBlockSize: joi.number()
.valid(256, 512, 1024, 2048, 4096, 8192)
.default(1024),
}),
}).required()
}),
};
/**
* Validates and parses the provided JS object converted from the
* provided XML file. XML schema example:
*
* <?xml version="1.0" encoding="utf-8" ?>
* <SystemInfo>
* <ProtocolVersion>"1.0"</ProtocolVersion>
* <ModelName>"ACME corp - Custom S3 server - v1.2"</ModelName>
* <ProtocolCapabilities>
* <CapacityInfo>true</CapacityInfo>
* <UploadSessions>true</UploadSessions>
* <IAMSTS>true</IAMSTS>
* </ProtocolCapabilities>
* <APIEndpoints>
* <IAMEndpoint>https://storage.acme.local/iam/endpoint</IAMEndpoint>
* <STSEndpoint>https://storage.acme.local/sts/endpoint</STSEndpoint>
* </APIEndpoints>
* <SystemRecommendations>
* <S3ConcurrentTaskLimit>64</S3ConcurrentTaskLimit>
* <S3MultiObjectDeleteLimit>1000</S3MultiObjectDeleteLimit>
* <StorageCurrentTasksLimit>0</StorageCurrentTasksLimit>
* <KbBlockSize>1024</KbBlockSize>
* </SystemRecommendations>
* </SystemInfo>
*
* @param {object} parsedXML - the parsed XML from xml2js
* @returns {object | Error} the valid system.xml JS object or an error if
* validation fails
*/
function validateSystemSchema(parsedXML) {
const protocolVersion = parsedXML?.SystemInfo?.ProtocolVersion;
let schema = systemSchemasPerVersion.unsupported;
if (!protocolVersion) {
throw errors.MalformedXML
.customizeDescription('ProtocolVersion must be set for the system.xml file');
}
if (protocolVersion in systemSchemasPerVersion) {
schema = systemSchemasPerVersion[protocolVersion];
}
const validatedData = schema.validate(parsedXML, {
// Allow any unknown keys for future compatibility
allowUnknown: true,
convert: true,
});
if (validatedData.error) {
throw validatedData.error;
} else {
switch (protocolVersion) {
case '"1.0"':
// Ensure conditional fields are set
// IAMSTS === true implies that SystemInfo.APIEndpoints is defined
if (validatedData.value.SystemInfo.ProtocolCapabilities.IAMSTS
&& !validatedData.value.SystemInfo.APIEndpoints) {
throw new Error(errors.MalformedXML);
}
break;
default:
break;
}
}
return validatedData.value;
}
module.exports = validateSystemSchema;
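One subtlety worth a sketch: the schema table is keyed by the ProtocolVersion string exactly as Veeam sends it, literal quotes included (values below are illustrative):

const validateSystemSchema = require('./lib/routes/veeam/schemas/system');
validateSystemSchema({
    SystemInfo: {
        ProtocolVersion: '"1.0"', // the quotes are part of the value
        ModelName: '"ACME corp - Custom S3 server - v1.2"',
        ProtocolCapabilities: { CapacityInfo: true, UploadSessions: true },
    },
});
// resolves against the '"1.0"' schema; any other non-empty version falls
// back to the permissive 'unsupported' schema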

lib/routes/veeam/utils.js Normal file

@ -0,0 +1,211 @@
const { errors, jsutil } = require('arsenal');
const { Readable } = require('stream');
const collectResponseHeaders = require('../../utilities/collectResponseHeaders');
const collectCorsHeaders = require('../../utilities/collectCorsHeaders');
const crypto = require('crypto');
const { prepareStream } = require('arsenal/build/lib/s3middleware/prepareStream');
/**
* Decodes a URI and returns the result.
* Applies the same decoding as the S3 server.
* @param {string} uri - uri to decode
* @returns {string} -
*/
function _decodeURI(uri) {
return decodeURIComponent(uri.replace(/\+/g, ' '));
}
/**
* Generic function to get data from a client request.
*
* @param {object} request - incoming request
* @param {object} log - logger object
* @param {function} callback -
* @returns {undefined}
*/
function receiveData(request, log, callback) {
// Get the key content from the request body
const { parsedContentLength } = request;
const ContentLengthThreshold = 1024 * 1024; // 1MB
// Prevent memory overloads by limiting the size of the
// received data.
if (parsedContentLength > ContentLengthThreshold) {
return callback(errors.InvalidInput
.customizeDescription(`maximum allowed content-length is ${ContentLengthThreshold} bytes`));
}
const value = Buffer.alloc(parsedContentLength);
const cbOnce = jsutil.once(callback);
const dataStream = prepareStream(request, request.streamingV4Params, log, cbOnce);
let cursor = 0;
let exceeded = false;
dataStream.on('data', data => {
if (cursor + data.length > parsedContentLength) {
exceeded = true;
}
if (!exceeded) {
data.copy(value, cursor);
}
cursor += data.length;
});
dataStream.on('end', () => {
if (exceeded) {
log.error('data stream exceeded announced size',
{ parsedContentLength, overflow: cursor });
return cbOnce(errors.InternalError);
}
// use the once-wrapped callback to avoid a double call if
// prepareStream already reported an error
return cbOnce(null, value.toString());
});
return undefined;
}
/**
* Builds a valid XML file for SOSAPI
*
* @param {string} xmlContent - valid xml content
* @returns {string} a valid and formatted XML file
*/
function buildHeadXML(xmlContent) {
return `<?xml version="1.0" encoding="UTF-8" ?>\n${xmlContent}\n`;
}
/**
* Get response headers for the object
* @param {object} request - incoming request
* @param {BucketInfo} bucket - bucket
* @param {Buffer} dataBuffer - data to send as a buffer
* @param {date} [lastModified] - last modified date of the value
* @param {object} log - logging object
* @returns {object} - response headers
*/
function getResponseHeader(request, bucket, dataBuffer, lastModified, log) {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
const responseMetaHeaders = collectResponseHeaders({
'last-modified': lastModified || new Date().toISOString(),
'content-md5': crypto
.createHash('md5')
.update(dataBuffer)
.digest('hex'),
'content-length': dataBuffer.byteLength,
'content-type': 'text/xml',
}, corsHeaders, null, false);
responseMetaHeaders.versionId = 'null';
responseMetaHeaders['x-amz-id-2'] = log.getSerializedUids();
responseMetaHeaders['x-amz-request-id'] = log.getSerializedUids();
return responseMetaHeaders;
}
/**
* Generic function to respond to user with data using streams
*
* @param {object} request - incoming request
* @param {object} response - response object
* @param {object} log - logging object
* @param {BucketInfo} bucket - bucket info
* @param {string} data - data to send
* @param {date} [lastModified] - last modified date of the value
* @returns {undefined} -
*/
function respondWithData(request, response, log, bucket, data, lastModified) {
const dataBuffer = Buffer.from(data);
const responseMetaHeaders = getResponseHeader(request, bucket, dataBuffer, lastModified, log);
response.on('finish', () => {
let contentLength = 0;
if (responseMetaHeaders && responseMetaHeaders['Content-Length']) {
contentLength = responseMetaHeaders['Content-Length'];
}
log.end().addDefaultFields({ contentLength });
log.end().info('responded with streamed content', {
httpCode: response.statusCode,
});
});
if (responseMetaHeaders && typeof responseMetaHeaders === 'object') {
Object.keys(responseMetaHeaders).forEach(key => {
if (responseMetaHeaders[key] !== undefined) {
try {
response.setHeader(key, responseMetaHeaders[key]);
} catch (e) {
log.debug('header can not be added ' +
'to the response', {
header: responseMetaHeaders[key],
error: e.stack, method: 'routeVeeam/respondWithData'
});
}
}
});
}
response.writeHead(200);
const stream = Readable.from(dataBuffer);
stream.pipe(response);
stream.on('unpipe', () => {
response.end();
});
stream.on('error', () => {
response.end();
});
}
const validPath = '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/';
/**
* Helper to determine if the current requested file is system.xml
*
* @param {string} objectKey - object key
* @returns {boolean} - true if the object key ends with `/system.xml`
*/
function isSystemXML(objectKey) {
return objectKey.endsWith('/system.xml');
}
/**
* Helper to extract the file from the bucket metadata
*
* @param {object} request - incoming request
* @param {object} data - the bucket metadata or input data
* @param {boolean} inlineLastModified - true if LastModified should be kept inside the
* returned object, false to return it as a standalone field
* @returns {error | object} - error if file does not exist, or
* the associated metadata
*/
function getFileToBuild(request, data, inlineLastModified = false) {
const _isSystemXML = isSystemXML(request.objectKey);
const fileToBuild = _isSystemXML ? data?.SystemInfo
: data?.CapacityInfo;
if (!fileToBuild) {
return { error: errors.NoSuchKey };
}
const modified = fileToBuild.LastModified || (new Date()).toISOString();
const fieldName = _isSystemXML ? 'SystemInfo' : 'CapacityInfo';
if (inlineLastModified) {
fileToBuild.LastModified = modified;
return {
value: {
[fieldName]: fileToBuild,
},
fieldName,
};
} else {
delete fileToBuild.LastModified;
return {
value: {
[fieldName]: fileToBuild,
LastModified: modified,
},
fieldName,
};
}
}
module.exports = {
_decodeURI,
receiveData,
respondWithData,
getResponseHeader,
buildHeadXML,
validPath,
isSystemXML,
getFileToBuild,
};


@ -18,13 +18,9 @@ const locationStorageCheck =
require('./api/apiUtils/object/locationStorageCheck');
const vault = require('./auth/vault');
const metadata = require('./metadata/wrapper');
const { initManagement } = require('./management');
const {
initManagementClient,
isManagementAgentUsed,
} = require('./management/agentClient');
const HttpAgent = require('agentkeepalive');
const QuotaService = require('./quotas/quotas');
const routes = arsenal.s3routes.routes;
const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
const websiteEndpoints = _config.websiteEndpoints;
@ -55,7 +51,6 @@ const STATS_INTERVAL = 5; // 5 seconds
const STATS_EXPIRY = 30; // 30 seconds
const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
STATS_EXPIRY);
const enableRemoteManagement = true;
class S3Server {
/**
@ -321,16 +316,9 @@ class S3Server {
this._startServer(this.routeAdminRequest, _config.metricsPort);
}
// TODO this should wait for metadata healthcheck to be ok
// TODO only do this in cluster master
if (enableRemoteManagement) {
if (!isManagementAgentUsed()) {
setTimeout(() => {
initManagement(logger.newRequestLogger());
}, 5000);
} else {
initManagementClient();
}
// Start quota service health checks
if (QuotaService.enabled) {
QuotaService?.setup(log);
}
this.started = true;
@ -339,8 +327,7 @@ class S3Server {
}
function main() {
// TODO: change config to use workers prop. name for clarity
let workers = _config.clusters || 1;
let workers = _config.workers || 1;
if (process.env.S3BACKEND === 'mem') {
workers = 1;
}


@ -109,7 +109,7 @@ const services = {
tagging, taggingCopy, replicationInfo, defaultRetention,
dataStoreName, creationTime, retentionMode, retentionDate,
legalHold, originOp, updateMicroVersionId, archive, oldReplayId,
deleteNullKey, overheadField } = params;
deleteNullKey, amzStorageClass, overheadField } = params;
log.trace('storing object in metadata');
assert.strictEqual(typeof bucketName, 'string');
const md = new ObjectMD();
@ -186,6 +186,7 @@ const services = {
}
// update restore
if (archive) {
md.setAmzStorageClass(amzStorageClass);
md.setArchive(new ObjectMDArchive(
archive.archiveInfo,
archive.restoreRequestedAt,
@ -262,6 +263,11 @@ const services = {
if (legalHold) {
md.setLegalHold(legalHold);
}
if (params.acl) {
// In case of a restore we don't pass ACLs in the headers,
// but take them from the old metadata instead
md.setAcl(params.acl);
}
log.trace('object metadata', { omVal: md.getValue() });
// If this is not the completion of a multipart upload or
@ -327,10 +333,11 @@ const services = {
* @param {boolean} deferLocationDeletion - true if the object should not
* be removed from the storage, but be returned instead.
* @param {Log} log - logger instance
* @param {string} originOp - origin operation
* @param {function} cb - callback from async.waterfall in objectGet
* @return {undefined}
*/
deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, cb) {
deleteObject(bucketName, objectMD, objectKey, options, deferLocationDeletion, log, originOp, cb) {
log.trace('deleting object from bucket');
assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof objectMD, 'object');
@ -362,7 +369,7 @@ const services = {
}
return cb(null, res);
});
});
}, originOp);
}
const objGetInfo = objectMD.location;


@ -1,16 +1,12 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const _config = require('../Config').config;
const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
const vault = require('../auth/vault');
// start utapi server
if (utapiVersion === 1 && _config.utapi) {
const fullConfig = Object.assign({}, _config.utapi,
{ redis: _config.redis });
if (_config.vaultd) {
Object.assign(fullConfig, { vaultd: _config.vaultd });
}
if (_config.https) {
Object.assign(fullConfig, { https: _config.https });
}
{ redis: _config.redis, vaultclient: vault });
// copy healthcheck IPs
if (_config.healthChecks) {
Object.assign(fullConfig, { healthChecks: _config.healthChecks });


@ -1,3 +1,4 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReindex = require('utapi').UtapiReindex;
const { config } = require('../Config');


@ -1,3 +1,4 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReplay = require('utapi').UtapiReplay;
const _config = require('../Config').config;


@ -3,12 +3,14 @@ const routeMetadata = require('../routes/routeMetadata');
const routeWorkflowEngineOperator =
require('../routes/routeWorkflowEngineOperator');
const { reportHandler } = require('./reportHandler');
const routeVeeam = require('../routes/routeVeeam').routeVeeam;
const internalHandlers = {
backbeat: routeBackbeat,
report: reportHandler,
metadata: routeMetadata,
'workflow-engine-operator': routeWorkflowEngineOperator,
veeam: routeVeeam,
};
module.exports = {


@ -1,7 +1,11 @@
const { Werelogs } = require('werelogs');
const { configure, Werelogs } = require('werelogs');
const _config = require('../Config.js').config;
configure({
level: _config.log.logLevel,
dump: _config.log.dumpLevel,
});
const werelogs = new Werelogs({
level: _config.log.logLevel,
dump: _config.log.dumpLevel,


@ -1,5 +1,6 @@
const { errors } = require('arsenal');
const client = require('prom-client');
const { config } = require('../Config');
const collectDefaultMetrics = client.collectDefaultMetrics;
const numberOfBuckets = new client.Gauge({
@ -64,6 +65,61 @@ const httpResponseSizeBytes = new client.Summary({
help: 'Cloudserver HTTP response sizes in bytes',
});
let quotaEvaluationDuration;
let utilizationMetricsRetrievalDuration;
let utilizationServiceAvailable;
let bucketsWithQuota;
let accountsWithQuota;
let requestWithQuotaMetricsUnavailable;
if (config.isQuotaEnabled) {
quotaEvaluationDuration = new client.Histogram({
name: 's3_cloudserver_quota_evaluation_duration_seconds',
help: 'Duration of the quota evaluation operation',
labelNames: ['action', 'code', 'type'],
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1],
});
utilizationMetricsRetrievalDuration = new client.Histogram({
name: 's3_cloudserver_quota_metrics_retrieval_duration_seconds',
help: 'Duration of the utilization metrics retrieval operation',
labelNames: ['code', 'class'],
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5],
});
utilizationServiceAvailable = new client.Gauge({
name: 's3_cloudserver_quota_utilization_service_available',
help: 'Availability of the utilization service',
});
bucketsWithQuota = new client.Gauge({
name: 's3_cloudserver_quota_buckets_count',
help: 'Total number of bucket quotas',
});
accountsWithQuota = new client.Gauge({
name: 's3_cloudserver_quota_accounts_count',
help: 'Total number of account quotas',
});
requestWithQuotaMetricsUnavailable = new client.Counter({
name: 's3_cloudserver_quota_unavailable_count',
help: 'Total number of requests with quota metrics unavailable',
});
}
// Lifecycle duration metric, to track the completion of restore.
// This metric is used to track the time it takes to complete the lifecycle operation (restore).
// NOTE : this metric is the same as the one defined in Backbeat, and must keep the same name,
// labels and buckets.
const lifecycleDuration = new client.Histogram({
name: 's3_lifecycle_duration_seconds',
help: 'Duration of the lifecycle operation, calculated from the theoretical date to the end ' +
'of the operation',
labelNames: ['type', 'location'],
buckets: [0.2, 1, 5, 30, 120, 600, 3600, 4 * 3600, 8 * 3600, 16 * 3600, 24 * 3600],
});
function promMetrics(method, bucketName, code, action,
newByteLength, oldByteLength, isVersionedObj,
numOfObjectsRemoved, ingestSize) {
@ -131,6 +187,10 @@ function crrCacheToProm(crrResults) {
numberOfBuckets.set(crrResults.getObjectCount.buckets || 0);
numberOfObjects.set(crrResults.getObjectCount.objects || 0);
}
if (config.isQuotaEnabled) {
bucketsWithQuota.set(crrResults?.getObjectCount?.bucketWithQuotaCount || 0);
accountsWithQuota.set(crrResults?.getVaultReport?.accountWithQuotaCount || 0);
}
if (crrResults.getDataDiskUsage) {
dataDiskAvailable.set(crrResults.getDataDiskUsage.available || 0);
dataDiskFree.set(crrResults.getDataDiskUsage.free || 0);
@ -207,4 +267,10 @@ module.exports = {
httpRequestDurationSeconds,
httpRequestsTotal,
httpActiveRequests,
lifecycleDuration,
quotaEvaluationDuration,
utilizationMetricsRetrievalDuration,
utilizationServiceAvailable,
bucketsWithQuota,
requestWithQuotaMetricsUnavailable,
};
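To illustrate how the quota metrics above are meant to be fed (label values are illustrative; the guard mirrors the declarations):

// Sketch: recording the duration of one quota evaluation, in seconds.
if (config.isQuotaEnabled) {
    quotaEvaluationDuration
        .labels({ action: 'objectPut', code: 200, type: 'bucket' })
        .observe(0.004);
}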


@ -10,6 +10,7 @@ const config = require('../Config').config;
const { data } = require('../data/wrapper');
const metadata = require('../metadata/wrapper');
const monitoring = require('../utilities/monitoringHandler');
const vault = require('../auth/vault');
const REPORT_MODEL_VERSION = 1;
const ASYNCLIMIT = 5;
@ -461,6 +462,7 @@ function reportHandler(clientIP, req, res, log) {
getCRRMetrics: cb => getCRRMetrics(log, cb),
getReplicationStates: cb => getReplicationStates(log, cb),
getIngestionInfo: cb => getIngestionInfo(log, cb),
getVaultReport: cb => vault.report(log, cb),
},
(err, results) => {
if (err) {
@ -488,6 +490,7 @@ function reportHandler(clientIP, req, res, log) {
capabilities: getCapabilities(),
ingestStats: results.getIngestionInfo.metrics,
ingestStatus: results.getIngestionInfo.status,
vaultReport: results.getVaultReport,
};
monitoring.crrCacheToProm(results);
res.writeHead(200, { 'Content-Type': 'application/json' });


@ -101,5 +101,12 @@
"legacyAwsBehavior": false,
"isCold": true,
"details": {}
},
"location-azure-archive-v1": {
"type": "azure_archive",
"objectId": "location-azure-archive-v1",
"legacyAwsBehavior": false,
"isCold": true,
"details": {}
}
}


@ -0,0 +1,12 @@
{
"STANDARD": {
"type": "vitastor",
"objectId": "std",
"legacyAwsBehavior": true,
"details": {
"config_path": "/etc/vitastor/vitastor.conf",
"pool_id": 3,
"metadata_image": "s3-volume-meta"
}
}
}


@ -1,179 +0,0 @@
const Uuid = require('uuid');
const WebSocket = require('ws');
const logger = require('./lib/utilities/logger');
const { initManagement } = require('./lib/management');
const _config = require('./lib/Config').config;
const { managementAgentMessageType } = require('./lib/management/agentClient');
const { addOverlayMessageListener } = require('./lib/management/push');
const { saveConfigurationVersion } = require('./lib/management/configuration');
// TODO: auth?
// TODO: werelogs with a specific name.
const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000;
class ManagementAgentServer {
constructor() {
this.port = _config.managementAgent.port || 8010;
this.wss = null;
this.loadedOverlay = null;
this.stop = this.stop.bind(this);
process.on('SIGINT', this.stop);
process.on('SIGHUP', this.stop);
process.on('SIGQUIT', this.stop);
process.on('SIGTERM', this.stop);
process.on('SIGPIPE', () => {});
}
start(_cb) {
const cb = _cb || function noop() {};
/* Define REPORT_TOKEN env variable needed by the management
* module. */
process.env.REPORT_TOKEN = process.env.REPORT_TOKEN
|| _config.reportToken
|| Uuid.v4();
initManagement(logger.newRequestLogger(), overlay => {
let error = null;
if (overlay) {
this.loadedOverlay = overlay;
this.startServer();
} else {
error = new Error('failed to init management');
}
return cb(error);
});
}
stop() {
if (!this.wss) {
process.exit(0);
return;
}
this.wss.close(() => {
logger.info('server shutdown');
process.exit(0);
});
}
startServer() {
this.wss = new WebSocket.Server({
port: this.port,
clientTracking: true,
path: '/watch',
});
this.wss.on('connection', this.onConnection.bind(this));
this.wss.on('listening', this.onListening.bind(this));
this.wss.on('error', this.onError.bind(this));
setInterval(this.checkBrokenConnections.bind(this),
CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS);
addOverlayMessageListener(this.onNewOverlay.bind(this));
}
onConnection(socket, request) {
function heartbeat() {
this.isAlive = true;
}
logger.info('client connected to watch route', {
ip: request.connection.remoteAddress,
});
/* eslint-disable no-param-reassign */
socket.isAlive = true;
socket.on('pong', heartbeat.bind(socket));
if (socket.readyState !== socket.OPEN) {
logger.error('client socket not in ready state', {
state: socket.readyState,
client: socket._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
socket.send(JSON.stringify(msg), error => {
if (error) {
logger.error('failed to send remoteOverlay to client', {
error,
client: socket._socket._peername,
});
}
});
}
onListening() {
logger.info('websocket server listening',
{ port: this.port });
}
onError(error) {
logger.error('websocket server error', { error });
}
_sendNewOverlayToClient(client) {
if (client.readyState !== client.OPEN) {
logger.error('client socket not in ready state', {
state: client.readyState,
client: client._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
client.send(JSON.stringify(msg), error => {
if (error) {
logger.error(
'failed to send remoteOverlay to management agent client', {
error, client: client._socket._peername,
});
}
});
}
onNewOverlay(remoteOverlay) {
const remoteOverlayObj = JSON.parse(remoteOverlay);
saveConfigurationVersion(
this.loadedOverlay, remoteOverlayObj, logger, err => {
if (err) {
logger.error('failed to save remote overlay', { err });
return;
}
this.loadedOverlay = remoteOverlayObj;
this.wss.clients.forEach(
this._sendNewOverlayToClient.bind(this)
);
});
}
checkBrokenConnections() {
this.wss.clients.forEach(client => {
if (!client.isAlive) {
logger.info('close broken connection', {
client: client._socket._peername,
});
client.terminate();
return;
}
client.isAlive = false;
client.ping();
});
}
}
const server = new ManagementAgentServer();
server.start();


@@ -192,3 +192,163 @@ tests:
summary: Very high delete latency
exp_labels:
severity: critical
# QuotaMetricsNotAvailable (case with bucket quota)
##################################################################################################
- name: Quota metrics not available (bucket quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
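# series notation: "1+1x6" = start at 1, add 1 per interval for 6 more samples (service up);
# "0+0x20" = 21 samples at 0 (service unavailable)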
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with account quota)
##################################################################################################
- name: Quota metrics not available (account quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with both bucket and account quota)
##################################################################################################
- name: Quota metrics not available (bucket and account quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available; quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case without quota)
##################################################################################################
- name: Quota metrics not available (no quota configured)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaUnavailable
##################################################################################################
- name: Quota evaluation disabled
interval: 1m
input_series:
- series: s3_cloudserver_quota_unavailable_count{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 0+0x6 1+1x20 0+0x6
alert_rule_test:
- alertname: QuotaUnavailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaUnavailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet.
summary: High number of quota requests with metrics unavailable
exp_labels:
severity: critical
- alertname: QuotaUnavailable
eval_time: 30m
exp_alerts: []


@@ -6,6 +6,9 @@ x-inputs:
- name: service
type: constant
value: artesca-data-connector-s3api-metrics
- name: reportJob
type: constant
value: artesca-data-ops-report-handler
- name: replicas
type: constant
- name: systemErrorsWarningThreshold
@@ -26,6 +29,9 @@ x-inputs:
- name: deleteLatencyCriticalThreshold
type: config
value: 1.000
- name: quotaUnavailabilityThreshold
type: config
value: 0.500
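# quotaUnavailabilityThreshold: alert once the average of the availability gauge across
# instances drops below this fraction (i.e. less than half report the service available)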
groups:
- name: CloudServer
@@ -132,3 +138,45 @@ groups:
annotations:
description: "Latency of delete object operations is more than 1s"
summary: "Very high delete latency"
# As a platform admin I want to be alerted (warning) when the utilization metrics service is enabled
# but not available for at least half of the S3 services during the last minute
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
labels:
severity: warning
annotations:
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
summary: "Utilization metrics service not available"
# As a platform admin I want to be alerted (critical) when the utilization metrics service is enabled
# but not available during the last 10 minutes
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
for: 10m
labels:
severity: critical
annotations:
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
summary: "Utilization metrics service not available"
# As a platform admin I want to be alerted (critical) when quotas were not honored due to metrics
# being unavailable
- alert: QuotaUnavailable
expr: |
sum(increase(s3_cloudserver_quota_unavailable_count{namespace="${namespace}",service="${service}"}[2m]))
> 0
for: 5m
labels:
severity: critical
annotations:
description: "Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet."
summary: "High number of quota requests with metrics unavailable"


@@ -1625,7 +1625,7 @@
"targets": [
{
"datasource": null,
"expr": "sum(rate(http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)",
"expr": "sum(rate(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval])) by(action)",
"format": "time_series",
"hide": false,
"instant": false,
@@ -1697,7 +1697,7 @@
"targets": [
{
"datasource": null,
"expr": "sum(round(increase(http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)",
"expr": "sum(round(increase(s3_cloudserver_http_requests_total{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))) by(method)",
"format": "time_series",
"hide": false,
"instant": false,
@@ -1931,7 +1931,7 @@
"targets": [
{
"datasource": null,
"expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "heatmap",
"hide": false,
"instant": false,
@@ -1960,7 +1960,7 @@
},
"yAxis": {
"decimals": null,
"format": "dtdurations",
"format": "s",
"label": null,
"logBase": 1,
"max": null,
@@ -2182,7 +2182,7 @@
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
@@ -2196,7 +2196,7 @@
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
@@ -2665,6 +2665,865 @@
"transformations": [],
"transparent": false,
"type": "piechart"
},
{
"collapsed": false,
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": []
}
}
},
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 65
},
"hideTimeOverride": false,
"id": 34,
"links": [],
"maxDataPoints": 100,
"panels": [],
"targets": [],
"title": "Quotas",
"transformations": [],
"transparent": false,
"type": "row"
},
{
"datasource": "${DS_PROMETHEUS}",
"description": "Number of S3 buckets with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"decimals": null,
"mappings": [],
"noValue": "-",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#808080",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "blue",
"index": 1,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 66
},
"hideTimeOverride": false,
"id": 35,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"targets": [
{
"datasource": null,
"expr": "max(s3_cloudserver_quota_buckets_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Buckets with quota",
"transformations": [],
"transparent": false,
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"description": "Number of accounts with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"decimals": null,
"mappings": [],
"noValue": "-",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#808080",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "blue",
"index": 1,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 70
},
"hideTimeOverride": false,
"id": 36,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"targets": [
{
"datasource": null,
"expr": "max(s3_cloudserver_quota_accounts_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Accounts with quota",
"transformations": [],
"transparent": false,
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 66
},
"hideTimeOverride": false,
"id": 37,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "hidden",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_unavailable_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Operations with unavailable metrics",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 66
},
"hideTimeOverride": false,
"id": 38,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval])) by(action)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{action}}",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluaton rate per S3 action",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "stepAfter",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "orange",
"index": 1,
"line": true,
"op": "gt",
"value": 90.0,
"yaxis": "left"
},
{
"color": "red",
"index": 2,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 74
},
"hideTimeOverride": false,
"id": 39,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "hidden",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace=\"${namespace}\",job=\"${job}\"}[$__rate_interval])) * 100",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota service uptime",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 74
},
"hideTimeOverride": false,
"id": 40,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=~\"2..\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "Success",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=\"429\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "Quota Exceeded",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluation status code over time",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": 180000,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 74
},
"hideTimeOverride": false,
"id": 41,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ type }} (success)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ type }} (exceeded)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Average quota evaluation latencies",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"cards": {
"cardPadding": null,
"cardRound": null
},
"color": {
"cardColor": "#b4ff00",
"colorScale": "sqrt",
"colorScheme": "interpolateOranges",
"exponent": 0.5,
"max": null,
"min": null,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": []
}
}
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 82
},
"heatmap": {},
"hideTimeOverride": false,
"hideZeroBuckets": false,
"highlightCards": true,
"id": 42,
"legend": {
"show": false
},
"links": [],
"maxDataPoints": 25,
"reverseYBuckets": false,
"targets": [
{
"datasource": null,
"expr": "sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "heatmap",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ le }}",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluation duration",
"tooltip": {
"show": true,
"showHistogram": true
},
"transformations": [],
"transparent": false,
"type": "heatmap",
"xAxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yAxis": {
"decimals": null,
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": 180000,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 18,
"x": 6,
"y": 82
},
"hideTimeOverride": false,
"id": 43,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ class }} (success)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ class }} (error)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Average utilization metrics retrieval latencies",
"transformations": [],
"transparent": false,
"type": "timeseries"
}
],
"refresh": "30s",
@@ -2766,5 +3625,5 @@
"timezone": "",
"title": "S3 service",
"uid": null,
"version": 31
"version": 110
}


@@ -331,7 +331,7 @@ requestsByAction = TimeSeries(
unit=UNITS.OPS_PER_SEC,
targets=[
Target(
expr='sum(rate(http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval])) by(action)', # noqa: E501
expr='sum(rate(s3_cloudserver_http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval])) by(action)', # noqa: E501
legendFormat="{{action}}",
)
]
@@ -345,7 +345,7 @@ requestsByMethod = PieChart(
unit=UNITS.SHORT,
targets=[
Target(
expr='sum(round(increase(http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval]))) by(method)', # noqa: E501
expr='sum(round(increase(s3_cloudserver_http_requests_total{namespace="${namespace}", job=~"$job"}[$__rate_interval]))) by(method)', # noqa: E501
legendFormat="{{method}}",
),
],
@@ -366,6 +366,28 @@ def average_latency_target(title, action="", by=""):
)
def average_quota_latency_target(code="", by=""):
# type: (str, str) -> str
extra = ', code=' + code if code else ""
by = " by (" + by + ")" if by else ""
return "\n".join([
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
" /",
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
])
def average_quota_retrieval_latency(code="", by=""):
# type: (str, str) -> str
extra = ', code=' + code if code else ""
by = " by (" + by + ")" if by else ""
return "\n".join([
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
" /",
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
])
averageLatencies = TimeSeries(
title="Average latencies",
dataSource="${DS_PROMETHEUS}",
@@ -406,10 +428,10 @@ requestTime = Heatmap(
dataFormat="tsbuckets",
maxDataPoints=25,
tooltip=Tooltip(show=True, showHistogram=True),
yAxis=YAxis(format=UNITS.DURATION_SECONDS),
yAxis=YAxis(format=UNITS.SECONDS),
color=HeatmapColor(mode="opacity"),
targets=[Target(
expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
format="heatmap",
legendFormat="{{ le }}",
)],
@@ -433,11 +455,11 @@ bandWidth = TimeSeries(
unit="binBps",
targets=[
Target(
expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="Out"
),
Target(
expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="In"
)
],
@@ -525,6 +547,174 @@ top10Error5xxByBucket = top10_errors_by_bucket(
title="5xx : Top10 by Bucket", code='~"5.."'
)
quotaHealth = TimeSeries(
title="Quota service uptime",
legendDisplayMode="hidden",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="stepAfter",
fillOpacity=30,
unit=UNITS.PERCENT_FORMAT,
targets=[Target(
expr='avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",job="${job}"}[$__rate_interval])) * 100', # noqa: E501
)],
thresholds=[
Threshold("green", 0, 95.0),
Threshold("orange", 1, 90.0),
Threshold("red", 2, 0.0),
],
)
quotaStatusCode = TimeSeries(
title="Quota evaluation status code over time",
dataSource="${DS_PROMETHEUS}",
fillOpacity=30,
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
targets=[Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code=~"2..", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="Success",
), Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code="429", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="Quota Exceeded",
)],
)
quotaByAction = TimeSeries(
title="Quota evaluaton rate per S3 action",
dataSource="${DS_PROMETHEUS}",
legendDisplayMode="table",
legendPlacement="right",
legendValues=["min", "mean", "max"],
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
targets=[
Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"}[$__rate_interval])) by(action)', # noqa: E501
legendFormat="{{action}}",
)
]
)
averageQuotaDuration = Heatmap(
title="Quota evaluation duration",
dataSource="${DS_PROMETHEUS}",
dataFormat="tsbuckets",
maxDataPoints=25,
tooltip=Tooltip(show=True, showHistogram=True),
yAxis=YAxis(format=UNITS.SECONDS),
color=HeatmapColor(mode="opacity"),
targets=[Target(
expr='sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
format="heatmap",
legendFormat="{{ le }}",
)],
)
operationsWithUnavailableMetrics = TimeSeries(
title="Operations with unavailable metrics",
dataSource="${DS_PROMETHEUS}",
fillOpacity=30,
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
legendDisplayMode="hidden",
targets=[Target(
expr='sum(rate(s3_cloudserver_quota_unavailable_count{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
)],
)
averageQuotaLatencies = TimeSeries(
title="Average quota evaluation latencies",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="smooth",
spanNulls=3*60*1000,
legendDisplayMode="table",
legendPlacement="right",
legendValues=["min", "mean", "max"],
unit=UNITS.SECONDS,
targets=[
Target(
expr=average_quota_latency_target(code='~"2.."', by='type'),
legendFormat='{{ type }} (success)',
),
Target(
expr=average_quota_latency_target(code='"429"', by='type'),
legendFormat='{{ type }} (exceeded)',
),
],
)
averageMetricsRetrievalLatencies = TimeSeries(
title="Average utilization metrics retrieval latencies",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="smooth",
spanNulls=3*60*1000,
unit=UNITS.SECONDS,
targets=[
Target(
expr=average_quota_retrieval_latency(code='~"2.."', by='class'),
legendFormat='{{ class }} (success)',
),
Target(
expr=average_quota_retrieval_latency(
code='~"4..|5.."',
by='class'
),
legendFormat='{{ class }} (error)',
),
],
)
bucketQuotaCounter = Stat(
title="Buckets with quota",
description=(
"Number of S3 buckets with quota enabled in the cluster.\n"
"This value is computed asynchronously, and update "
"may be delayed up to 1h."
),
dataSource="${DS_PROMETHEUS}",
colorMode="value",
format=UNITS.SHORT,
noValue="-",
reduceCalc="lastNotNull",
targets=[Target(
expr='max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
)],
thresholds=[
Threshold("#808080", 0, 0.0),
Threshold("blue", 1, 0.0),
],
)
accountQuotaCounter = Stat(
title="Accounts with quota",
description=(
"Number of accounts with quota enabled in the cluster.\n"
"This value is computed asynchronously, and update "
"may be delayed up to 1h."
),
dataSource="${DS_PROMETHEUS}",
colorMode="value",
format=UNITS.SHORT,
noValue="-",
reduceCalc="lastNotNull",
targets=[Target(
expr='max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
)],
thresholds=[
Threshold("#808080", 0, 0.0),
Threshold("blue", 1, 0.0),
],
)
dashboard = (
Dashboard(
title="S3 service",
@@ -630,6 +820,24 @@ dashboard = (
top10Error500ByBucket,
top10Error5xxByBucket
], height=8),
RowPanel(title="Quotas"),
layout.row([
layout.column([
layout.resize([bucketQuotaCounter], width=6, height=4),
layout.resize([accountQuotaCounter], width=6, height=4),
], height=8),
layout.resize([operationsWithUnavailableMetrics], width=6),
quotaByAction,
], height=8),
layout.row([
layout.resize([quotaHealth], width=6),
layout.resize([quotaStatusCode], width=6),
averageQuotaLatencies,
], height=8),
layout.row([
layout.resize([averageQuotaDuration], width=6),
averageMetricsRetrievalLatencies,
], height=8),
]),
)
.auto_panel_ids()


@@ -45,8 +45,8 @@ then
exit 1
fi
REGISTRY=${REGISTRY:-"registry.scality.com"}
PROJECT=${PROJECT:-"cloudserver-dev"}
REGISTRY=${REGISTRY:-"ghcr.io/scality"}
PROJECT=${PROJECT:-"cloudserver"}
set -x
${ORAS} push "${REGISTRY}/${PROJECT}/${NAME_TAG}" "${INPUT_FILE}:${MIME_TYPE}"


@@ -1,6 +1,6 @@
{
"name": "@zenko/cloudserver",
"version": "8.6.26",
"version": "8.8.27",
"description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
"main": "index.js",
"engines": {
@@ -21,53 +21,61 @@
"dependencies": {
"@azure/storage-blob": "^12.12.0",
"@hapi/joi": "^17.1.0",
"arsenal": "git+https://github.com/scality/arsenal#8.1.127",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"bucketclient": "scality/bucketclient#8.1.9",
"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
"async": "^2.5.0",
"aws-sdk": "^2.905.0",
"bufferutil": "^4.0.6",
"commander": "^2.9.0",
"cron-parser": "^2.11.0",
"diskusage": "1.1.3",
"diskusage": "^1.1.3",
"google-auto-auth": "^0.9.1",
"http-proxy": "^1.17.0",
"http-proxy-agent": "^4.0.1",
"https-proxy-agent": "^2.2.0",
"level-mem": "^5.0.1",
"moment": "^2.26.0",
"mongodb": "^2.2.31",
"mongodb": "^5.2.0",
"node-fetch": "^2.6.0",
"node-forge": "^0.7.1",
"npm-run-all": "~4.1.5",
"npm-run-all": "^4.1.5",
"prom-client": "14.2.0",
"request": "^2.81.0",
"sql-where-parser": "~2.2.1",
"utapi": "github:scality/utapi#8.1.13",
"scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
"sql-where-parser": "^2.2.1",
"utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
"utf-8-validate": "^5.0.8",
"utf8": "~2.1.1",
"utf8": "^2.1.1",
"uuid": "^8.3.2",
"vaultclient": "scality/vaultclient#8.3.11",
"werelogs": "scality/werelogs#8.1.4",
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
"ws": "^5.1.0",
"xml2js": "~0.4.16"
"xml2js": "^0.4.16"
},
"overrides": {
"ltgt": "^2.2.0"
},
"devDependencies": {
"@babel/core": "^7.25.2",
"@babel/preset-env": "^7.25.3",
"babel-loader": "^9.1.3",
"bluebird": "^3.3.1",
"eslint": "^8.14.0",
"eslint-config-airbnb-base": "^13.1.0",
"eslint-config-scality": "scality/Guidelines#8.2.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
"eslint-plugin-import": "^2.14.0",
"eslint-plugin-mocha": "^10.1.0",
"express": "^4.17.1",
"ioredis": "4.9.5",
"istanbul": "1.0.0-alpha.2",
"istanbul-api": "1.0.0-alpha.13",
"ioredis": "^4.9.5",
"istanbul": "^1.0.0-alpha.2",
"istanbul-api": "^1.0.0-alpha.13",
"lolex": "^1.4.0",
"mocha": "^2.3.4",
"mocha": ">=3.1.2",
"mocha-junit-reporter": "^1.23.1",
"mocha-multi-reporters": "^1.1.7",
"node-mocks-http": "1.5.2",
"node-mocks-http": "^1.5.2",
"sinon": "^13.0.1",
"tv4": "^1.2.7"
"tv4": "^1.2.7",
"webpack": "^5.93.0",
"webpack-cli": "^5.1.4"
},
"scripts": {
"cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",
@@ -108,10 +116,11 @@
"utapi_replay": "node lib/utapi/utapiReplay.js",
"utapi_reindex": "node lib/utapi/utapiReindex.js",
"management_agent": "node managementAgent.js",
"test": "CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test": "CI=true S3BACKEND=mem S3QUOTA=scuba mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api",
"test_legacy_location": "CI=true S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_legacy_location": "CI=true S3QUOTA=scuba S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi",
"test_quota": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/quota",
"multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend",
"unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit"


@@ -0,0 +1,39 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;
const bucket = 'deletequotatestbucket';
const nonExistentBucket = 'deletequotatestnonexistentbucket';
describe('Test delete bucket quota', () => {
let s3;
before(() => {
const config = getConfig('default', { signatureVersion: 'v4' });
s3 = new S3(config);
AWS.config.update(config);
});
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
it('should delete the bucket quota', async () => {
try {
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
assert.ok(true);
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
it('should return no such bucket error', async () => {
try {
await sendRequest('DELETE', '127.0.0.1:8000', `/${nonExistentBucket}/?quota=true`);
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
}
});
});
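These quota tests rely on a sendRequest helper from ../quota/tooling. A minimal sketch of what such a helper might look like, assuming node-fetch and xml2js (both in the dependencies above) and eliding request signing, which the real tooling must handle:

const fetch = require('node-fetch');
const xml2js = require('xml2js');

// Hedged sketch: issue a raw HTTP request against CloudServer and parse the
// XML body with xml2js, so callers can inspect fields like err.Error.Code[0].
// Authentication/signing is intentionally left out here (assumption).
async function sendRequest(method, host, path, body = '') {
    const response = await fetch(`http://${host}${path}`, { method, body });
    const text = await response.text();
    const parsed = text ? await xml2js.parseStringPromise(text) : {};
    if (!response.ok) {
        throw parsed; // e.g. { Error: { Code: ['NoSuchBucket'], ... } }
    }
    return parsed;
}

module.exports = { sendRequest };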


@@ -0,0 +1,77 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;
const bucket = 'getquotatestbucket';
const quota = { quota: 1000 };
describe('Test get bucket quota', () => {
let s3;
before(() => {
const config = getConfig('default', { signatureVersion: 'v4' });
s3 = new S3(config);
AWS.config.update(config);
});
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
it('should return the quota', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
const data = await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
assert.strictEqual(data.GetBucketQuota.Name[0], bucket);
assert.strictEqual(data.GetBucketQuota.Quota[0], '1000');
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
it('should return no such bucket error', async () => {
try {
await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
}
});
it('should return no such bucket quota', async () => {
try {
await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
try {
await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
assert.fail('Expected NoSuchQuota error');
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
}
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
});


@@ -475,4 +475,58 @@ describe('Listing corner cases tests', () => {
}
});
});
it('should not list DeleteMarkers for version suspended buckets', done => {
const obj = { name: 'testDeleteMarker.txt', value: 'foo' };
const bucketName = `bucket-test-delete-markers-not-listed${Date.now()}`;
let objectCount = 0;
return async.waterfall([
next => s3.createBucket({ Bucket: bucketName }, err => next(err)),
next => {
const params = {
Bucket: bucketName,
VersioningConfiguration: {
Status: 'Suspended',
},
};
return s3.putBucketVersioning(params, err =>
next(err));
},
next => s3.putObject({
Bucket: bucketName,
Key: obj.name,
Body: obj.value,
}, err =>
next(err)),
next => s3.listObjectsV2({ Bucket: bucketName },
(err, res) => {
if (err) {
return next(err);
}
objectCount = res.Contents.length;
assert.strictEqual(res.Contents.some(c => c.Key === obj.name), true);
return next();
}),
next => s3.deleteObject({
Bucket: bucketName,
Key: obj.name,
}, function test(err) {
const headers = this.httpResponse.headers;
assert.strictEqual(
headers['x-amz-delete-marker'], 'true');
return next(err);
}),
next => s3.listObjectsV2({ Bucket: bucketName },
(err, res) => {
if (err) {
return next(err);
}
assert.strictEqual(res.Contents.length, objectCount - 1);
assert.strictEqual(res.Contents.some(c => c.Key === obj.name), false);
return next();
}),
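// remove the delete marker (stored as the 'null' version under suspended
// versioning) so the bucket can be deleted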
next => s3.deleteObject({ Bucket: bucketName, Key: obj.name, VersionId: 'null' }, err => next(err)),
next => s3.deleteBucket({ Bucket: bucketName }, err => next(err))
], err => done(err));
});
});


@@ -0,0 +1,70 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;
const bucket = 'updatequotatestbucket';
const nonExistentBucket = 'updatequotatestnonexistentbucket';
const quota = { quota: 2000 };
const negativeQuota = { quota: -1000 };
const wrongQuotaFormat = '1000';
const largeQuota = { quota: 1000000000000 };
describe('Test update bucket quota', () => {
let s3;
before(() => {
const config = getConfig('default', { signatureVersion: 'v4' });
s3 = new S3(config);
AWS.config.update(config);
});
beforeEach(done => s3.createBucket({ Bucket: bucket }, done));
afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));
it('should update the quota', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
assert.ok(true);
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
it('should return no such bucket error', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${nonExistentBucket}/?quota=true`, JSON.stringify(quota));
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
}
});
it('should return error when quota is negative', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(negativeQuota));
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
assert.strictEqual(err.Error.Message[0], 'Quota value must be a positive number');
}
});
it('should return error when quota is not in correct format', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, wrongQuotaFormat);
} catch (err) {
assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
assert.strictEqual(err.Error.Message[0], 'Request body must be a JSON object');
}
});
it('should handle large quota values', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(largeQuota));
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
});


@@ -33,7 +33,7 @@ describe('aws-node-sdk v2auth query tests', function testSuite() {
let s3;
before(() => {
const config = getConfig('default');
const config = getConfig('default', { signatureVersion: 'v2' });
s3 = new S3(config);
});


@@ -45,7 +45,7 @@ const itSkipCeph = isCEPH ? it.skip : it.skip;
const describeSkipIfCeph = isCEPH ? describe.skip : describe.skip; // always skip
if (config.backends.data === 'multiple') {
describeSkipIfNotMultiple = describe.skip;
describeSkipIfNotMultiple = describe;
describeSkipIfNotMultipleOrCeph = isCEPH ? describe.skip : describe.skip; // always skip
const awsConfig = getRealAwsConfig(awsLocation);
awsS3 = new AWS.S3(awsConfig);


@@ -7,6 +7,7 @@ const withV4 = require('../support/withV4');
const BucketUtility = require('../../lib/utility/bucket-util');
const { createEncryptedBucketPromise } =
require('../../lib/utility/createEncryptedBucket');
const { fakeMetadataTransition, fakeMetadataArchive } = require('../utils/init');
const sourceBucketName = 'supersourcebucket81033016532';
const sourceObjName = 'supersourceobject';
@@ -710,6 +711,72 @@ describe('Object Part Copy', () => {
});
});
it('should not copy a part of a cold object', done => {
const archive = {
archiveInfo: {
archiveId: '97a71dfe-49c1-4cca-840a-69199e0b0322',
archiveVersion: 5577006791947779
},
};
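// fakeMetadataArchive marks the source object as archived (cold) directly in
// metadata, so copying a part from it must fail with InvalidObjectState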
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archive, err => {
assert.ifError(err);
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}, err => {
assert.strictEqual(err.code, 'InvalidObjectState');
assert.strictEqual(err.statusCode, 403);
done();
});
});
});
it('should copy a part of an object when it\'s transitioning to cold', done => {
fakeMetadataTransition(sourceBucketName, sourceObjName, undefined, err => {
assert.ifError(err);
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}, (err, res) => {
checkNoError(err);
assert.strictEqual(res.ETag, etag);
assert(res.LastModified);
done();
});
});
});
it('should copy a part of a restored object', done => {
const archiveCompleted = {
archiveInfo: {},
restoreRequestedAt: new Date(0),
restoreRequestedDays: 5,
restoreCompletedAt: new Date(10),
restoreWillExpireAt: new Date(10 + (5 * 24 * 60 * 60 * 1000)),
};
fakeMetadataArchive(sourceBucketName, sourceObjName, undefined, archiveCompleted, err => {
assert.ifError(err);
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}, (err, res) => {
checkNoError(err);
assert.strictEqual(res.ETag, etag);
assert(res.LastModified);
done();
});
});
});
describe('copying parts by another account', () => {
const otherAccountBucket = 'otheraccountbucket42342342342';
const otherAccountKey = 'key';

Some files were not shown because too many files have changed in this diff.