Compare commits


2666 Commits

Author SHA1 Message Date
Vitaliy Filippov b5711e9cbf Use fs.readFileSync to read config file instead of require 2024-08-13 11:19:38 +03:00
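A minimal sketch of the approach this commit describes: reading a JSON config with fs.readFileSync + JSON.parse rather than require. The path and variable names are illustrative assumptions, not CloudServer's actual ones.

```js
const fs = require('fs');
const path = require('path');

// Hypothetical config path, for illustration only.
const configPath = path.join(__dirname, 'config.json');

// require() caches the parsed module and resolves via the module system;
// readFileSync reads the file fresh from an explicit path every time.
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));

console.log(config);
```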
Vitaliy Filippov 36dc6298d2 Use webpack to pack 2024-08-13 02:20:08 +03:00
Vitaliy Filippov bc2d637578 Add installation instructions for Vitastor backend 2024-08-12 01:36:42 +03:00
Vitaliy Filippov b543695048 Add example Vitastor backend configs 2024-08-11 17:24:05 +03:00
Vitaliy Filippov 90024d044d Configure "legacy" werelogs because otherwise MultipleBackendGateway was skipping messages 2024-08-04 01:22:48 +03:00
Vitaliy Filippov 451ab33f68 Use config.workers instead of config.clusters 2024-08-03 14:10:39 +03:00
Vitaliy Filippov c86107e912 Add authdata config file reference to config.json 2024-08-03 01:36:01 +03:00
Vitaliy Filippov 0a5962f256 Require scality kms only if kms backend is scality 2024-08-03 01:29:04 +03:00
Vitaliy Filippov 0e292791c6 Setup backends in config.json 2024-08-02 01:45:38 +03:00
Vitaliy Filippov fc07729bd0 Use ^versions 2024-08-02 01:44:13 +03:00
Vitaliy Filippov 4527dd6795 Do not store actual configs in git 2024-08-01 15:52:02 +03:00
Vitaliy Filippov 05fb581023 Use x-amz-storage-class instead of x-amz-meta-scal-location-constraint
FIXME: Ideally, both locations and storage classes should be supported
2024-07-28 02:00:38 +03:00
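For illustration, a hedged example of how a client could select a location via the standard storage-class header using the AWS SDK for JavaScript (v2); the SDK sends StorageClass as x-amz-storage-class, which this commit uses in place of x-amz-meta-scal-location-constraint. Bucket, key, endpoint, and storage-class value are placeholders.

```js
const AWS = require('aws-sdk');

const s3 = new AWS.S3({ endpoint: 'http://localhost:8000', s3ForcePathStyle: true });

s3.putObject({
  Bucket: 'example-bucket',   // placeholder
  Key: 'example-object',      // placeholder
  Body: 'hello',
  StorageClass: 'STANDARD',   // placeholder storage class / location name
}, (err, data) => {
  if (err) { console.error(err); } else { console.log(data); }
});
```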
Vitaliy Filippov 956739a04e Use internal vaultclient for utapi server 2024-07-23 16:32:48 +03:00
Vitaliy Filippov 7ad0888a66 Change git dependency URLs 2024-07-21 17:36:47 +03:00
Vitaliy Filippov bf01ba4ed1 Change git dependency URLs 2024-07-21 15:26:06 +03:00
Vitaliy Filippov ab019e7e50 Make vaultclient dependency optional 2024-07-21 14:19:54 +03:00
Vitaliy Filippov 3797695e74 Make bucketclient dependency optional 2024-07-18 11:17:05 +03:00
Vitaliy Filippov c8084196c4 Remove remote management 2024-07-16 20:34:11 +03:00
bert-e b72e918ff9 Merge branch 'w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.8/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 22887f47d8 Merge branch 'w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.7/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:52 +00:00
bert-e 0cd10a73f3 Merge branch 'w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/8.6/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
bert-e e139406612 Merge branch 'bugfix/CLDSRV-555-deleteObjects-policy-eval-fix' into tmp/octopus/w/7.70/bugfix/CLDSRV-555-deleteObjects-policy-eval-fix 2024-07-15 12:20:51 +00:00
Maha Benzekri d91853a38b
processBucketPolicy fixup for objectDelete
Since https://github.com/scality/cloudserver/pull/5580
we send a requestContext with no specific resource instead
of "null", which results in a policy evaluation error.
The implicit deny returned for the requestType "objectDelete"
causes the processed result to be false, so an empty array of
objects is sent to vault, resulting in a deny even when the
policy allows the action on specific objects.

Linked Issue : https://scality.atlassian.net/browse/CLDSRV-555
2024-07-15 14:20:08 +02:00
Mickael Bourgois a7e798f909
CLDSRV-544: bump version 8.8.27 2024-07-03 19:08:02 +02:00
Mickael Bourgois 3a1ba29869
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-544-stderr' into w/8.8/improvement/CLDSRV-544-stderr 2024-07-03 19:07:41 +02:00
Mickael Bourgois dbb9b6d787
CLDSRV-544: bump version 8.7.48 2024-07-03 18:52:35 +02:00
Mickael Bourgois fce76f0934
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-544-stderr' into w/8.7/improvement/CLDSRV-544-stderr 2024-07-03 18:52:20 +02:00
Mickael Bourgois 0e39aaac09
CLDSRV: bump version 8.6.27 2024-07-03 18:48:28 +02:00
Mickael Bourgois 0b14c93fac
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-544-stderr' into w/8.6/improvement/CLDSRV-544-stderr 2024-07-03 18:48:12 +02:00
bert-e bf7e4b7e23 Merge branch 'w/8.7/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:30 +00:00
bert-e 92f4794727 Merge branch 'w/8.6/bugfix/CLDSRV-547-fixup-version' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-fixup-version 2024-06-27 21:23:29 +00:00
Jonathan Gramain c6ef85e3a1 Merge remote-tracking branch 'origin/bugfix/CLDSRV-547-fixup-version' into w/8.6/bugfix/CLDSRV-547-fixup-version 2024-06-27 14:05:27 -07:00
bert-e 9c936f2b83 Merge branch 'w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.8/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
bert-e d26bac2ebc Merge branch 'w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into tmp/octopus/w/8.7/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 18:17:08 +00:00
Jonathan Gramain cfb9db5178 Merge branch 'w/7.70/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex' into w/8.6/bugfix/CLDSRV-547-updateRedisConfigForUtapiReindex 2024-06-27 10:53:41 -07:00
bert-e e6b240421b Merge branch 'w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.8/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
bert-e 81739e3ecf Merge branch 'w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into tmp/octopus/w/8.7/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-26 01:47:54 +00:00
Jonathan Gramain c475503248 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel' into w/8.6/bugfix/CLDSRV-549-restoreGitCommitShaImageLabel 2024-06-25 18:40:18 -07:00
williamlardier 4f7aa54886 CLDSRV-541: bump project version 2024-06-13 13:58:54 +02:00
williamlardier 0117a5b0b4 CLDSRV-541: add unit test for deleteobjects authz 2024-06-13 13:58:54 +02:00
williamlardier f679831ba2 CLDSRV-541: update unit tests 2024-06-13 13:56:18 +02:00
williamlardier bb162ca7d3 CLDSRV-541: send request context in deleteobjects to get quota information 2024-06-13 11:58:33 +02:00
williamlardier 0c6dfc7b6e CLDSRV-537: bump project version 2024-05-31 13:47:26 +02:00
williamlardier d608d849df CLDSRV-537: bump checkout version for alerts 2024-05-31 13:47:26 +02:00
williamlardier 2cb63f58d4 CLDSRV-537: bump action-prom-render-test version 2024-05-31 13:44:05 +02:00
williamlardier 51585712f4 CLDSRV-537: do not raise quota error if no quota is defined
This ensures that fresh installs, or buckets that get emptied,
do not trigger the alert by mistake.
2024-05-31 13:44:05 +02:00
bert-e 61eb24e46f Merge branch 'w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a34b162782 Merge branch 'w/8.7/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.8/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:02 +00:00
bert-e a9e50fe046 Merge branch 'w/7.70/bugfix/CLDSRV-534/disable_git_clone_protection' into tmp/octopus/w/8.6/bugfix/CLDSRV-534/disable_git_clone_protection 2024-05-22 17:13:01 +00:00
bert-e 09dc45289c Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:31 +00:00
bert-e 47c628e0e1 Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 13:21:30 +00:00
Nicolas Humbert a1f4d3fe8a CLDSRV-529 use shorthand utapi dependency format 2024-05-17 15:10:40 +02:00
williamlardier 926242b077 CLDSRV-553: bump project version 2024-05-17 12:35:59 +02:00
williamlardier aa2aac5db3 CLDSRV-553: functional restore test to simulate cold backend calls 2024-05-17 12:35:59 +02:00
williamlardier f2e2d82e51 CLDSRV-553: unit test the onlyCheckQuota flag 2024-05-17 12:35:59 +02:00
williamlardier 88ad86b0c6 CLDSRV-553: adapt calls to quota evaluation
When the API is being called by a cold backend, the
x-scal-s3-version-id header is set. In this case, the quotas must
be evaluated with a 0 inflight.
2024-05-17 12:35:59 +02:00
bert-e 8f25892247 Merge branch 'w/8.7/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.8/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:32 +00:00
bert-e 9ac207187b Merge branch 'w/8.6/bugfix/CLDSRV-529/bump_utapi' into tmp/octopus/w/8.7/bugfix/CLDSRV-529/bump_utapi 2024-05-17 08:40:31 +00:00
Anurag Mittal 624a04805f
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-529/bump_utapi' into w/8.6/bugfix/CLDSRV-529/bump_utapi 2024-05-17 10:40:00 +02:00
williamlardier 38d1ac1d2c CLDSRV-553: conditionally force evaluating quotas with 0 inflight
A corner case was found, where any PUT from the cold backend would
fail if the quota is already exceeded, as the storage was reserved
for the restore, but the restore itself requires some more bytes
as inflights when evaluating quotas. By passing a flag in the quota
evaluation function, we ensure that we can, in these cases,
evaluate the quotas with 0 inflight.
2024-05-17 08:06:35 +02:00
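A hedged sketch of the idea behind the two CLDSRV-553 commits above: when the request comes from the cold backend (the x-scal-s3-version-id header is set), evaluate quotas with 0 inflight bytes. Function and variable names are hypothetical.

```js
// Hypothetical helper: decide the inflight bytes to use for quota evaluation.
function getInflightForQuotaCheck(request, contentLength) {
    // Requests issued by the cold backend carry this header; the bytes were
    // already reserved when the restore was triggered, so quotas are
    // evaluated with 0 inflight to avoid double counting.
    const isColdBackendCall = request.headers['x-scal-s3-version-id'] !== undefined;
    return isColdBackendCall ? 0 : contentLength;
}

// Usage sketch (evaluateQuotas is hypothetical):
// const inflight = getInflightForQuotaCheck(request, request.parsedContentLength || 0);
// evaluateQuotas(bucket, inflight, callback);
```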
Maha Benzekri 63f6a75a86
CLDSRV-530: bump project version 2024-05-10 18:36:01 +02:00
Maha Benzekri 41acc7968e
CLDSRV-530: from accountwithQuota to accountWithQuotaCount 2024-05-10 18:32:07 +02:00
williamlardier c98c5207fc CLDSRV-520: bump project version 2024-05-10 09:51:02 +02:00
williamlardier 615ee393a4 CLDSRV-520: fix federation image with tsc 2024-05-10 09:51:02 +02:00
williamlardier 97dfc699aa CLDSRV-520: bump cloudserver version 2024-05-10 08:12:35 +02:00
williamlardier 76786282d1 CLDSRV-520: deduplicate quota logic 2024-05-10 08:12:35 +02:00
williamlardier a19d6524be CLDSRV-520: generic quota retrieval latency dashboard 2024-05-10 08:12:35 +02:00
williamlardier bbf6dfba22 CLDSRV-520: monitor quota cleanup
The finalization of the quota logic is always executed. Tests
are added to ensure the inflights are only cleaned up when they
are enabled and an error happened in the API.
In any case, this ensures we monitor quotas in a single place
for each executed action, and correctly compute the total
duration of the quota impact on the API.
2024-05-10 08:11:27 +02:00
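As a hedged illustration of monitoring quota evaluation in a single place, a prom-client histogram timing each evaluation; the metric name, labels, and wrapper are assumptions, not the actual CLDSRV-520 series.

```js
const client = require('prom-client');

// Hypothetical metric; the real names live in the CLDSRV-520 dashboards.
const quotaEvaluationDuration = new client.Histogram({
    name: 's3_cloudserver_quota_evaluation_duration_seconds',
    help: 'Time spent evaluating quotas per API action',
    labelNames: ['action', 'code'],
});

// Wrap a quota evaluation callback so its total duration is always observed.
function withQuotaTiming(action, evaluateFn, callback) {
    const end = quotaEvaluationDuration.startTimer({ action });
    evaluateFn((err, result) => {
        end({ code: err ? (err.code || 'error') : 'ok' });
        callback(err, result);
    });
}
```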
williamlardier f0663fd507 CLDSRV-520: add dashboards 2024-05-10 08:11:27 +02:00
williamlardier d4decbbd6c CLDSRV-520: add alerts 2024-05-10 08:11:27 +02:00
williamlardier 288b2b7b87 CLDSRV-520: observe number of buckets and accounts with quota 2024-05-10 08:11:27 +02:00
williamlardier ccf9b62e59 CLDSRV-520: observe metrics during quota evaluations 2024-05-10 08:11:27 +02:00
williamlardier 9fc2d552ae CLDSRV-520: add metrics for quota 2024-05-07 17:56:24 +02:00
williamlardier d7cc4cf7d5 CLDSRV-515: adapt dockerfile for scubaclient 2024-05-07 16:24:25 +02:00
williamlardier 334d33ef44 CLDSRV-515: unit testing 2024-05-07 16:24:25 +02:00
williamlardier 989b0214d9 CLDSRV-515: functional testing 2024-05-07 16:21:13 +02:00
williamlardier 04d0730f97 CLDSRV-515: clear inflights in case of quota exceeded
- If the quotas are evaluated successfully and inflights are
  enabled, the quota service stores the information and persists
  it until the next update of the utilization metrics.
  In this case, any API that fails after authorization would
  still count the bytes, even if nothing was written. To
  overcome that, we call a function from the quota evaluation
  logic to erase anything that was written during authorization.
2024-05-07 16:21:13 +02:00
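A hedged sketch of the cleanup described above: if the API fails after authorization while inflights are enabled, erase the bytes that were optimistically added during quota evaluation. All names are hypothetical.

```js
// Hypothetical cleanup called from the API error path.
function clearInflightsOnError(err, quotaContext, callback) {
    // Only needed when quotas were evaluated with inflights enabled;
    // otherwise nothing was persisted by the utilization metrics service.
    if (!err || !quotaContext || !quotaContext.inflightsEnabled || quotaContext.inflight === 0) {
        return callback();
    }
    // Ask the quota backend to subtract the bytes we reserved earlier.
    return quotaContext.backend.decrementInflight(
        quotaContext.bucket, quotaContext.inflight, callback);
}
```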
williamlardier fbc642c022 CLDSRV-515: evaluate quotas
Quotas are evaluated:
- As part of the authorization process, after both the bucket and
  the object are authorized. The checks are skipped if the API does
  not need any quota evaluation, or if the inflight bytes are 0 (i.e.,
  no data added, so no need to check the quota).
- The Copy APIs will evaluate the quotas when the source object is
  checked. In this particular case, the action is objectGet, so a
  flag is passed to force the quota evaluation logic. A subsequent
  check is done in the logic.
- The restoreObject API has a special case where the extension of
  the restoration duration would still cause the evaluation of the
  quotas, causing a potential increase in the inflights stored. We
  detect this case and remove any added inflight.
2024-05-07 16:21:13 +02:00
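A hedged sketch of the evaluation rules listed above (skip when no quota check is needed or no bytes are added; force the check for copy sources, where the action is objectGet); the helper name and API list are illustrative only.

```js
// Hypothetical decision helper mirroring the rules in the commit message.
function shouldEvaluateQuotas(apiMethod, inflightBytes, isCopySourceCheck) {
    // The source check of a Copy API passes a flag to force evaluation
    // even though the action being authorized is objectGet.
    if (isCopySourceCheck) {
        return true;
    }
    // APIs that add no data (0 inflight bytes) do not need a quota check.
    if (inflightBytes === 0) {
        return false;
    }
    // Illustrative list of APIs that require quota evaluation.
    const apisWithQuota = ['objectPut', 'objectCopy', 'objectPutPart', 'objectRestore'];
    return apisWithQuota.includes(apiMethod);
}
```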
williamlardier 104435f0b6 CLDSRV-515: implement the quota logic as an helper file 2024-05-07 16:21:13 +02:00
williamlardier a362ac202e CLDSRV-515: bootstrap scuba on startup 2024-05-07 16:21:13 +02:00
williamlardier 1277e58150 CLDSRV-515: create a wrapper for scubaclient and quota service 2024-05-07 16:21:13 +02:00
williamlardier 7727ccf5f0 CLDSRV-515: add configuration for quotas
- The quota service is generic. Only the scuba backend is supported
  for now, but others can be added later, if needed, as long as they
  share the same interface as the scuba client.
- Scuba configuration is passed for the scubaclient tool.
- Ability to disable the inflights is provided. This changes the
  behavior of the quota checks, so that the inflights won't be
  part of the request to the utilization metrics services. This
  reduces the complexity of the quota evaluation logic in case
  of error, as no cleanup will be needed in this case. This,
  however, requires a backend that can provide up to date metrics
  (i.e., <2s).
2024-05-05 15:31:34 +02:00
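Purely for illustration, a possible shape for the quota/scuba section of the configuration described above, shown as a JS object for readability; every key and value here is an assumption, not the actual configuration schema.

```js
// Hypothetical excerpt of config.json.
const quotaConfigExample = {
    scuba: {                   // connection details passed to scubaclient
        host: 'localhost',     // placeholder
        port: 8100,            // placeholder
    },
    quota: {
        backend: 'scuba',      // only backend supported for now
        enableInflights: true, // set to false when the metrics backend is fresh (<2s)
    },
};
```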
williamlardier 71860fc90c CLDSRV-515: do not recreate variable at every authz 2024-05-05 15:31:04 +02:00
williamlardier e504b52de7 CLDSRV-515: bump arsenal and vaultclient, introduce scubaclient 2024-05-02 15:09:23 +02:00
Maha Benzekri b369a47c4d CLDSRV-516: add tests 2024-05-02 14:44:31 +02:00
Maha Benzekri b4fa81e832 CLDSRV-516: implement BucketDeleteQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 1e03d53879 CLDSRV-516: implement BucketGetQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri 63e502d419 CLDSRV-516: implement UpdateBucketQuota API 2024-05-02 14:44:31 +02:00
Maha Benzekri d2a31dc20a CLDSRV-516: specify the signature version of old auth tests
This is unrelated to the quotas, but blocks the CI.
2024-05-02 14:44:28 +02:00
Maha Benzekri f24411875f CLDSRV-516: introduce quota APIs in router 2024-05-02 14:28:56 +02:00
Maha Benzekri 4fd7faa6a3 CLDSRV-516: bump arsenal version 2024-05-02 14:27:44 +02:00
Francois Ferrand 118aaba702
Use sproxyd from ghcr
Issue: CLDSRV-524
2024-04-18 20:38:37 +02:00
Francois Ferrand e4442fdc52
Merge branch 'w/8.7/improvement/CLDSRV-524' into w/8.8/improvement/CLDSRV-524 2024-04-16 18:36:03 +02:00
Francois Ferrand 7fa199741f
Merge branch 'w/8.6/improvement/CLDSRV-524' into w/8.7/improvement/CLDSRV-524 2024-04-16 18:35:32 +02:00
Francois Ferrand f7f95af78f
Migrate to ghcr
Issue: CLDSRV-524
2024-04-16 18:34:49 +02:00
Francois Ferrand 2dc053a784
Merge branch 'w/7.70/improvement/CLDSRV-524' into w/8.6/improvement/CLDSRV-524 2024-04-16 17:57:54 +02:00
bert-e b4754c68ea Merge branches 'w/8.8/bugfix/CLDSRV-518/duplication' and 'q/5548/8.7/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.8 2024-03-25 12:56:17 +00:00
bert-e 11aea5d93b Merge branches 'w/8.7/bugfix/CLDSRV-518/duplication' and 'q/5548/8.6/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.7 2024-03-25 12:56:17 +00:00
bert-e 0c50a5952f Merge branches 'w/8.6/bugfix/CLDSRV-518/duplication' and 'q/5548/7.70/bugfix/CLDSRV-518/duplication' into tmp/octopus/q/8.6 2024-03-25 12:56:16 +00:00
Nicolas Humbert a22719ed47 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-518/duplication' into w/8.8/bugfix/CLDSRV-518/duplication 2024-03-20 08:48:00 +01:00
Nicolas Humbert 41975d539d Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-518/duplication' into w/8.7/bugfix/CLDSRV-518/duplication 2024-03-19 18:12:42 +01:00
Nicolas Humbert c6724eb811 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-518/duplication' into w/8.6/bugfix/CLDSRV-518/duplication 2024-03-19 05:54:35 +01:00
bert-e 8796bf0f44 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
bert-e 735fcd04ef Merge branch 'w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 21:04:57 +00:00
Jonathan Gramain c5522685b2 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 14:04:21 -07:00
Nicolas Humbert caf3146662 CLDSRV-518 fix Ruby dependency: excon
(cherry picked from commit cc1607eaaecb97ab5c48da15f1b1449fe7a4680f)
2024-03-13 13:58:41 -07:00
bert-e 1dee707eb8 Merge branch 'w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger' into tmp/octopus/w/8.8/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 17:36:39 +00:00
Jonathan Gramain 2c8d69c20a Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.7/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 10:18:39 -07:00
Jonathan Gramain 0b2b6ceeb5 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-513-batchDeleteRequestLogger' into w/8.6/bugfix/CLDSRV-513-batchDeleteRequestLogger 2024-03-13 09:46:43 -07:00
bert-e 9dc34f2155 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:14 +00:00
bert-e 08a4c3ade3 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:05:13 +00:00
Nicolas Humbert d5c731856b Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-03-07 10:51:36 +01:00
bert-e 5435c14116 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:40 +00:00
bert-e 38c44ea874 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-03-01 16:31:39 +00:00
Nicolas Humbert 4200346dd2 CLDSRV-501 skip tests related to Backbeat routes for replication 2024-03-01 17:16:36 +01:00
bert-e 5472d0da59 Merge branch 'w/8.7/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
bert-e cdc0bb1128 Merge branch 'w/8.6/bugfix/CLDSRV-501/putmetadata' into tmp/octopus/w/8.7/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:03:38 +00:00
Nicolas Humbert 795f8bcf1c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-501/putmetadata' into w/8.6/bugfix/CLDSRV-501/putmetadata 2024-02-29 10:44:42 +01:00
KillianG 39cba3ee6c
Merge remote-tracking branch 'origin/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust' into w/8.8/improvement/CLDSRV-512-use-TPF-variable-when-restore-adjust 2024-02-27 11:31:55 +01:00
KillianG a00952712f
Bump 8.7.47
Issue: CLDSRV-512
2024-02-27 10:41:34 +01:00
KillianG a246e18e17
Update test for startRestore
Issue: CLDSRV-512
2024-02-27 10:26:19 +01:00
KillianG 3bb3a4d161
Use scaledMsPerDay when restore-adjust
Use scaledMsPerDay when restoring an object that has already been restored, so the time can be made to go faster for testing purposes

Issue: CLDSRV-512
2024-02-27 10:26:11 +01:00
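A hedged sketch of what using a scaled ms-per-day means when extending an already-restored object's expiry; scaledMsPerDay is the value named by the commit, the rest of the names are illustrative.

```js
const MS_PER_DAY = 24 * 60 * 60 * 1000;

// scaledMsPerDay can be shrunk in tests so that a "day" passes much faster
// than real time.
function computeRestoreExpiry(restoreStartDate, days, scaledMsPerDay = MS_PER_DAY) {
    return new Date(restoreStartDate.getTime() + days * scaledMsPerDay);
}

// e.g. with scaledMsPerDay = 1000, a 2-day restore expires 2 seconds later:
// computeRestoreExpiry(new Date(), 2, 1000);
```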
bert-e c6ba7f981e Merge branches 'w/8.8/bugfix/CLDSRV-498/null' and 'q/5526/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.8 2024-02-21 13:57:14 +00:00
bert-e 69c82da878 Merge branches 'w/8.6/bugfix/CLDSRV-498/null' and 'q/5526/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.6 2024-02-21 13:57:13 +00:00
bert-e 762ae5a0ff Merge branches 'w/8.7/bugfix/CLDSRV-498/null' and 'q/5526/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/q/8.7 2024-02-21 13:57:13 +00:00
bert-e 3205d117f5 Merge branches 'w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.8 2024-02-20 13:05:07 +00:00
bert-e 4eafae44d8 Merge branches 'w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/7.70/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.6 2024-02-20 13:05:06 +00:00
bert-e 4cab3c84f3 Merge branches 'w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' and 'q/5539/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into tmp/octopus/q/8.7 2024-02-20 13:05:06 +00:00
williamlardier 0dcc93cdbe Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.8/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:49:56 +01:00
williamlardier 2f2f91d6e8 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.7/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:48:05 +01:00
williamlardier a28b141dfb Merge remote-tracking branch 'origin/bugfix/CLDSRV-508-fix-bucket-tagging' into w/8.6/bugfix/CLDSRV-508-fix-bucket-tagging 2024-02-20 13:43:22 +01:00
bert-e 1433973e5c Merge branch 'w/8.7/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e 201170b1ed Merge branch 'w/8.6/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-498/null 2024-02-20 11:24:08 +00:00
bert-e f13985094e Merge branch 'w/7.70/bugfix/CLDSRV-498/null' into tmp/octopus/w/8.6/bugfix/CLDSRV-498/null 2024-02-20 11:24:07 +00:00
bert-e 242b2ec85a Merge branches 'w/8.8/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.7/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.8 2024-02-19 15:00:59 +00:00
bert-e 3186a97113 Merge branches 'w/8.7/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/8.6/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.7 2024-02-19 15:00:59 +00:00
bert-e bb278f7d7e Merge branches 'w/8.6/bugfix/CLDSRV-505-ip-handling-fix' and 'q/5534/7.70/bugfix/CLDSRV-505-ip-handling-fix' into tmp/octopus/q/8.6 2024-02-19 15:00:58 +00:00
Will Toozs 0118dfabbb
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-505-ip-handling-fix' into w/8.8/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:40:58 +01:00
Will Toozs ff40dfaadf
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-505-ip-handling-fix' into w/8.7/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:25:18 +01:00
Will Toozs 9a31236da0
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-505-ip-handling-fix' into w/8.6/bugfix/CLDSRV-505-ip-handling-fix 2024-02-19 15:22:08 +01:00
bert-e 9c99a6980f Merge branches 'w/8.8/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.8 2024-02-19 10:16:26 +00:00
bert-e d4e255781b Merge branches 'w/8.7/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.7 2024-02-19 10:16:26 +00:00
bert-e f5763d012e Merge branches 'w/8.6/bugfix/CLDSRV-507-bp-fixes' and 'q/5530/7.70/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/q/8.6 2024-02-19 10:16:24 +00:00
bert-e 1afaaec0ac Merge branch 'w/8.7/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.8/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:24 +00:00
bert-e e20e458971 Merge branch 'w/8.6/bugfix/CLDSRV-507-bp-fixes' into tmp/octopus/w/8.7/bugfix/CLDSRV-507-bp-fixes 2024-02-19 09:13:23 +00:00
williamlardier 56e52de056 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-507-bp-fixes' into w/8.6/bugfix/CLDSRV-507-bp-fixes 2024-02-19 10:01:09 +01:00
bert-e bef9220032 Merge branches 'w/8.8/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.8 2024-02-15 18:43:31 +00:00
bert-e de20f1efdc Merge branches 'w/8.7/bugfix/CLDSRV-497/putmetadata' and 'q/5525/8.6/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.7 2024-02-15 18:43:31 +00:00
bert-e 4817f11f36 Merge branches 'w/8.6/bugfix/CLDSRV-497/putmetadata' and 'q/5525/7.70/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/q/8.6 2024-02-15 18:43:30 +00:00
bert-e b89d19c9f8 Merge branch 'w/8.7/bugfix/CLDSRV-497/putmetadata' into tmp/octopus/w/8.8/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:58:27 +00:00
Nicolas Humbert 4dc9788629 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-497/putmetadata' into w/8.7/bugfix/CLDSRV-497/putmetadata 2024-02-15 18:43:28 +01:00
Nicolas Humbert 65a891d6f8 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-497/putmetadata' into w/8.6/bugfix/CLDSRV-497/putmetadata 2024-02-15 17:51:48 +01:00
bert-e 06dc042154 Merge branches 'w/8.8/improvement/CLDSRV-502' and 'q/5528/8.7/improvement/CLDSRV-502' into tmp/octopus/q/8.8 2024-02-08 13:49:18 +00:00
bert-e aa4643644a Merge branches 'w/8.7/improvement/CLDSRV-502' and 'q/5528/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.7 2024-02-08 13:49:18 +00:00
bert-e 89edf7e3d0 Merge branch 'w/8.6/improvement/CLDSRV-502' into tmp/octopus/q/8.6 2024-02-08 13:49:18 +00:00
Francois Ferrand 4c7d3ae4bc
Merge branch 'w/8.7/improvement/CLDSRV-502' into w/8.8/improvement/CLDSRV-502 2024-02-05 18:50:27 +01:00
Francois Ferrand 23883dae8b
Merge branch 'w/8.6/improvement/CLDSRV-502' into w/8.7/improvement/CLDSRV-502 2024-02-05 18:50:12 +01:00
Francois Ferrand e616ffa374
gha: fix test alert trigger to match other premerge build
Issue: CLDSRV-502
2024-02-05 18:49:31 +01:00
Francois Ferrand 515c20e4cf
Merge branch 'w/7.70/improvement/CLDSRV-502' into w/8.6/improvement/CLDSRV-502 2024-02-05 18:48:18 +01:00
Francois Ferrand 531c83a359
Release 8.8.17
Issue: CLDSRV-500
2024-02-05 17:35:43 +01:00
Francois Ferrand b84fa851f7
Merge branch 'w/8.7/bugfix/CLDSRV-500' into w/8.8/bugfix/CLDSRV-500 2024-02-05 17:35:20 +01:00
Francois Ferrand 4cb1a879f7
Release 8.7.44
Issue: CLDSRV-500
2024-02-05 17:34:45 +01:00
Francois Ferrand 7ae55b20e7
Merge branch 'bugfix/CLDSRV-500' into w/8.7/bugfix/CLDSRV-500 2024-02-05 17:32:53 +01:00
Francois Ferrand d0a6fa17a5
Release 8.6.24
Issue: CLDSRV-500
2024-02-05 17:31:36 +01:00
Francois Ferrand 7275459f70
Use rate interval in `Request time` panel
- Should use $__rate_interval, which handles small time ranges.
- Regenerating the dashboard also fixes the 'latency per s3 action'
  panel.

Issue: CLDSRV-500
2024-02-01 15:49:29 +01:00
Hervé Dombya 363afcd17f CLDSRV-473: fix cors issues in getVeeamFile 2024-01-26 15:59:10 +01:00
Frédéric Meinnel 1cf0250ce9 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.8/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:06:05 +01:00
Frédéric Meinnel 20d0b38d0b Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.7/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 15:05:39 +01:00
Frédéric Meinnel 9988a8327a Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests' into w/8.6/bugfix/CLDSRV-494/fix-generate-v4-headers-for-put-with-body-requests 2024-01-23 14:06:31 +01:00
Frédéric Meinnel 601619f200 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.8/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:24:05 +01:00
Frédéric Meinnel a92e71fd50 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.7/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:22:55 +01:00
Frédéric Meinnel 8802ea0617 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates' into w/8.6/bugfix/CLDSRV-493/fully-align-with-aws-on-lifecycle-configuration-dates 2024-01-17 13:21:42 +01:00
bert-e 43f62b847c Merge branch 'w/8.7/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.8/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e a031905bba Merge branch 'w/8.6/bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.7/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:39 +00:00
bert-e 13ad6881f4 Merge branch 'bugfix/CLDSRV-492-head-monitoring' into tmp/octopus/w/8.6/bugfix/CLDSRV-492-head-monitoring 2024-01-16 20:57:38 +00:00
bert-e cd2406b827 Merge branches 'w/8.8/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.8 2024-01-15 09:47:24 +00:00
bert-e 62f707caff Merge branches 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.7 2024-01-15 09:47:23 +00:00
bert-e f01ef00a52 Merge branches 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' and 'q/5520/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/q/8.6 2024-01-15 09:47:23 +00:00
bert-e 848bf318fe Merge branches 'development/8.8' and 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:46 +00:00
bert-e 0beb48a1fd Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:45 +00:00
bert-e 618d4dffc7 Merge branches 'development/8.6' and 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-15 09:07:44 +00:00
Will Toozs d274acd8ed
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-436-bump-version' into w/8.8/improvement/CLDSRV-436-bump-version 2024-01-11 13:10:57 +01:00
Will Toozs e6d9e8fc35
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-436-bump-version' into w/8.7/improvement/CLDSRV-436-bump-version 2024-01-11 11:50:25 +01:00
Will Toozs b08edefad6
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bump-version' into w/8.6/improvement/CLDSRV-436-bump-version 2024-01-11 11:24:50 +01:00
bert-e 7bb004586d Merge branch 'w/8.7/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.8/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:49 +00:00
bert-e d48de67723 Merge branch 'w/8.6/improvement/CLDSRV-436-bp-conds' into tmp/octopus/w/8.7/improvement/CLDSRV-436-bp-conds 2024-01-10 21:13:48 +00:00
Will Toozs fa4dec01cb
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-436-bp-conds' into w/8.6/improvement/CLDSRV-436-bp-conds 2024-01-10 22:00:37 +01:00
bert-e b141c59bb7 Merge branch 'w/8.7/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 0b79ecd942 Merge branch 'w/8.6/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:57 +00:00
bert-e 86ece5c264 Merge branch 'w/7.70/bugfix/CLDSRV-489-redirect-folder-index' into tmp/octopus/w/8.6/bugfix/CLDSRV-489-redirect-folder-index 2024-01-10 18:44:56 +00:00
bert-e 10ca6b98fa Merge branch 'w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.8/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
bert-e 171925732f Merge branch 'w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into tmp/octopus/w/8.7/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 18:05:23 +00:00
Taylor McKinnon 6d36f9c867 Merge remote-tracking branch 'origin/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning' into w/8.6/improvement/CLDSRV-475/add_overhead_fields_for_suspended_versioning 2024-01-10 10:04:49 -08:00
bert-e 70e8b20af9 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 0ec5f4fee5 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:20 +00:00
bert-e 6c468a01d9 Merge branch 'w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:51:19 +00:00
bert-e e600677545 Merge branch 'w/8.7/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.8/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
bert-e 72e5da10b7 Merge branch 'w/8.6/bugfix/CLDSRV-485-custom-err-redirect' into tmp/octopus/w/8.7/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 12:34:50 +00:00
Mickael Bourgois de0e7e6449
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-485-custom-err-redirect' into w/8.6/bugfix/CLDSRV-485-custom-err-redirect 2024-01-10 13:15:29 +01:00
bert-e 759817c5a0 Merge branch 'w/8.7/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.8/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
bert-e 035c7e8d7f Merge branch 'w/8.6/bugfix/CLDSRV-482-head-redirect-index' into tmp/octopus/w/8.7/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 10:37:52 +00:00
Mickael Bourgois b8af1225d5
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-482-head-redirect-index' into w/8.6/bugfix/CLDSRV-482-head-redirect-index 2024-01-10 11:28:13 +01:00
bert-e de27a5b88e Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e a4cc5e45f3 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:49 +00:00
bert-e 621cb33680 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:56:48 +00:00
bert-e 9a8b707e82 Merge branch 'w/8.7/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.8/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:32 +00:00
bert-e 002dbe0019 Merge branch 'w/8.6/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.7/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e 59e52f6df2 Merge branch 'w/7.70/bugfix/CLDSRV-488-error-type-bp' into tmp/octopus/w/8.6/bugfix/CLDSRV-488-error-type-bp 2024-01-10 08:54:31 +00:00
bert-e d803bdcadc Merge branch 'w/8.7/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.8/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:57 +00:00
bert-e 4f1b8f25b7 Merge branch 'w/8.6/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.7/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e 94363482c3 Merge branch 'w/7.70/bugfix/CLDSRV-477-putobj-perm-check' into tmp/octopus/w/8.6/bugfix/CLDSRV-477-putobj-perm-check 2024-01-08 13:49:56 +00:00
bert-e e969eeaa20 Merge branches 'w/8.8/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.8 2024-01-05 11:24:59 +00:00
bert-e 2ee78bcf6a Merge branches 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.7 2024-01-05 11:24:58 +00:00
bert-e 64273365d5 Merge branches 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' and 'q/5516/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/q/8.6 2024-01-05 11:24:58 +00:00
bert-e f31fe2f2bf Merge branch 'w/8.7/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.8/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
bert-e ee47cece90 Merge branch 'w/8.6/bugfix/CLDSRV-490-bucket-policy-resource' into tmp/octopus/w/8.7/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 11:10:28 +00:00
Mickael Bourgois 7a5cddacbc
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-490-bucket-policy-resource' into w/8.6/bugfix/CLDSRV-490-bucket-policy-resource 2024-01-05 12:08:54 +01:00
Mickael Bourgois 2d50a76923
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-486-object-redirect-root' into w/8.8/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:59:20 +01:00
Mickael Bourgois 6b4f10ae56
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-486-object-redirect-root' into w/8.7/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:57:36 +01:00
Mickael Bourgois 23eaf89cc3
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-486-object-redirect-root' into w/8.6/bugfix/CLDSRV-486-object-redirect-root 2024-01-04 16:55:48 +01:00
williamlardier dbda5f16a6 CLDSRV-407: bump mongodb to v5.0 in CI 2024-01-04 14:04:20 +01:00
Maha Benzekri 2959c950dd
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.8/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:36:20 +01:00
Maha Benzekri 462ddf7ef1
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.7/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:34:44 +01:00
Maha Benzekri fda42e7399
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests' into w/8.6/bugfix/CLDSRV-480-ByPassGovernance-bucket-policy-tests 2024-01-03 10:32:41 +01:00
Jonathan Gramain ea7b69e313 Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:26:27 -08:00
Jonathan Gramain 8ec1c2f2db Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:08:40 -08:00
Jonathan Gramain 3af6ca5f6d Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 15:06:45 -08:00
bert-e 43f9606598 Merge branch 'w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.8/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:02 +00:00
bert-e be34e5ad59 Merge branch 'w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep' into tmp/octopus/w/8.7/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 17:45:01 +00:00
Jonathan Gramain 5bc64ede43 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-478-bump-arsenal-dep' into w/8.6/bugfix/CLDSRV-478-bump-arsenal-dep 2024-01-02 09:41:03 -08:00
Mickael Bourgois 3ce869cea3
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-479-website-fqdn-index' into w/8.8/bugfix/CLDSRV-479-website-fqdn-index
# Conflicts:
#	package.json
2024-01-02 11:40:28 +01:00
Mickael Bourgois b7960784db
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-479-website-fqdn-index' into w/8.7/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:35:36 +01:00
Mickael Bourgois 5ac10cefa8
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-479-website-fqdn-index' into w/8.6/bugfix/CLDSRV-479-website-fqdn-index 2024-01-02 11:33:49 +01:00
bert-e bf235f3335 Merge branch 'w/8.7/bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.8/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:53 +00:00
bert-e 569c9f4368 Merge branch 'bugfix/CLDSRV-483/ceph-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-483/ceph-tests 2023-12-31 10:39:52 +00:00
Nicolas Humbert 92cf03254a CLDSRV-483 Improve Ruby test output readability and enable backtrace 2023-12-31 11:08:21 +01:00
Nicolas Humbert c57ae9c8ea CLDSRV-483 Bump ruby patch version to fix malformed header response
More info about the malformed header response: https://github.com/excon/excon/issues/845
2023-12-31 11:08:16 +01:00
bert-e 1a3cb8108c Merge branch 'q/5495/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 042120b17e Merge branch 'q/5495/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e ba4593592d Merge branch 'w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.8 2023-12-15 06:44:04 +00:00
bert-e 6efdb627da Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.7 2023-12-15 06:44:04 +00:00
bert-e 5306bf0b5c Merge branch 'q/5495/7.70/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e 5b22819c3f Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/normal/q/8.6 2023-12-15 06:44:03 +00:00
bert-e e5b692f3db Merge branch 'w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.8/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:49 +00:00
bert-e 548ae8cd12 Merge branch 'w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into tmp/octopus/w/8.7/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 18:30:48 +00:00
Taylor McKinnon 80376405df Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-467/add_reindex_opt_only_count_latest' into w/8.6/improvement/CLDSRV-467/add_reindex_opt_only_count_latest 2023-12-14 10:30:13 -08:00
bert-e 2a919af071 Merge branch 'w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:47 +00:00
bert-e 5c300b8b6c Merge branch 'w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into tmp/octopus/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:21:46 +00:00
Maha Benzekri ad3ebd3db2
CLDSRV-451: fix on gettagging 2023-12-14 18:21:24 +01:00
Maha Benzekri 99068e7265
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.8/improvement/CLDSRV-451-specific-7.70-apis-update 2023-12-14 17:36:17 +01:00
Maha Benzekri cd039d8133
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.7/improvement/CLDSRV-451-specific-7.70-apis-update
In this commit, the only API change compared to 8.6 is
routeVeeam.
2023-12-14 17:33:03 +01:00
Maha Benzekri dd3ec25d74
Merge remote-tracking branch 'origin/improvement/CLDSRV-451-specific-7.70-apis-update' into w/8.6/improvement/CLDSRV-451-specific-7.70-apis-update
In this merge, we have updated the tagging APIs along with the
lifecycle APIs, the metadata search APIs, and objectRestore; the
unit test for objectRestore has been updated as well.
2023-12-14 17:28:46 +01:00
Maha Benzekri 75b293df8d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.8/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:37:14 +01:00
Maha Benzekri a855e38998
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.7/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:35:02 +01:00
Maha Benzekri 51d5666bec
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-431-misc-api-implicitDeny' into w/8.6/improvement/CLDSRV-431-misc-api-implicitDeny 2023-12-14 13:32:36 +01:00
Maha Benzekri ffe4ea4afe
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.8/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 14:47:24 +01:00
Maha Benzekri a16cfad0fc
CLDSRV-474: mongodb_image on all jobs 2023-12-12 14:06:02 +01:00
bert-e 556163e3e9 Merge branch 'w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into tmp/octopus/w/8.7/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 12:55:56 +00:00
Maha Benzekri 8fe9f16661
CLDSRV-474: Removing the docker-compose commands from the tests.yaml 2023-12-12 13:53:53 +01:00
Maha Benzekri eb9ff85bd9
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut' into w/8.6/improvement/CLDSRV-474-fix-multiObjectDelete-api-aut 2023-12-12 13:52:50 +01:00
Maha Benzekri 869d554e43
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.8/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:42:25 +01:00
Maha Benzekri 2f8b228595
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.7/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:39:20 +01:00
Maha Benzekri 539b2c1630
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-430-delete-api-implicitDeny' into w/8.6/improvement/CLDSRV-430-delete-api-implicitDeny 2023-12-08 18:35:11 +01:00
Maha Benzekri e44b7ed918
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 12:00:50 +01:00
Maha Benzekri 3cb29f7f8e
CLDSRV-429: version bump for version release 2023-12-05 12:00:09 +01:00
Maha Benzekri 4f08a4dff2
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 11:58:27 +01:00
Maha Benzekri 15a1aa7965
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-05 10:58:04 +01:00
Maha Benzekri 4470ee9125
CLDSRV-429: version bump for release 2023-12-05 10:55:31 +01:00
Francois Ferrand d8c12597ea
Release cloudserver 8.8.7
Issue: CLDSRV-471
2023-12-01 19:03:38 +01:00
Francois Ferrand c8eb9025fa
Merge remote-tracking branch 'origin/improvement/CLDSRV-471' into w/8.8/improvement/CLDSRV-471 2023-12-01 19:03:17 +01:00
Francois Ferrand 57e0f71e6a
Release cloudserver 8.7.33
Issue: CLDSRV-471
2023-12-01 19:01:30 +01:00
Francois Ferrand f22f920ee2
Bump arsenal 8.1.115
Issue: CLDSRV-471
2023-12-01 18:42:26 +01:00
Maha Benzekri ed1bb6301d
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.8/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:31:50 +01:00
Maha Benzekri 70dfa5b11b
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.7/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:29:14 +01:00
Maha Benzekri f17e7677fa
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-429-get-apis-implicitDeny' into w/8.6/improvement/CLDSRV-429-get-apis-implicitDeny 2023-12-01 11:27:44 +01:00
Francois Ferrand a4e6f9d034
Add lifecycle restore duration metrics
Issue: CLDSRV-471
2023-11-30 14:55:01 +01:00
Maha Benzekri cf94b9de6a
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:23:08 +01:00
Maha Benzekri da0492d2bb
Merge remote-tracking branch 'origin/development/8.8' into w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:22:32 +01:00
Maha Benzekri 979b9065ed
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:19:27 +01:00
Maha Benzekri d5a3923f74
Merge remote-tracking branch 'origin/development/8.7' into w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:18:06 +01:00
Maha Benzekri 23cbbdaaed
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-428-put-apis-impDeny' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:17:05 +01:00
Maha Benzekri e506dea140
Merge remote-tracking branch 'origin/development/8.6' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-29 16:15:52 +01:00
bert-e bc291fe3a7 Merge branches 'w/8.8/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.8 2023-11-27 17:16:14 +00:00
bert-e 8dc7432c51 Merge branches 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.7 2023-11-27 17:16:14 +00:00
bert-e 040fe53e53 Merge branches 'w/8.6/bugfix/CLDSRV-463/bump_cloudserver' and 'q/5444/7.70/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/q/8.6 2023-11-27 17:16:13 +00:00
bert-e 6f963bdcd9 Merge branch 'w/8.7/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.8/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:56 +00:00
bert-e cd9024fd32 Merge branch 'w/8.6/improvement/CLDSRV-428-put-apis-impDeny' into tmp/octopus/w/8.7/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 16:02:55 +00:00
Maha Benzekri 37649bf49b
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-428-put-apis-impDeny' into w/8.6/improvement/CLDSRV-428-put-apis-impDeny 2023-11-27 17:01:43 +01:00
bert-e dff7610060 Merge branch 'w/8.7/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.8/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:07 +00:00
bert-e 757c2537ef Merge branch 'w/8.6/improvement/CLDSRV-427-permissions-checks' into tmp/octopus/w/8.7/improvement/CLDSRV-427-permissions-checks 2023-11-17 11:30:06 +00:00
Maha Benzekri c445322685
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-427-permissions-checks' into w/8.6/improvement/CLDSRV-427-permissions-checks 2023-11-17 12:28:19 +01:00
bert-e 4515b2adbf Merge branch 'w/8.7/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
bert-e 50ffdd260b Merge branch 'w/8.6/bugfix/CLDSRV-463/bump_cloudserver' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 23:26:50 +00:00
Taylor McKinnon 3836848c05 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-463/bump_cloudserver' into w/8.6/bugfix/CLDSRV-463/bump_cloudserver 2023-11-16 15:26:23 -08:00
bert-e b5f22d8c68 Merge branches 'w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.8 2023-11-16 19:43:14 +00:00
bert-e 68ff54d49a Merge branches 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.7 2023-11-16 19:43:13 +00:00
bert-e a74b3eacf8 Merge branches 'w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' and 'q/5403/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/q/8.6 2023-11-16 19:43:13 +00:00
bert-e 3fe5579c80 Merge branch 'w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.8/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:08 +00:00
bert-e 3fdd2bce21 Merge branch 'w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers' into tmp/octopus/w/8.7/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 19:25:07 +00:00
Taylor McKinnon 44e6eb2550 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-463/strictly_check_algo_headers' into w/8.6/bugfix/CLDSRV-463/strictly_check_algo_headers 2023-11-16 11:20:55 -08:00
bert-e c9b512174f Merge branches 'w/8.8/bugfix/CLDSRV-460-forward-system-signals' and 'q/5431/8.7/bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/q/8.8 2023-11-15 10:14:18 +00:00
bert-e 7b48624cf7 Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into q/8.7 2023-11-15 10:14:17 +00:00
bert-e 55b07def2e Merge branch 'bugfix/CLDSRV-460-forward-system-signals' into tmp/octopus/w/8.8/bugfix/CLDSRV-460-forward-system-signals 2023-11-15 09:43:35 +00:00
bert-e 62ae2b2c69 Merge branch 'w/7.70/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.6/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e fcc9468b63 Merge branch 'w/8.6/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.7/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e efc44a620d Merge branch 'w/8.7/improvement/CLDSRV-468-version-bump' into tmp/octopus/w/8.8/improvement/CLDSRV-468-version-bump 2023-11-14 11:06:35 +00:00
bert-e 1bc19b39d7 Merge branches 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.7 2023-11-13 17:20:17 +00:00
bert-e b5fa3a1fd3 Merge branches 'w/8.8/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.8 2023-11-13 17:20:17 +00:00
bert-e 68a6fc659c Merge branches 'w/8.6/improvement/CLDSRV-466/timestamps_in_stderr' and 'q/5406/7.70/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/q/8.6 2023-11-13 17:20:16 +00:00
bert-e c0fc958365 Merge branch 'w/8.7/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.8/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:26 +00:00
bert-e d3c74d2c16 Merge branch 'w/8.6/improvement/CLDSRV-426-acl-impl-deny' into tmp/octopus/w/8.7/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 16:03:25 +00:00
Maha Benzekri 9001285177
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-426-acl-impl-deny' into w/8.6/improvement/CLDSRV-426-acl-impl-deny 2023-11-13 17:02:22 +01:00
Kerkesni 07eda89a3f
forward system signals to the node process using tini
npm run doesn’t handle signal forwarding and crashes
on the SIGTERM signal sent by Kubernetes.

Tini runs as PID 1 and forwards system signals to all of its
child processes.

Issue: CLDSRV-460
2023-11-13 12:07:29 +01:00
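For context, a hedged sketch of the graceful-shutdown handler that this change makes reachable: the handler only runs if SIGTERM actually reaches the node process, which is why tini (not `npm run`) must sit at PID 1 and forward signals. The `server` variable is illustrative.

```js
// Illustrative graceful shutdown; `server` stands in for the HTTP server.
process.on('SIGTERM', () => {
    // Without a PID 1 that forwards signals (tini), Kubernetes' SIGTERM
    // would hit `npm run` instead and this handler would never fire.
    server.close(() => {
        process.exit(0);
    });
});
```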
bert-e 27b4066ca4 Merge branch 'w/8.6/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.7/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e 2ee5b356fa Merge branch 'w/8.7/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.8/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:45 +00:00
bert-e 233955a0d3 Merge branch 'w/7.70/improvement/CLDSRV-466/timestamps_in_stderr' into tmp/octopus/w/8.6/improvement/CLDSRV-466/timestamps_in_stderr 2023-11-10 16:18:44 +00:00
bert-e f5d3433413 Merge branches 'w/8.8/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.8 2023-11-09 17:31:36 +00:00
bert-e 62b4b9bc25 Merge branches 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.7 2023-11-09 17:31:35 +00:00
bert-e ce4b2b5a27 Merge branches 'w/8.6/improvement/CLDSRV-464/support_mpu_scuba' and 'q/5405/7.70/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/q/8.6 2023-11-09 17:31:34 +00:00
bert-e ec56c77881 Merge branch 'w/8.7/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.8/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e d0abde3962 Merge branch 'w/8.6/improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.7/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:07 +00:00
bert-e f08a3f434b Merge branch 'improvement/CLDSRV-464/support_mpu_scuba' into tmp/octopus/w/8.6/improvement/CLDSRV-464/support_mpu_scuba 2023-11-08 17:19:06 +00:00
bert-e fdc682f2db Merge branches 'w/8.8/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.7/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.8 2023-11-07 09:32:43 +00:00
bert-e b184606dc2 Merge branches 'w/8.7/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/8.6/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.7 2023-11-07 09:32:43 +00:00
bert-e 172ec4a714 Merge branches 'w/8.6/improvement/CLDSRV-424-apicall-auth-update' and 'q/5322/7.70/improvement/CLDSRV-424-apicall-auth-update' into tmp/octopus/q/8.6 2023-11-07 09:32:42 +00:00
Maha Benzekri 9ce0f2c2b6
Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-424-apicall-auth-update' into w/8.8/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:20:41 +01:00
Maha Benzekri 43b4e0c713
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-424-apicall-auth-update' into w/8.7/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:18:48 +01:00
Maha Benzekri 2bda761518
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-424-apicall-auth-update' into w/8.6/improvement/CLDSRV-424-apicall-auth-update 2023-11-07 09:16:48 +01:00
bert-e 9185f16554 Merge branch 'w/8.7/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.8/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
bert-e 2df9a57f9c Merge branch 'w/8.6/bugfix/CLDSRV-462/tags' into tmp/octopus/w/8.7/bugfix/CLDSRV-462/tags 2023-10-25 18:44:17 +00:00
Nicolas Humbert c96706ff28 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-462/tags' into w/8.6/bugfix/CLDSRV-462/tags 2023-10-25 20:42:14 +02:00
bert-e 68535f83d6 Merge branches 'w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.8 2023-10-24 18:40:33 +00:00
bert-e 41d63650be Merge branches 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.7 2023-10-24 18:40:32 +00:00
bert-e 4ebb5d449a Merge branches 'w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' and 'q/5384/7.70/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/q/8.6 2023-10-24 18:40:32 +00:00
bert-e 12185f7c3b Merge branches 'w/8.8/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.7/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.8 2023-10-19 20:36:18 +00:00
bert-e 5f82ee2d0e Merge branches 'w/8.7/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/8.6/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.7 2023-10-19 20:36:18 +00:00
bert-e 7e0f9c63fe Merge branches 'w/8.6/improvement/CLDSRV-449/pass_overhead_fields' and 'q/5354/7.70/improvement/CLDSRV-449/pass_overhead_fields' into tmp/octopus/q/8.6 2023-10-19 20:36:17 +00:00
Taylor McKinnon d72bc5c6b9 Merge remote-tracking branch 'origin/w/8.7/improvement/CLDSRV-449/pass_overhead_fields' into w/8.8/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 13:16:26 -07:00
Taylor McKinnon 0e47810963 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-449/pass_overhead_fields' into w/8.7/improvement/CLDSRV-449/pass_overhead_fields 2023-10-19 12:40:23 -07:00
Taylor McKinnon 8d83546ee3 Merge remote-tracking branch 'origin/improvement/CLDSRV-449/pass_overhead_fields' into w/8.6/improvement/CLDSRV-449/pass_overhead_fields 2023-10-16 12:01:03 -07:00
bert-e 3b36cef85f Merge branch 'w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into tmp/octopus/w/8.8/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 18:57:46 +00:00
Jonathan Gramain 114b885c7f Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into w/8.7/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 11:35:23 -07:00
Jonathan Gramain e56d4e3744 Merge remote-tracking branch 'origin/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion' into w/8.6/bugfix/CLDSRV-458-fixBucketdParamsUpdatingLegacyNullVersion 2023-10-11 11:05:32 -07:00
williamlardier 3b95c033d2 Merge remote-tracking branch 'origin/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal' into w/8.8/bugfix/CLDSRV-457-fix-memory-leak-in-arsenal 2023-10-06 17:59:22 +02:00
williamlardier 04091dc316 CLDSRV-457: bump project version 2023-10-06 14:54:35 +02:00
williamlardier 56023a80ed CLDSRV-457: bump arsenal 2023-10-06 14:54:34 +02:00
bert-e 2deaebd89a Merge branch 'w/8.7/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.8/bugfix/CLDSRV-455/skip 2023-10-05 16:41:46 +00:00
bert-e c706ccf9c6 Merge branch 'w/8.6/bugfix/CLDSRV-455/skip' into tmp/octopus/w/8.7/bugfix/CLDSRV-455/skip 2023-10-05 16:41:45 +00:00
Nicolas Humbert 4afb2476f8 Merge remote-tracking branch 'origin/bugfix/CLDSRV-455/skip' into w/8.6/bugfix/CLDSRV-455/skip 2023-10-05 18:21:54 +02:00
Francois Ferrand 583ea8490f
Bump 8.8.3
Issue: CLDSRV-454
2023-10-04 11:18:25 +02:00
bert-e 85a9480793 Merge branch 'w/8.8/improvement/CLDSRV-446/bump' into tmp/octopus/q/8.8 2023-10-03 10:44:50 +00:00
bert-e be2f65b69e Merge branch 'bugfix/CLDSRV-423-test-sproxyd' into q/8.8 2023-10-03 10:12:16 +00:00
bert-e 1ee6d0a87d Merge branch 'w/8.7/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.8/improvement/CLDSRV-446/bump 2023-10-02 15:25:13 +00:00
bert-e 224af9a5d2 Merge branch 'w/8.6/improvement/CLDSRV-446/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-446/bump 2023-10-02 15:25:12 +00:00
Nicolas Humbert 9e2ad48c5c Merge remote-tracking branch 'origin/improvement/CLDSRV-446/bump' into w/8.6/improvement/CLDSRV-446/bump 2023-10-02 17:12:32 +02:00
bert-e 74f05377f0 Merge branch 'w/8.7/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.8/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:08 +00:00
bert-e 111e14cc89 Merge branch 'w/8.6/improvement/CLDSRV-446/listing-scanned-limit' into tmp/octopus/w/8.7/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 13:38:07 +00:00
Nicolas Humbert fd6fb5a26c Merge remote-tracking branch 'origin/improvement/CLDSRV-446/listing-scanned-limit' into w/8.6/improvement/CLDSRV-446/listing-scanned-limit 2023-10-02 15:30:02 +02:00
Florent Monjalet 00b20f00d1 Merge remote-tracking branch 'origin/development/8.8' into bugfix/CLDSRV-423-test-sproxyd 2023-10-02 13:45:58 +02:00
Florent Monjalet a91d53a12c CLDSRV-423: test distinct and overwriting PUTs 2023-09-27 11:58:20 +02:00
Florent Monjalet 63d2637046 CLDSRV-423: improve async series usage in test 2023-09-27 11:50:44 +02:00
Maha Benzekri 5d416ad190
Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-444-id-resource-policy' into w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:49:03 +02:00
Maha Benzekri ff29cda03f
Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-444-id-resource-policy' into w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:47:33 +02:00
Maha Benzekri 5685b2e972
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-444-id-resource-policy' into w/8.6/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 11:45:19 +02:00
Florent Monjalet cb8baf2dab CLDSRV-423: provide a proper dockerfile for test sproxyd 2023-09-27 11:36:49 +02:00
bert-e 22f470c6eb Merge branch 'w/8.7/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.8/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:17 +00:00
bert-e e510473116 Merge branch 'w/8.6/bugfix/CLDSRV-444-id-resource-policy' into tmp/octopus/w/8.7/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 08:28:16 +00:00
Maha Benzekri d046e8a294
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-444-id-resource-policy' into w/8.6/bugfix/CLDSRV-444-id-resource-policy 2023-09-27 10:27:47 +02:00
Florent Monjalet 17a6808fe4 CLDSRV-423: bump arsenal and sproxydclient to fix SPRXCLT-12 2023-08-31 19:07:44 +02:00
Florent Monjalet df646e4802 CLDSRV-423: disable failing tests that have just been reenabled
They had been disabled for a long while and cannot be reenabled yet
because they don't pass, so keep on skipping them for now.

Tickets have been created to take care of them:

- CLDSRV-440
- CLDSRV-441
- CLDSRV-442
- CLDSRV-443
2023-08-31 19:06:34 +02:00
Florent Monjalet 267770d256 CLDSRV-423: reproduce SPRXCLT-12 more often 2023-08-31 19:06:34 +02:00
Florent Monjalet 1b92dc2c05 CLDSRV-423: perform two successive puts in multiple backend tests
This tests for the SPRXCLT-12 issue
2023-08-31 19:06:34 +02:00
Florent Monjalet f80bb2f34b CLDSRV-423: don't run sproxyd test when testing Ceph 2023-08-31 19:06:34 +02:00
Florent Monjalet 4f89b67bb9 CLDSRV-423: Add missing mock logger method 2023-08-31 19:06:34 +02:00
Florent Monjalet 8b5630923c CLDSRV-423: refactor multiple backend put tests to avoid duplication 2023-08-31 19:06:34 +02:00
Florent Monjalet 9ff5e376e5 CLDSRV-423: reenable a good chunk of multiple backend tests 2023-08-31 19:06:34 +02:00
Florent Monjalet a9b5a2e3a4 CLDSRV-423: add put test for sproxyd 2023-08-31 19:06:34 +02:00
Florent Monjalet 7e9ec22ae3 CLDSRV-423: deploy sproxyd for multiple backend tests 2023-08-31 19:06:34 +02:00
bert-e 9d4664ae06 Merge branch 'w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.8/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
bert-e 662265ba2e Merge branch 'w/8.6/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.7/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:30 +00:00
bert-e c7da82dda7 Merge branch 'w/7.70/bugfix/CLDSRV-439/bump_arsenal_for_bugfix' into tmp/octopus/w/8.6/bugfix/CLDSRV-439/bump_arsenal_for_bugfix 2023-08-30 16:44:29 +00:00
Taylor McKinnon 17e4f14f9c Merge remote-tracking branch 'origin/w/8.7/bugfix/CLDSRV-413/bump_version' into w/8.8/bugfix/CLDSRV-413/bump_version 2023-08-18 10:10:01 -07:00
Taylor McKinnon 014b071536 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-413/bump_version' into w/8.7/bugfix/CLDSRV-413/bump_version 2023-08-18 10:07:14 -07:00
Taylor McKinnon 9130f323d4 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-413/bump_version' into w/8.6/bugfix/CLDSRV-413/bump_version 2023-08-18 10:05:33 -07:00
bert-e 2d45f92ae1 Merge branches 'w/8.8/feature/CLDSRV-420/backport' and 'q/5268/8.7/feature/CLDSRV-420/backport' into tmp/octopus/q/8.8 2023-08-18 14:53:18 +00:00
bert-e 48452496fa Merge branches 'w/8.7/feature/CLDSRV-420/backport' and 'q/5268/8.6/feature/CLDSRV-420/backport' into tmp/octopus/q/8.7 2023-08-18 14:53:18 +00:00
bert-e b89773eba6 Merge branch 'q/5268/7.70/feature/CLDSRV-420/backport' into tmp/normal/q/8.6 2023-08-18 14:53:18 +00:00
bert-e 18bf6b8d4a Merge branch 'w/8.7/feature/CLDSRV-420/backport' into tmp/octopus/w/8.8/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
bert-e 858c31a542 Merge branch 'w/8.6/feature/CLDSRV-420/backport' into tmp/octopus/w/8.7/feature/CLDSRV-420/backport 2023-08-18 11:19:15 +00:00
Nicolas Humbert 75a759de27 Merge remote-tracking branch 'origin/feature/CLDSRV-420/backport' into w/8.6/feature/CLDSRV-420/backport 2023-08-18 12:57:48 +02:00
bert-e 19d3e0bc9d Merge branch 'w/8.7/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.8/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:03:00 +00:00
bert-e bac044dc8f Merge branch 'w/8.6/bugfix/CLDSRV-413/crr_existing_null_version' into tmp/octopus/w/8.7/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 17:02:59 +00:00
Taylor McKinnon 8f77cd18c8 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-413/crr_existing_null_version' into w/8.6/bugfix/CLDSRV-413/crr_existing_null_version 2023-08-17 10:02:29 -07:00
bert-e 8c0f709014 Merge branch 'bugfix/CLDSRV-422' into tmp/octopus/w/8.8/bugfix/CLDSRV-422 2023-08-16 11:46:43 +00:00
Francois Ferrand ce92d33a5d
Fix use of http_requests_total metrics
It was missed when metric names were updated. In addition, the dashboard
was not up to date with the Python source and needed to be regenerated.

Issue: CLDSRV-422
2023-08-14 14:36:14 +02:00
Kerkesni 0381cce85c
Merge remote-tracking branch 'origin/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring' into w/8.8/improvement/CLDSRV-408-Fix-metadata-getting-deleted-when-restoring 2023-08-10 16:07:42 +02:00
Kerkesni 20a08a2a4e
bump version to 8.7.26 2023-08-10 16:04:25 +02:00
Kerkesni ff73d8ab12
add tests for keeping object properties after restore
Issue: CLDSRV-408
2023-08-10 16:03:58 +02:00
Kerkesni 1ee44bc6d3
keep same object properties after a restore of a cold object
Object properties such as ACLs and custom user metadata should
not be removed after the restore of a cold object.

Issue: CLDSRV-408
2023-08-10 12:58:18 +02:00
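For illustration, a minimal sketch of the idea this commit describes, with hypothetical names (`buildRestoredObjectMD` is not from the codebase): the metadata written for the restored object starts from the cold object's metadata, so ACLs and custom user metadata carry over.

```typescript
// Hypothetical sketch: preserve the archived (cold) object's properties when
// writing the restored object's metadata. Names and shapes are illustrative.
type ObjectMD = Record<string, unknown>;

function buildRestoredObjectMD(coldMD: ObjectMD, restoreUpdates: ObjectMD): ObjectMD {
    // Start from the existing (cold) metadata so ACLs and x-amz-meta-* keys
    // are not lost, then apply only the restore-specific fields on top.
    return { ...coldMD, ...restoreUpdates };
}
```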
bert-e 614e876536 Merge branches 'w/8.8/improvement/CLDSRV-400' and 'q/5191/8.7/improvement/CLDSRV-400' into tmp/octopus/q/8.8 2023-08-09 16:42:42 +00:00
bert-e b40a77d94b Merge branch 'improvement/CLDSRV-400' into q/8.7 2023-08-09 16:42:42 +00:00
bert-e 3a3a73b756 Merge branch 'improvement/CLDSRV-400' into tmp/octopus/w/8.8/improvement/CLDSRV-400 2023-08-09 16:19:33 +00:00
bert-e 3f6e85590d Merge branches 'w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.8 2023-08-07 17:27:19 +00:00
bert-e de589a07e8 Merge branches 'w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.6 2023-08-07 17:27:18 +00:00
bert-e bc009945d2 Merge branches 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' and 'q/5243/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/q/8.7 2023-08-07 17:27:18 +00:00
bert-e 3ac30d9bab Merge branch 'w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.8/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e 32204fbfbf Merge branch 'w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.7/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:46 +00:00
bert-e b1eda2a73a Merge branch 'w/7.70/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710' into tmp/octopus/w/8.6/bugfix/CLDSRV-418/CLDSRV_196_backport_to_710 2023-07-20 16:27:45 +00:00
bert-e 5a26e1a80d Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 507a2d4ff5 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:27:00 +00:00
bert-e 8cdd35950b Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-20 08:26:59 +00:00
bert-e 1207a6fb70 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e 5883286864 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:28 +00:00
bert-e b206728342 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-19 08:45:27 +00:00
bert-e 2a37e809d9 Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:22 +00:00
bert-e 86ce7691cd Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:21 +00:00
bert-e c04f663480 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-18 12:47:21 +00:00
bert-e e466b5e92a Merge branch 'w/8.7/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.8/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:16 +00:00
bert-e a4bc10f730 Merge branch 'w/8.6/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.7/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:15 +00:00
bert-e e826033bf0 Merge branch 'w/7.70/improvement/CLDSRV-411-impose-last-modified' into tmp/octopus/w/8.6/improvement/CLDSRV-411-impose-last-modified 2023-07-17 16:54:15 +00:00
Nicolas Humbert c480301e95 Merge remote-tracking branch 'origin/improvement/CLDSRV-414/bump' into w/8.8/improvement/CLDSRV-414/bump 2023-07-14 15:52:57 -04:00
Nicolas Humbert 276be285cc CLDSRV-414 bump version 2023-07-14 15:47:13 -04:00
bert-e 897d41392a Merge branch 'w/8.7/bugfix/CLDSRV-412/null' into tmp/octopus/w/8.8/bugfix/CLDSRV-412/null 2023-07-14 14:08:38 +00:00
bert-e f4e3a19d61 Merge branch 'bugfix/CLDSRV-412/null' into tmp/octopus/w/8.7/bugfix/CLDSRV-412/null 2023-07-14 14:08:37 +00:00
Nicolas Humbert ee84a03d2c bump arsenal 2023-07-14 09:49:30 -04:00
Nicolas Humbert 98f855f997 CLDSRV-412 Test null version in Lifecycle list of non-current versions 2023-07-14 09:48:47 -04:00
williamlardier 7c52fcbbb0
CLDSRV-402: bump project version 2023-07-13 17:45:06 +02:00
bert-e da52688a39 Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:26 +00:00
bert-e 1cb54a66f8 Merge branch 'w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:12:25 +00:00
williamlardier 0bb61ddb5b
Merge remote-tracking branch 'origin/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 17:12:05 +02:00
bert-e d9fffdad9e Merge branch 'w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into tmp/octopus/w/8.8/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 13:08:26 +00:00
williamlardier 389c32f819
Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.7/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 15:06:34 +02:00
williamlardier c2df0bd3eb
Merge remote-tracking branch 'origin/improvement/CLDSRV-402-optimize-multiobjectdelete-api' into w/8.6/improvement/CLDSRV-402-optimize-multiobjectdelete-api 2023-07-13 14:22:48 +02:00
Kerkesni d26b8bcfcc
test keeping same storage class when restoring a cold object
Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
Kerkesni e4634621ee
keep storage class as cold for restored objects
To be compliant with the AWS S3 standard, the storage class
of restored objects should be left as the cold location

Issue: CLDSRV-400
2023-06-23 11:22:10 +02:00
williamlardier 0b58b3ad2a
CLDSRV390: bump mongodb to 4.4 2023-06-22 16:56:53 +02:00
bert-e 652bf92536 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:43 +00:00
bert-e c5b1ef63ee Merge branch 'w/7.70/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 13:14:42 +00:00
bert-e 344ee8a014 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:44:35 +00:00
bert-e 5d7a434306 Merge branch 'w/7.70/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:44:35 +00:00
bert-e b7e7f65d52 Merge branch 'w/8.6/improvement/CLDSRV-409-fix-python-version' into tmp/octopus/w/8.7/improvement/CLDSRV-409-fix-python-version 2023-06-22 12:37:53 +00:00
williamlardier d00320a8ba
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-409-fix-python-version' into w/8.6/improvement/CLDSRV-409-fix-python-version 2023-06-22 14:36:28 +02:00
bert-e c5b7450a4d Merge branches 'w/8.7/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.7 2023-06-12 22:01:22 +00:00
bert-e eb5affdced Merge branches 'w/8.6/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/8.5/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.6 2023-06-12 22:01:21 +00:00
bert-e cdaf6db929 Merge branches 'w/8.5/improvement/CLDSRV-388-implement-GHAS' and 'q/5169/7.70/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/q/8.5 2023-06-12 22:01:21 +00:00
Nicolas Humbert 18c8d4ecac CLDSRV-404 bump version 2023-06-09 11:48:56 -04:00
Nicolas Humbert c8150c6857 CLDSRV-397 Introduce the time-progression-factor flag
The "time-progression-factor" variable serves as a testing-specific feature that accelerates the progression of time within a system.
By reducing the significance of each day, it enables the swift execution of specific actions, such as expiration, transition, and object locking, which are typically associated with longer timeframes.

This capability allows for efficient testing and evaluation of outcomes, optimizing the observation of processes that would normally take days or even years.
It's important to note that this variable is intended exclusively for testing purposes and is not employed in live production environments, where real-time progression is crucial for accurate results.
2023-06-08 12:14:36 -04:00
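As an illustration of the mechanism described above (not the actual cloudserver code; names are assumptions), a time-progression factor of N simply makes one "day" elapse N times faster, so day-scale lifecycle behaviour can be exercised in seconds during tests.

```typescript
// Illustrative sketch: scale the duration of a "day" by the test-only
// time-progression factor so expiration/transition/locking can be observed quickly.
const ONE_DAY_MS = 24 * 60 * 60 * 1000;

function scaledDayMs(timeProgressionFactor: number): number {
    // With a factor of 1 (the production behaviour), a day keeps its real duration.
    return ONE_DAY_MS / Math.max(1, timeProgressionFactor);
}

// Example: with a factor of 86400, "30 days" of expiration elapse in 30 seconds.
const msUntilExpiration = 30 * scaledDayMs(86400);
```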
bert-e 399a2a53ab Merge branch 'improvement/CLDSRV-399/addWorkflowDispatch' into q/8.7 2023-06-05 20:39:18 +00:00
Alexander Chan bbad049b5f CLDSRV-399: add workflow_dispatch 2023-06-05 11:30:35 -07:00
bert-e 2a4e2e1584 Merge branch 'w/8.6/improvement/CLDSRV-398/bump' into tmp/octopus/w/8.7/improvement/CLDSRV-398/bump 2023-06-02 20:19:28 +00:00
bert-e 08e43f5084 Merge branch 'w/8.5/improvement/CLDSRV-398/bump' into tmp/octopus/w/8.6/improvement/CLDSRV-398/bump 2023-06-02 20:19:27 +00:00
Nicolas Humbert cc153c99d6 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-398/bump' into w/8.5/improvement/CLDSRV-398/bump 2023-06-02 15:58:58 -04:00
bert-e b304d05614 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:27 +00:00
bert-e 751f6ce559 Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:26 +00:00
bert-e 0330597679 Merge branch 'w/7.70/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 18:31:26 +00:00
bert-e 004bd63368 Merge branch 'w/8.6/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 16:12:33 +00:00
bert-e e047ae6fbb Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 16:12:32 +00:00
Nicolas Humbert ebca8dd05e Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-396/put-metadata-null' into w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-02 12:09:50 -04:00
Nicolas Humbert 960d736962 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-396/put-metadata-null' into w/8.7/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 11:24:34 -04:00
bert-e 11098dd113 Merge branch 'w/8.5/bugfix/CLDSRV-396/put-metadata-null' into tmp/octopus/w/8.6/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 15:13:36 +00:00
Nicolas Humbert 9cc7362fbd Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-396/put-metadata-null' into w/8.5/bugfix/CLDSRV-396/put-metadata-null 2023-06-01 08:27:34 -04:00
KillianG 32401c9a83
bump 8.7.23 2023-05-30 09:40:36 +00:00
KillianG 5f05b676cc
Merge remote-tracking branch 'origin/development/8.7' into HEAD 2023-05-26 09:46:21 +00:00
KillianG fd662a8c2c
Bump arsenal 8.1.101 and test that delete markers are not listed when bucket versioning is suspended
Issue: CLDSRV-347
2023-05-26 08:46:42 +00:00
bert-e 5d54dd58be Merge branch 'bugfix/CLDSRV-393' into q/8.7 2023-05-25 19:47:24 +00:00
Nicolas Humbert 1bd0deafcf CLDSRV-395 bump to 8.7.21 2023-05-25 14:02:47 -04:00
Francois Ferrand 7c788d3dbf Bump github actions
Issue: CLDSRV-393
2023-05-25 14:02:47 -04:00
Nicolas Humbert 50cb6a2bf1 CLDSRV-374 putMetadata API route is not updating null version properly
Instead of using the provided "null" value, the metadata "null version id" is now used when updating the metadata of a null version.
2023-05-25 09:40:20 -04:00
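A hedged sketch of the idea behind this fix (helper name and metadata shape are hypothetical): when the caller targets the "null" version, resolve it to the null version id recorded in the master metadata instead of using the literal string.

```typescript
// Hypothetical sketch: map a requested version id of "null" to the internal
// null version id stored in the master metadata before updating it.
function resolveVersionId(requestVersionId: string, masterMD: { nullVersionId?: string }): string | undefined {
    if (requestVersionId === 'null') {
        // Use the real id of the null version kept in the master metadata.
        return masterMD.nullVersionId;
    }
    return requestVersionId;
}
```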
bert-e 58f7bb2877 Merge branch 'w/8.6/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.7/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:13 +00:00
bert-e f899337284 Merge branch 'w/8.5/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.6/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:13 +00:00
bert-e b960a913ec Merge branch 'w/7.70/improvement/CLDSRV-388-implement-GHAS' into tmp/octopus/w/8.5/improvement/CLDSRV-388-implement-GHAS 2023-05-24 22:42:12 +00:00
Francois Ferrand ea284508d7
Update x-amz-restore when updating the expiry date
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 0981fa42f3
Add version name in release runs
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 7e63064a52
Bump github actions
Issue: CLDSRV-393
2023-05-24 19:20:52 +02:00
Francois Ferrand 71b21e40ca
Add eslint rule to prevent exclusive tests
Lint will fail if it finds any `describe.only` or `it.only`.

Issue: CLDSRV-393
2023-05-24 17:14:24 +02:00
Francois Ferrand ff894bb545
Remove describe.only
This should never have been committed, as it disables most unit tests from
CI.

This caused some tests to actually fail:
* bad import of the refactored `objectDelete` API
* getting an object while transitioning (archiving) is allowed

Issue: CLDSRV-393
2023-05-24 17:09:33 +02:00
Francois Ferrand ae9f24e1bb
Update expiry date on s3:restore on restored object
If the object is already restored, we simply need to update the expiry
date, as per AWS docs:
> After restoring an archived object, you can update the restoration
> period by reissuing the request with a new period. Amazon S3 updates
> the restoration period relative to the current time.

Issue: CLDSRV-393
2023-05-24 16:52:45 +02:00
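A minimal sketch of the computation this commit describes (not the actual implementation; the helper name is an assumption): when the object is already restored, the new restore request only moves the expiry date relative to the current time.

```typescript
// Illustrative sketch: recompute the restore expiry relative to "now",
// as per the AWS documentation quoted above, without redoing the restore.
function updateRestoreExpiry(requestedDays: number, now: Date = new Date()): Date {
    const ONE_DAY_MS = 24 * 60 * 60 * 1000;
    return new Date(now.getTime() + requestedDays * ONE_DAY_MS);
}
```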
bert-e 2dc01ce3ed Merge branch 'w/8.7/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/q/8.7 2023-05-15 16:39:05 +00:00
Kerkesni 9bd9bef6c7
bump version in package.json to 8.7.20
Issue: CLDSRV-386
2023-05-11 10:34:27 +02:00
bert-e 3a8bbefb6c Merge branch 'w/8.5/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.6/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:27:25 +00:00
bert-e a6a5c273d5 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 14:27:25 +00:00
Dimitrios Vasilas c329d9684d Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-370-build-dev-img-release' into w/8.5/improvement/CLDSRV-370-build-dev-img-release 2023-05-10 16:23:51 +02:00
Kerkesni 6479076fec
bump node version to 16.20 in Dockerfile
Issue: CLDSRV-386
2023-05-10 13:35:54 +02:00
bert-e c436e2657c Merge branch 'w/8.5/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.6/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:22:48 +00:00
bert-e df45f481d0 Merge branch 'w/8.6/improvement/CLDSRV-370-build-dev-img-release' into tmp/octopus/w/8.7/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 17:22:48 +00:00
Dimitrios Vasilas 406f3f0093 Revert "CLDSRV-370: docker-entrypoint: make bucketd bootstrap configurable"
This reverts commit 1d76f61d88.
2023-05-09 19:19:47 +02:00
Dimitrios Vasilas 6952b91539 CLDSRV-370: Pin virtualenv version to 20.21.0
Virtualenv setup fails with the latest version (20.23)
2023-05-09 19:18:40 +02:00
Dimitrios Vasilas eea1ebb5ec Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-370-build-dev-img-release' into w/8.5/improvement/CLDSRV-370-build-dev-img-release 2023-05-09 19:17:35 +02:00
bert-e cd8c589eba Merge branch 'improvement/CLDSRV-375/exclude-keys' into tmp/octopus/w/8.7/improvement/CLDSRV-375/exclude-keys 2023-04-28 18:20:48 +00:00
williamlardier daec2661ae
CLDSRV-385: use mongodb v4.2 for the CI 2023-04-21 15:03:01 +02:00
Francois Ferrand 0f266371a0
Bump version 8.7.18
Issue: CLDSRV-383
2023-04-17 23:36:28 +02:00
Francois Ferrand 73e56963bf
Fix originOp when deleting a version
DeleteMarkerCreated was sent instead of the expected Delete, which breaks
bucket notifications.

Issue: CLDSRV-383
2023-04-17 23:14:49 +02:00
Nicolas Humbert 4c189b2d9e CLDSRV-375 Exclude already transitioned keys from lifecycle listings 2023-04-16 21:54:16 -07:00
Alexander Chan fb11d0f42e Merge remote-tracking branch 'origin/feature/CLDSRV-368/addBackbeatRouteForIndexingOps' into w/8.7/feature/CLDSRV-368/addBackbeatRouteForIndexingOps 2023-04-14 18:35:38 -07:00
Alexander Chan fe6690da92 bump arsenal 2023-04-14 18:08:42 -07:00
williamlardier 9cbd9f7be8
CLDSRV-381: bump project version 2023-04-14 22:29:03 +02:00
williamlardier c2fc8873cb
CLDSRV-381: bump arsenal 2023-04-14 22:28:47 +02:00
Francois Ferrand bee1ae04bf
Bump version 8.7.15
Issue: CLDSRV-380
2023-04-14 09:06:04 +02:00
Francois Ferrand eb86552a57
Allow reading transition-in-progress objects
This “transition in progress” state does not exist in AWS S3 (so we have no reference), and we need to access the data for the cold storage framework.

When the transition has been performed, the archive id and storage class will be updated first (as well as clearing the ‘transitioning’ flag) before triggering the “GC” to remove the (local) data.

So we are sure that data is available in this state, and that simply checking that the object is in cold storage is enough.

Issue: CLDSRV-380
2023-04-14 09:02:32 +02:00
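A hedged sketch of the ordering argument above (field names are assumptions, not the real metadata model): reads stay safe because the "transitioning" flag is only cleared, and the cold location recorded, before the local data is garbage-collected.

```typescript
// Illustrative sketch: local data is readable while the transition is still
// in progress, or while no cold location has been recorded yet.
interface TransitionMD {
    transitionInProgress?: boolean;
    coldLocation?: string;
}

function canReadLocalData(md: TransitionMD): boolean {
    return md.transitionInProgress === true || md.coldLocation === undefined;
}
```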
Alexander Chan 80fbf78d62 CLDSRV-368: add indexing routes 2023-04-13 15:17:03 -07:00
bert-e f5d8f2fac5 Merge branch 'w/8.6/feature/CLDSRV-359-passGetDeleteMarkerFlag' into tmp/octopus/w/8.7/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 19:07:07 +00:00
bert-e b1e13d6efa Merge branch 'w/8.5/feature/CLDSRV-359-passGetDeleteMarkerFlag' into tmp/octopus/w/8.6/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 19:07:07 +00:00
Jonathan Gramain e7ef437b27 Merge remote-tracking branch 'origin/feature/CLDSRV-359-passGetDeleteMarkerFlag' into w/8.5/feature/CLDSRV-359-passGetDeleteMarkerFlag 2023-04-13 11:42:08 -07:00
bert-e 36e841b542 Merge branches 'w/8.7/feature/CLDSRV-355-activateNullKeys' and 'q/5069/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/q/8.7 2023-04-13 18:35:42 +00:00
bert-e a2404ed622 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/q/8.6 2023-04-13 18:35:41 +00:00
williamlardier 1d12a430a0
CLDSRV-376: bump to 8.7.13 2023-04-13 16:59:28 +02:00
williamlardier bea27b4fb4
CLDSRV-376: update mongoClient used in functional tests 2023-04-13 16:59:13 +02:00
williamlardier 76405d9179
CLDSRV-376: bump mongodb driver 2023-04-13 16:59:12 +02:00
Alexander Chan 31b7f1e71c bump version 2023-04-12 15:36:00 -07:00
Alexander Chan 8674cac9f8 CLDSRV-379: bump arsenal 2023-04-12 15:35:27 -07:00
KillianG d5b666a246
Better indentation; use the isExpiration boolean only in the first function, then use the originOp string directly afterwards
Issue: CLDSRV-367
2023-04-11 14:59:02 +00:00
KillianG 4360772971
Improve the way we pass originOp to make it clearer
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 6e152e33d5
Use boolean in parameter instead of hardcoded originOP
Issue: CLDSRV-367
2023-04-11 13:43:37 +00:00
KillianG 94f34979a5
add origin op to all delete object calls
Issue: CLDSRV-367
2023-04-11 13:43:36 +00:00
bert-e 4be430c313 Merge branch 'improvement/CLDSRV-372/vid' into q/8.6 2023-04-07 18:35:02 +00:00
bert-e 4b0f165b46 Merge branches 'w/8.7/improvement/CLDSRV-372/vid' and 'q/5109/8.6/improvement/CLDSRV-372/vid' into tmp/octopus/q/8.7 2023-04-07 18:35:02 +00:00
Nicolas Humbert 3590377554 Merge remote-tracking branch 'origin/improvement/CLDSRV-372/vid' into w/8.7/improvement/CLDSRV-372/vid 2023-04-07 07:58:01 -04:00
Nicolas Humbert f7f77c6cd2 CLDSRV-372 Current lifecycle versions should include version id 2023-04-06 19:09:04 -04:00
bert-e 8a08f97492 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 18:16:48 +00:00
bert-e a908d09cc8 Merge branch 'w/8.5/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-355-activateNullKeys 2023-04-05 18:16:47 +00:00
Jonathan Gramain 170a68a4f8 CLDSRV-355 [8.5+] fixup problematic automerge
Restore missing `require('../Config')` in lib/api/objectDelete.js
2023-04-05 10:57:42 -07:00
bert-e 448afa50e3 Merge branch 'w/8.6/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:47 +00:00
bert-e a0fff19611 Merge branch 'w/8.5/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:46 +00:00
bert-e 6ad1643ba8 Merge branch 'w/8.4/feature/CLDSRV-355-activateNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-355-activateNullKeys 2023-04-05 00:48:45 +00:00
Jonathan Gramain 5ce253ef62 Merge remote-tracking branch 'origin/feature/CLDSRV-355-activateNullKeys' into w/8.4/feature/CLDSRV-355-activateNullKeys 2023-04-04 17:27:11 -07:00
bert-e 5dd8d9057a Merge branch 'w/8.5/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 50b738cfff Merge branch 'w/8.6/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 2be3ce21c7 Merge branch 'w/8.4/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:49 +00:00
bert-e 70ff6fc4ee Merge branch 'feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys' into tmp/octopus/w/8.4/feature/CLDSRV-358-preprocessingVersioningDeleteNullKeys 2023-04-04 22:55:48 +00:00
bert-e c5214d19a6 Merge branch 'w/8.5/feature/CLDSRV-378-forceEnableNullCompatMode' into tmp/octopus/w/8.6/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 22:27:20 +00:00
bert-e 951a98fcaf Merge branch 'w/8.6/feature/CLDSRV-378-forceEnableNullCompatMode' into tmp/octopus/w/8.7/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 22:27:20 +00:00
Jonathan Gramain ebb0fed48a Merge remote-tracking branch 'origin/feature/CLDSRV-378-forceEnableNullCompatMode' into w/8.5/feature/CLDSRV-378-forceEnableNullCompatMode 2023-04-04 15:17:14 -07:00
Jonathan Gramain 5f85c14ab9 CLDSRV-378 [8.x] force null version compat mode
force null version compatibility mode to be enabled, so that
Cloudserver stays compatible with a MongoDB backend that does not support
null keys.

Remove the associated aws-sdk functional test suite related to
compatibility mode
2023-04-04 14:39:00 -07:00
bert-e 8ca770dcb7 Merge branch 'w/8.6/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into tmp/octopus/w/8.7/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 21:28:15 +00:00
bert-e 7923977300 Merge branch 'w/8.5/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into tmp/octopus/w/8.6/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 21:28:14 +00:00
Jonathan Gramain 9fb232861f Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into w/8.5/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 14:03:37 -07:00
Jonathan Gramain 6cf4e291fa Merge remote-tracking branch 'origin/feature/CLDSRV-357-versioningPreprocessingNullKeySupport' into w/8.4/feature/CLDSRV-357-versioningPreprocessingNullKeySupport 2023-04-04 13:21:30 -07:00
bert-e 3585b8d5eb Merge branch 'w/8.6/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
bert-e 9331c0a375 Merge branch 'w/8.5/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
bert-e 70f368408d Merge branch 'w/8.4/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 17:12:28 +00:00
Jonathan Gramain 4285c18e44 Merge remote-tracking branch 'origin/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys' into w/8.4/feature/CLDSRV-353-modifyPreprocessingVersioningForNullKeys 2023-04-04 09:49:28 -07:00
bert-e 0a1489ee46 Merge branch 'w/8.6/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.7/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:59 +00:00
bert-e 71f80544ac Merge branch 'w/8.5/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.6/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:58 +00:00
bert-e 270080a75b Merge branch 'w/8.4/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.5/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:58 +00:00
bert-e 74717b2acb Merge branch 'w/7.70/improvement/CLDSRV-373-func-test-reproducing-s3c-5139' into tmp/octopus/w/8.4/improvement/CLDSRV-373-func-test-reproducing-s3c-5139 2023-04-03 16:34:57 +00:00
Xin LI de5b4331e2 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 11:00:24 +02:00
Xin LI e1a4f1ef8c bugfix: CLDSRV-365 bump 2023-03-31 10:58:07 +02:00
bert-e 46dff0321d Merge branch 'w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.7/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:07 +00:00
bert-e f3c7580510 Merge branch 'w/8.4/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.5/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:06 +00:00
bert-e 2145bb3ae3 Merge branch 'w/8.5/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into tmp/octopus/w/8.6/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 08:54:06 +00:00
Xin LI 468162c81c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted' into w/8.4/bugfix/CLDSRV-365-fix-legal-hold-can-be-deleted 2023-03-31 10:53:21 +02:00
bert-e ddc6ea72be Merge branch 'improvement/CLDSRV-371/etag' into tmp/octopus/w/8.7/improvement/CLDSRV-371/etag 2023-03-29 20:22:38 +00:00
Nicolas Humbert f20bf1becf CLDSRV-371 ETag should be surrounded by double quotes 2023-03-29 16:16:52 -04:00
bert-e d31c773e77 Merge branch 'w/8.4/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.5/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e d266ff4e9f Merge branch 'w/8.6/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.7/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e 6ff21996f5 Merge branch 'w/8.5/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.6/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:17 +00:00
bert-e 15d1b3ba86 Merge branch 'w/7.70/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest' into tmp/octopus/w/8.4/improvement/CLDSRV-369-versionSpecificDeleteWithNullFuncTest 2023-03-28 21:14:16 +00:00
bert-e 7dc2f07cb6 Merge branch 'w/8.7/improvement/CLDSRV-366/clear' into tmp/octopus/q/8.7 2023-03-28 13:25:15 +00:00
Kerkesni 6c22d87c55
bump version to 8.7.11
Issue: CLDSRV-362
2023-03-28 12:25:11 +02:00
Kerkesni 310f67d3a7
throw error when getting a transitioning object
Issue: CLDSRV-362
2023-03-28 12:24:50 +02:00
Kerkesni 49841c5e0e
throw error when copying parts from a cold object
A cold object should not be allowed to be copied, as the data
is not accessible.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
Kerkesni b5334baca8
throw error when copying a cold or transitioning object
A cold object should not be allowed to be copied, as the data
is not accessible.

The same issue happens when copying an object that is transitioning:
the data might get deleted while the copy is still in progress.

Issue: CLDSRV-362
2023-03-28 12:24:49 +02:00
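For illustration only, a minimal sketch of the guard these commits describe (names and the exact error are assumptions): refuse to copy an object, or its parts, when the data lives only in cold storage or could disappear mid-copy because a transition is in progress.

```typescript
// Hypothetical sketch: reject copies of cold or transitioning objects.
function assertCopyable(md: { isCold?: boolean; transitionInProgress?: boolean }): void {
    if (md.isCold || md.transitionInProgress) {
        // Comparable to returning an error such as InvalidObjectState.
        throw new Error('InvalidObjectState: object data is not accessible for copy');
    }
}
```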
Kerkesni e592671b54
add helper to check if object is in cold storage
Issue: CLDSRV-362
2023-03-28 12:24:48 +02:00
bert-e 6e0b66849d Merge branch 'improvement/CLDSRV-366/clear' into tmp/octopus/w/8.7/improvement/CLDSRV-366/clear 2023-03-28 03:45:02 +00:00
Nicolas Humbert f2292f1ca3 CLDSRV-366 Clear list orphan delete markers response 2023-03-27 15:52:49 -04:00
bert-e 18a1bfd325 Merge branch 'w/8.6/improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.7/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 23:39:13 +00:00
bert-e c2b54702f6 Merge branch 'w/8.5/improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.6/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 23:39:12 +00:00
Jonathan Gramain 13a5e14da5 impr: CLDSRV-356 [8.5+] adapt overwritingVersioning() for archive
Due to the change in what processVersioningState() returns
(nullVersionId embedded in an "extraMD" field for clarity), modify the
overwritingVersioning() helper, which needs to have the same contract
as the former function.
2023-03-24 16:37:24 -07:00
Jonathan Gramain 891913fd16 Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-356-enhanceProcessVersioningState' into w/8.5/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 15:52:32 -07:00
bert-e 7baa2501e6 Merge branch 'improvement/CLDSRV-356-enhanceProcessVersioningState' into tmp/octopus/w/8.4/improvement/CLDSRV-356-enhanceProcessVersioningState 2023-03-24 22:50:02 +00:00
bert-e 2c999f4c10 Merge branch 'w/8.6/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.7/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:54 +00:00
bert-e b23472a754 Merge branch 'w/8.5/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.6/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:54 +00:00
bert-e a4999c1bfb Merge branch 'w/8.4/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.5/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:53 +00:00
bert-e fe0b0f8b2f Merge branch 'feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys' into tmp/octopus/w/8.4/feature/CLDSRV-354-modifyMetadataGetObjectForNullKeys 2023-03-24 19:46:53 +00:00
bert-e bf7a643d45 Merge branch 'w/8.6/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into tmp/octopus/w/8.7/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 19:07:15 +00:00
bert-e 874a53c767 Merge branch 'w/8.5/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into tmp/octopus/w/8.6/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 19:07:14 +00:00
Jonathan Gramain c7e1c6921b Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into w/8.5/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 11:43:47 -07:00
Jonathan Gramain 6d2d56bc1e Merge remote-tracking branch 'origin/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests' into w/8.4/feature/CLDSRV-349-nullVersionCompatModeConfigAndTests 2023-03-24 11:04:47 -07:00
bert-e 3f3bf0fdf0 Merge branches 'w/8.7/bugfix/CLDSRV-363/etag' and 'q/5078/8.6/bugfix/CLDSRV-363/etag' into tmp/octopus/q/8.7 2023-03-24 18:01:38 +00:00
bert-e 1922facb7b Merge branch 'bugfix/CLDSRV-363/etag' into q/8.6 2023-03-24 18:01:38 +00:00
bert-e 2a44949048 Merge branches 'development/8.7' and 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:06 +00:00
bert-e 1576352613 Merge branch 'w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:06 +00:00
bert-e 74978f423e Merge branch 'w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:05 +00:00
bert-e 6f4cd75d6f Merge branch 'w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-24 16:52:05 +00:00
bert-e 6660626190 Merge branch 'bugfix/CLDSRV-363/etag' into tmp/octopus/w/8.7/bugfix/CLDSRV-363/etag 2023-03-24 13:23:06 +00:00
Nicolas Humbert 049f52bf95 CLDSRV-363 ETag instead of Etag for lifecycle listings Contents 2023-03-23 16:51:12 -04:00
williamlardier 58fc0b7146
CLDSRV-350: bump to 8.7.10 2023-03-21 13:52:26 +01:00
williamlardier 11e3d7ecb2
CLDSRV-350: update veeam put and delete routes with new arsenal methods
We must ensure that concurrent updates of the bucket metadata won't conflict
with each other, by separately updating the capabilities fields. This change
ensures that two files can be uploaded at the same time without any problem,
regardless of the number of cloudserver instances.
2023-03-21 13:52:25 +01:00
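A hedged sketch of the concurrency idea described above (database, collection, and field names below are assumptions, not the real schema): each route updates only its own capability field instead of rewriting the whole bucket metadata document, so two concurrent uploads cannot overwrite each other's changes.

```typescript
// Illustrative sketch: a targeted $set on one nested field leaves sibling
// fields untouched, which is what makes concurrent updates safe.
import { MongoClient } from 'mongodb';

async function putCapabilityField(client: MongoClient, bucketName: string, fileKey: string, value: string) {
    await client.db('metadata').collection('buckets').updateOne(
        { _id: bucketName },
        { $set: { [`capabilities.VeeamSOSApi.${fileKey}`]: value } },
    );
}
```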
williamlardier 1bab851ce3
CLDSRV-350: bump arsenal version 2023-03-21 13:52:25 +01:00
bert-e 0bc0341f33 Merge branch 'w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.7/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:37 +00:00
bert-e b5af652dc8 Merge branch 'w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:37 +00:00
bert-e 6c29be5137 Merge branch 'w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 22:27:36 +00:00
Jonathan Gramain 2967f327ed Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion' into w/8.4/bugfix/CLDSRV-361-fixExceptionBatchDeleteNullVersion 2023-03-20 15:08:01 -07:00
bert-e b5b0f6482b Merge branch 'feature/CLDSRV-317/listLifecycleOrphans' into tmp/octopus/w/8.7/feature/CLDSRV-317/listLifecycleOrphans 2023-03-20 13:53:09 +00:00
Nicolas Humbert ec9ed94555 CLDSRV-317 Implement listLifecycleOrphans 2023-03-20 09:52:42 -04:00
bert-e 755f282f8e Merge branch 'feature/CLDSRV-316/listLifecycleNonCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-316/listLifecycleNonCurrents 2023-03-17 18:00:21 +00:00
Nicolas Humbert 41cc399d85 CLDSRV-316 Implement listLifecycleNonCurrents 2023-03-17 13:58:22 -04:00
bert-e c4dc928de2 Merge branch 'feature/CLDSRV-314/listLifecycleCurrents' into tmp/octopus/w/8.7/feature/CLDSRV-314/listLifecycleCurrents 2023-03-17 16:20:16 +00:00
Nicolas Humbert 6b8a2581b6 CLDSRV-314 Implement listLifecycleCurrents 2023-03-17 11:48:02 -04:00
Killian Gardahaut a0087e8d77
Bump 8.7.9
Issue: ZKOP-219
2023-03-17 09:58:21 +01:00
KillianG 8e5bea56b6
Refacto tests for more readability
Issue: CLDSRV-337
2023-03-17 09:58:21 +01:00
KillianG 976e349036
Add tests
Adding test for the function azureArchiveLocationConstraintAssert

Issue: CLDSRV-337
2023-03-17 09:58:16 +01:00
KillianG de1c23ac1b
Add test on location constraints to ensure the location is well configured
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 0b4d04a2a3
Add location azure archive to cold storage locations
Issue: CLDSRV-337
2023-03-17 09:56:35 +01:00
KillianG 049d396c8d
Add azure_archive location type
ISSUE: CLDSRV-337
2023-03-17 09:56:35 +01:00
Naren 5c04cbe6d1 Merge remote-tracking branch 'origin/w/8.6/improvement/CLDSRV-327-cloudserver-metrics' into w/8.7/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 22:36:59 -07:00
Naren d3e538087a Merge remote-tracking branch 'origin/w/8.5/improvement/CLDSRV-327-cloudserver-metrics' into w/8.6/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 22:05:26 -07:00
bert-e 7cc37c7f3d Merge branch 'w/8.4/improvement/CLDSRV-327-cloudserver-metrics' into tmp/octopus/w/8.5/improvement/CLDSRV-327-cloudserver-metrics 2023-03-17 03:50:44 +00:00
Naren 399d081d68 impr: CLDSRV-327 upgrade arsenal, bucketclient, prom-client, utapi, vaultclient 2023-03-16 20:33:03 -07:00
Naren c3fac24366 Merge remote-tracking branch 'origin/improvement/CLDSRV-327-cloudserver-metrics' into w/8.4/improvement/CLDSRV-327-cloudserver-metrics 2023-03-16 20:23:37 -07:00
bert-e 5cb63991a8 Merge branch 'w/8.6/improvement/CLDSRV-328-adapt-prescribed-metric-names' into tmp/octopus/w/8.7/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 16:30:18 +00:00
Naren d5b336d1d9 Merge remote-tracking branch 'origin/w/8.5/improvement/CLDSRV-328-adapt-prescribed-metric-names' into w/8.6/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 08:29:39 -08:00
bert-e 750223500d Merge branch 'improvement/CLDSRV-328-adapt-prescribed-metric-names' into tmp/octopus/w/8.5/improvement/CLDSRV-328-adapt-prescribed-metric-names 2023-03-02 15:31:17 +00:00
Naren 23ffbf77d2 impr: CLDSRV-328 fix ceph java deps installation 2023-03-02 07:29:22 -08:00
Naren 6ea18bcef4 impr: CLDSRV-328 adapt metric naming conventions 2023-03-02 06:44:45 -08:00
Alexander Chan c310cb3dd1 Merge remote-tracking branch 'origin/w/8.6/feature/CLDSRV-336/supportNewerNoncurrentVersions' into w/8.7/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-26 18:47:15 -08:00
bert-e 7fe0e2891b Merge branch 'w/8.5/feature/CLDSRV-336/supportNewerNoncurrentVersions' into tmp/octopus/w/8.6/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-27 02:43:32 +00:00
bert-e 93442fed68 Merge branch 'w/8.4/feature/CLDSRV-336/supportNewerNoncurrentVersions' into tmp/octopus/w/8.5/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-27 02:43:31 +00:00
Alexander Chan 21612cfadd Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-336/supportNewerNoncurrentVersions' into w/8.4/feature/CLDSRV-336/supportNewerNoncurrentVersions 2023-02-26 18:20:17 -08:00
bert-e 22cda51944 Merge branch 'w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/q/8.7 2023-02-22 16:39:53 +00:00
williamlardier 408d0de732
CLDSRV-343: bump cloudserver to the next version 2023-02-22 09:59:09 +01:00
williamlardier 83916c91fb
CLDSRV-343: enable back some CEPH backend tests
These tests also cover the ObjectTagging API with multiple backends.
Re-enabling them will allow us to better avoid issues like this
in the future.
2023-02-17 14:24:59 +01:00
bert-e 110b2a35ed Merge branch 'w/8.6/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.7/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:38 +00:00
bert-e 3b5f5875f3 Merge branch 'w/8.5/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.6/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:37 +00:00
bert-e bdaf92023f Merge branch 'w/8.4/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.5/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:37 +00:00
bert-e 25d1cd9601 Merge branch 'w/7.70/bugfix/CLDSRV-344-doubleCallbackInFuncTest' into tmp/octopus/w/8.4/bugfix/CLDSRV-344-doubleCallbackInFuncTest 2023-02-17 00:55:36 +00:00
williamlardier a8117ca037
CLDSRV-343: use bucket name for backend tagging operations 2023-02-16 15:51:49 +01:00
bert-e 9145d1cf79 Merge branches 'w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.7 2023-02-15 20:43:46 +00:00
bert-e 0fb54c9d31 Merge branches 'w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.6 2023-02-15 20:43:46 +00:00
bert-e 63dc33a339 Merge branches 'w/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.5 2023-02-15 20:43:45 +00:00
bert-e 49d46dfe04 Merge branches 'w/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' and 'q/5003/7.70/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/q/8.4 2023-02-15 20:43:44 +00:00
bert-e ae1b6dc3d1 Merge branch 'w/8.6/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.7/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
bert-e 162157580f Merge branch 'w/8.5/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.6/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
bert-e 4e4435d82e Merge branch 'w/8.4/feature/CLDSRV-342/bump-7.70.16' into tmp/octopus/w/8.5/feature/CLDSRV-342/bump-7.70.16 2023-02-14 20:05:16 +00:00
Alexander Chan b0db1f9a94 Merge remote-tracking branch 'origin/feature/CLDSRV-342/bump-7.70.16' into w/8.4/feature/CLDSRV-342/bump-7.70.16 2023-02-14 12:03:01 -08:00
bert-e b1304b5f7f Merge branches 'w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.7 2023-02-10 12:57:22 +00:00
bert-e c355422a7e Merge branches 'w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.6 2023-02-10 12:57:22 +00:00
bert-e d44334ad22 Merge branches 'w/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.5 2023-02-10 12:57:21 +00:00
bert-e 6e9c50eeba Merge branches 'w/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing' and 'q/5000/7.70/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/q/8.4 2023-02-10 12:57:21 +00:00
bert-e 6b1f8c61ec Merge branch 'w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.7/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:09 +00:00
bert-e a12d44dc18 Merge branch 'w/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.6/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:08 +00:00
bert-e d5ec32fc5c Merge branch 'w/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into tmp/octopus/w/8.5/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 23:05:08 +00:00
Jonathan Gramain e16da9ab11 Merge remote-tracking branch 'origin/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete' into w/8.4/improvement/CLDSRV-340-simplifyPreprocessingVersioningDelete 2023-02-09 14:43:41 -08:00
bert-e 335bfabed1 Merge branch 'w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.7/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e 3b92eaaef2 Merge branch 'w/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.6/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e a6fd8b2261 Merge branch 'w/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.5/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:15 +00:00
bert-e 00ab8d482d Merge branch 'w/7.70/bugfix/CLDSRV-338/fixMaxKeysV2Listing' into tmp/octopus/w/8.4/bugfix/CLDSRV-338/fixMaxKeysV2Listing 2023-02-09 19:30:14 +00:00
bert-e 3398db3c0f Merge branch 'w/8.6/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.7/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:19 +00:00
bert-e 00a793be6e Merge branch 'w/8.5/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.6/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:18 +00:00
bert-e 68bb824b57 Merge branch 'w/8.4/bugfix/CLDSRV-339-revert-S3C-7054' into tmp/octopus/w/8.5/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 23:08:18 +00:00
Jonathan Gramain 432680841e Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-339-revert-S3C-7054' into w/8.4/bugfix/CLDSRV-339-revert-S3C-7054 2023-02-06 14:52:41 -08:00
bert-e 836e9fb22d Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:46 +00:00
bert-e 9bc7fa49ea Merge branch 'w/8.5/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.6/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:45 +00:00
bert-e e3087fb940 Merge branch 'w/8.4/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.5/bugfix/CLDSRV-335-build-federation-image-tests 2023-02-02 09:21:45 +00:00
Dimitrios Vasilas 67e126320c Revert "CLDSRV-335: build federation image in tests workflow"
This reverts commit fd669664a6.
2023-02-02 04:21:00 -05:00
Dimitrios Vasilas 66520571d3 Revert "CLDSRV-335: build federation image in tests workflow"
This reverts commit fd669664a6.
2023-02-02 04:19:20 -05:00
bert-e ead7f5f7c2 Merge branch 'w/8.6/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.7/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:42 +00:00
bert-e fe636d22fc Merge branch 'w/8.5/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.6/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:41 +00:00
bert-e 6530e70761 Merge branch 'w/8.4/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.5/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:41 +00:00
bert-e 6d14bda3ed Merge branch 'w/7.70/bugfix/CLDSRV-335-build-federation-image-tests' into tmp/octopus/w/8.4/bugfix/CLDSRV-335-build-federation-image-tests 2023-01-31 10:46:40 +00:00
bert-e c17059dc77 Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:51 +00:00
bert-e b4617f1362 Merge branch 'w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:51 +00:00
bert-e 624d4708cf Merge branch 'w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:50 +00:00
bert-e 95c180e9d9 Merge branch 'w/7.70/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-27 17:21:50 +00:00
bert-e 8ace5b24a5 Merge branches 'development/8.7' and 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:48 +00:00
bert-e 4b1dcd531d Merge branch 'w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:47 +00:00
bert-e 13ef509cbc Merge branch 'w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-25 15:02:47 +00:00
Dimitrios Vasilas d4feda7bbd CLDSRV-333: remove parentheses around single function argument 2023-01-25 09:34:47 -05:00
bert-e 39f7035dbd Merge branch 'w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.7/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 14:13:42 +00:00
bert-e 7d3ab342f6 Merge branch 'w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into tmp/octopus/w/8.6/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 14:13:41 +00:00
Dimitrios Vasilas af60df4caf Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into w/8.5/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 09:12:54 -05:00
Dimitrios Vasilas 2acd7348d4 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-333-handle-MPU-delete-overwrite' into w/8.4/improvement/CLDSRV-333-handle-MPU-delete-overwrite 2023-01-24 09:08:52 -05:00
williamlardier bb62ed4fa7
CLDSRV-334: bump cloudserver to 8.7.7 2023-01-24 12:33:37 +01:00
williamlardier c95368858d
CLDSRV-334: bump arsenal to 8.1.82 2023-01-24 12:33:17 +01:00
bert-e d8ff1377fc Merge branch 'w/8.6/feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.7/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:31 +00:00
bert-e 425a9167ca Merge branch 'w/8.5/feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.6/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:30 +00:00
bert-e 2f21b9cc52 Merge branch 'feature/CLDSRV-329/migrateToGithubActions-8.x' into tmp/octopus/w/8.5/feature/CLDSRV-329/migrateToGithubActions-8.x 2023-01-20 02:29:30 +00:00
Alexander Chan d6433961a1 CLDSRV-329: adapt release step for 8.x 2023-01-19 18:28:59 -08:00
Alexander Chan 090b276f23 CLDSRV-329 migrate mongodb and ceph functional tests 2023-01-18 17:50:12 -08:00
Jonathan Gramain 28f4c5baee Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.7/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:49:44 -08:00
Jonathan Gramain 89a1c646ad Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.6/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:49:05 -08:00
Jonathan Gramain 5c249f0c56 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.5/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:48:27 -08:00
Jonathan Gramain c971669b9b Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix' into w/8.4/bugfix/CLDSRV-330-setNextMarkerToCommonPrefix 2023-01-14 11:47:23 -08:00
bert-e 0a8f846f4b Merge branch 'w/8.6/feature/CLDSRV-244/migrateToGithubActions' into tmp/octopus/w/8.7/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 22:54:04 +00:00
bert-e 045602fc00 Merge branch 'w/8.5/feature/CLDSRV-244/migrateToGithubActions' into tmp/octopus/w/8.6/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 22:54:04 +00:00
Alexander Chan 5048c1fef1 Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-244/migrateToGithubActions' into w/8.5/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 14:34:55 -08:00
Alexander Chan 1e95d108be Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-244/migrateToGithubActions' into w/8.4/feature/CLDSRV-244/migrateToGithubActions 2023-01-12 14:04:54 -08:00
Jonathan Gramain ac5de47ca1 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-312-bumpArsenal' into w/8.7/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 16:03:10 -08:00
Jonathan Gramain 3c0f3e671a Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-312-bumpArsenal' into w/8.6/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 13:48:58 -08:00
Jonathan Gramain a3dc3f9fb8 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-312-bumpArsenal' into w/8.5/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 13:44:46 -08:00
Jonathan Gramain e4bf9500a3 Merge branch 'w/7.70/bugfix/CLDSRV-312-bumpArsenal' into w/8.4/bugfix/CLDSRV-312-bumpArsenal 2023-01-09 12:41:33 -08:00
williamlardier c147785464
CLDSRV-322: bump cloudserver version 2023-01-06 09:04:04 +01:00
williamlardier ca8c788757
CLDSRV-322: code improvements 2023-01-06 09:04:03 +01:00
williamlardier cb2af364bb
CLDSRV-322: Implement test for custom routes
Unit and functional tests are implemented to test the custom routes.
The LISTing is not yet tested, as it requires more changes to
generate a valid signature from Mocha.
2023-01-05 15:31:33 +01:00
williamlardier 1eb27d610b
CLDSRV-322: Support custom files for MultiObjectDelete
MultiObjectDelete is implemented by the product UI to delete the
files in buckets. This method is a POST that relies on the request
body to filter the objects, hence it is not possible to filter
it with an ingress rule in nginx.

The implementation tries to avoid adding any complexity
by extending existing loops, and implementing a new step if eligible
files are found (see the sketch after this entry).

These files are extracted from the Veeam route list of accepted files,
but this implementation might change if more custom APIs are supported
in the future.
2023-01-05 15:31:33 +01:00
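A minimal sketch of the extra filtering step described in the commit above, assuming the eligible files live directly under the Veeam system folder and that the request body has already been parsed into an array of keys (hypothetical helper, not the actual cloudserver code):

    // Hypothetical allow-list of Veeam SOSAPI keys handled by the custom logic.
    const veeamObjectKeys = [
        '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml',
        '.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml',
    ];

    // Split the requested keys: eligible files go through the extra
    // bucket-metadata deletion step, the rest follow the standard path.
    function splitDeleteKeys(requestedKeys) {
        const eligible = [];
        const standard = [];
        requestedKeys.forEach(key => {
            (veeamObjectKeys.includes(key) ? eligible : standard).push(key);
        });
        return { eligible, standard };
    }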
williamlardier 73b295c91d
CLDSRV-322: Implement LIST for SOSAPI routes
Listing of objects is needed for a consistent user experience in the
product's User Interface.

Listing is implemented as a `GET` request with a specific query parameter
`list-type` and folder `.system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c`.

This API:
- Handles both versioned and non-versioned listing
- Relies on predefined templates to fill the response content
- Extracts the system.xml and capacity.xml files from the bucket
  metadata
- Computes the listing response based on the input query parameters
  and files currently in the bucket md capabilities
- Handles errors if any unsupported query parameter is used. As any GET
  request is routed to this method, we return InvalidRequest if the requested
  action is not supported (i.e., not a listing v2)
2023-01-05 15:31:32 +01:00
williamlardier 8186c84bf9
CLDSRV-322: Implement DELETE for SOSAPI routes
Deletion of Veeam SOSAPI files is required to ensure a consistent
user experience. This API is only exposed to API/CLI clients, not the
User Interface. The User Interface relies on MultiObjectDelete to
perform the deletions, which is handled in a separate commit.

This API:
- Checks that the requested file exists
- Erases the bucket metadata according to the file
- Updates the bucket metadata with the updated values
- Handles errors if the Veeam capability is not yet enabled for the bucket
2023-01-05 15:31:32 +01:00
williamlardier 93ef2d0545
CLDSRV-322: Implement HEAD for SOSAPI routes
HEAD object is not formally required by Veeam SOSAPI, but Veeam
relies on the last-modified date value of the capacity.xml file.
To support any change in a future SOSAPI standard, the HEAD method
is implemented; it is similar to the GET method, but only the metadata
are returned.
2023-01-05 15:31:31 +01:00
williamlardier d7d0a31bb1
CLDSRV-322: Implement PUT for SOSAPI routes
In the SOSAPI context, the user is requested to pre-create two files,
system.xml and capacity.xml, under the veeam folder, to enable the feature
(a parsing and validation sketch follows this entry).

This API:
- Extracts the XML from the provided file and converts it to JSON
- Validates the JSON against the joi schemes, if applicable
- Updates the bucket metadata, including the last-modified date
- Updates the bucket metadata in the database
- Returns the standard success code response
- Handles invalid XML or XML structure, and returns an error accordingly
2023-01-05 15:31:31 +01:00
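A minimal sketch of the XML-to-JSON parsing and validation flow this PUT relies on, using xml2js and joi; the schema shown is a hypothetical stand-in, not the actual SOSAPI scheme:

    const { parseStringPromise } = require('xml2js');
    const Joi = require('joi');

    // Hypothetical, simplified schema: the real system.xml scheme is richer.
    // xml2js puts element text into arrays by default, hence the array items.
    const systemSchema = Joi.object({
        SystemInfo: Joi.object({
            ProtocolVersion: Joi.array().items(Joi.string()).required(),
        }).unknown(true).required(),
    });

    async function parseAndValidate(xmlBody) {
        // Convert the provided XML file into JSON before validation.
        const parsed = await parseStringPromise(xmlBody);
        const { error } = systemSchema.validate(parsed);
        if (error) {
            // The caller maps this to a MalformedXML-style error response.
            throw error;
        }
        return parsed;
    }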
williamlardier 4c69b82508
CLDSRV-322: Implement GET for SOSAPI
The GET method is used by SOSAPI to determine if SOS API is enabled
or not on a bucket.

Two files are supported: system.xml and capacity.xml.

This API:
- Gets the bucket metadata
- Dynamically recomputes a valid XML based on the bucket md content
  using xml2js with the headless option, to enforce the same XML as the
  one from the SOSAPI standard (see the sketch after this entry)
- Rejects the request with an error if the bucket metadata does not
  exist
- Handles the `?tagging` request, required for versioned buckets, to
  return static content.

The output stream relies on the utils file.
2023-01-05 15:31:30 +01:00
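A minimal sketch of rebuilding the XML response from the bucket-metadata JSON with xml2js in headless mode, as referenced above; the input shape is illustrative, not the actual SOSAPI layout:

    const xml2js = require('xml2js');

    // headless: true omits the XML declaration, so the output matches the
    // plain document shape expected by the SOSAPI client.
    const builder = new xml2js.Builder({ headless: true });

    // capabilities is the JSON object stored in the bucket metadata.
    function buildSystemXml(capabilities) {
        return builder.buildObject(capabilities);
    }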
williamlardier ca13284da3
CLDSRV-322: implement common util functions
Custom SOSAPI routes might either retrieve or stream data. The utils file
re-implements, with support for this particular context, some functions
from the standard API paths in Arsenal.

These changes mostly introduce ways to compute the right HTTP headers as
well as input or output streams to handle GET or PUT request types.
2023-01-05 15:31:30 +01:00
williamlardier c6ed75a1d7
CLDSRV-322: implement SOSAPI scheme validator
SOSAPI relies on standard XML files for both the system and the capacity.
It is used by Veeam12+ to determine what capabilities and/or
configuration should be enforced for a given S3-integrated Bucket used
for backups.

The commit introduces scheme validation for JSON objects, as the XML will
first be converted using xml2js.

The system.xml file includes the protocol version of SOSAPI: if the
version is not known, no validation is made, to allow for future changes
without a formal need to update the product.

Note: a maximum XML file size will be enforced, in case of an unsupported
protocol version, to avoid space issues with the database (see the
validation sketch after this entry).
2023-01-05 15:31:30 +01:00
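A minimal sketch of the version-gated validation and size cap described above; the 1 MiB limit, version key, and schema map are assumptions for illustration only:

    const Joi = require('joi');

    // Assumed cap to avoid oversized documents in the bucket metadata.
    const MAX_XML_SIZE = 1024 * 1024;

    // Hypothetical map of known protocol versions to their joi schemas.
    const schemasByVersion = {
        '1.0': Joi.object().unknown(true),
    };

    function validateSystemJson(xmlBody, parsedJson, protocolVersion) {
        if (Buffer.byteLength(xmlBody) > MAX_XML_SIZE) {
            throw new Error('provided XML file is too large');
        }
        const schema = schemasByVersion[protocolVersion];
        if (!schema) {
            // Unknown protocol version: skip validation so future SOSAPI
            // revisions do not require a product update.
            return parsedJson;
        }
        const { error, value } = schema.validate(parsedJson);
        if (error) {
            throw error;
        }
        return value;
    }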
williamlardier 402d0dea1a
CLDSRV-322: Create a new route for Veeam12 SOS API.
This new route is exposed through special nginx rules
from Zenko-Operator, to redirect any call to the veeam
folder, located under .system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c
at the root of the bucket. The goal is to store files in
the bucket metadata, to ease their update by internal jobs.
To avoid impacting the standard API, we rely on custom routes
with dedicated logic to handle these files, in a generic
way.

This commit introduces a new route that will manage, in turn:
- Handling of the incoming request.
- Validity checks, including the list of supported APIs according
  to the HTTP verb and query parameters.
- Authentication and authorization with Vault, in the same
  way as for regular files.
- Checking of the targeted bucket and/or keys, to extract the
  bucket metadata.
- Routing of the request to the right API handler (see the dispatch
  sketch after this entry).
2023-01-05 15:31:29 +01:00
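A minimal sketch of the verb-and-query dispatch this route performs, with hypothetical handler names standing in for the real API handlers:

    // Hypothetical dispatcher: routes a request targeting the Veeam system
    // folder to the right handler based on the HTTP verb and query parameters.
    function routeVeeamRequest(request, handlers) {
        const { method, query } = request;
        switch (method) {
        case 'GET':
            // 'list-type' marks a listing request, otherwise it is a file GET
            // (including the `?tagging` case handled by the GET handler).
            return query['list-type'] !== undefined
                ? handlers.listObjects(request)
                : handlers.getObject(request);
        case 'PUT':
            return handlers.putObject(request);
        case 'HEAD':
            return handlers.headObject(request);
        case 'DELETE':
            return handlers.deleteObject(request);
        default:
            // Any other verb or unsupported combination is rejected.
            return handlers.invalidRequest(request);
        }
    }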
williamlardier 95faec1db0
CLDSRV-322: bump arsenal version 2023-01-05 15:31:29 +01:00
Jonathan Gramain ca9d53f430 Merge remote-tracking branch 'origin/w/8.6/bugfix/CLDSRV-321-version-bump' into w/8.7/bugfix/CLDSRV-321-version-bump 2022-12-26 11:19:03 -08:00
Jonathan Gramain ba27ff7980 Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-321-version-bump' into w/8.6/bugfix/CLDSRV-321-version-bump 2022-12-26 11:18:27 -08:00
Jonathan Gramain 8957997e23 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-321-version-bump' into w/8.5/bugfix/CLDSRV-321-version-bump 2022-12-26 11:17:26 -08:00
Jonathan Gramain 3caeda5d39 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-321-version-bump' into w/8.4/bugfix/CLDSRV-321-version-bump 2022-12-26 11:15:32 -08:00
bert-e b1ee1f8ef7 Merge branch 'w/8.6/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.7/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:26 +00:00
bert-e 28d778c2d4 Merge branch 'w/8.5/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.6/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:26 +00:00
bert-e b180aac9ba Merge branch 'w/8.4/bugfix/CLDSRV-321/fix_retention_extension_check' into tmp/octopus/w/8.5/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 23:27:25 +00:00
Taylor McKinnon c353452128 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-321/fix_retention_extension_check' into w/8.4/bugfix/CLDSRV-321/fix_retention_extension_check 2022-12-21 15:26:32 -08:00
williamlardier e882cb6781
Merge remote-tracking branch 'origin/bugfix/CLDSRV-320-bump-arsenal' into w/8.7/bugfix/CLDSRV-320-bump-arsenal 2022-12-20 17:44:48 +01:00
williamlardier 8543f1a934
CLDSRV-320: bump project version 2022-12-20 17:43:02 +01:00
williamlardier fc871fbbfa
CLDSRV-320: bump arsenal to 8.1.77 2022-12-20 17:42:59 +01:00
Francois Ferrand cb7303636c
Release bump 8.7.1
Issue: CLDSRV-306
2022-12-16 19:56:19 +01:00
Francois Ferrand 6d0f889c23
Merge remote-tracking branch 'origin/feature/CLDSRV-306' into w/8.7/feature/CLDSRV-306 2022-12-16 19:54:23 +01:00
Francois Ferrand c13f2ae6a5
Merge remote-tracking branch 'origin/improvement/CLDSRV-305' into w/8.7/improvement/CLDSRV-305 2022-12-16 18:08:52 +01:00
Francois Ferrand 03058371e9
Release bump 8.6.4
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand 473fed7594
Migrate tests to new azure storage sdk
Issue: CLDSRV-305
2022-12-16 17:54:09 +01:00
Francois Ferrand d86b9144be
Handle isSameAzureAccount() for other auth methods
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand 2f2d9ced4c
Add unit tests for azure auth config
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand 57a0ffc746
Support alternate azure auth methods in config
Issue: CLDSRV-306
2022-12-16 17:54:09 +01:00
Francois Ferrand d839cf2394
Bump arsenal
https://github.com/scality/Arsenal/tree/improvement/ARSN-281

Issue: CLDSRV-305
2022-12-16 16:21:43 +01:00
bert-e b6611c4711 Merge branch 'w/8.6/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.7/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:48 +00:00
bert-e 461f5ac5f9 Merge branch 'w/8.4/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.5/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:47 +00:00
bert-e 413a42adf0 Merge branch 'w/8.5/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into tmp/octopus/w/8.6/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 22:52:47 +00:00
Jonathan Gramain 7be27e0a83 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10' into w/8.4/bugfix/CLDSRV-173-CLDSRV-170-CLDSRV-177-S3C-5390-development-7.10 2022-12-15 14:51:13 -08:00
bert-e ae4ece471b Merge branch 'w/8.7/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/q/8.7 2022-12-14 21:19:55 +00:00
williamlardier 15b61cd947
CLDSRV-297: bump cloudserver to 8.7.0 2022-12-14 18:16:55 +01:00
williamlardier 91536c575f
CLDSRV-297: bump projects versions 2022-12-14 18:16:52 +01:00
bert-e 864ce1f27d Merge branch 'w/8.4/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/w/8.5/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag 2022-12-14 04:56:43 +00:00
bert-e 9d007a76b1 Merge branch 'w/8.5/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into tmp/octopus/w/8.6/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag 2022-12-14 04:56:43 +00:00
Artem Bakalov f4e292c6f9 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag' into w/8.4/improvement/CLDSRV-301-md-get-object-from-non-versioned-buckets-flag 2022-12-13 20:55:44 -08:00
Francois Ferrand a2eb347fe3
Join latency lines when no operation
Latency is expected to be NaN when there are no operations, and we should
not mask this as "0 latency", which would be incorrect.

To make the graph easier to read and less confusing, we now 'join' the
lines if there are fewer than 3 minutes with no data.

As we plot the individual data points on the graph, the missing data can
still be observed by looking for these points.

Issue: CLDSRV-309
2022-11-25 14:03:35 +01:00
Francois Ferrand 0ff1262f97
Display metrics in op/s
This is more practical for ops, when dealing with large amounts of data.

Issue: CLDSRV-309
2022-11-25 14:03:35 +01:00
Francois Ferrand 54a23d90c1
Fix http method breakdown
The value should be computed based on all values in the current window.

Issue: CLDSRV-309
2022-11-23 21:42:06 +01:00
Kerkesni eb3dc9b79f
feature: CLDSRV-308 bump version in package.json to 8.6.3 2022-11-18 14:48:56 +01:00
bert-e 2c8968ef4a Merge branch 'feature/CLDSRV-304-support-object-restore-completed-notification' into q/8.6 2022-11-16 09:42:12 +00:00
Kerkesni a449aa35f4
feature: CLDSRV-303 support s3:ObjectRestore:Completed event notification 2022-11-15 19:11:49 +01:00
Kerkesni c2c8582585
feature: CLDSRV-303 support s3:ObjectRestore:Post event notification 2022-11-15 17:31:22 +01:00
Kerkesni 82c1bd7211
feature: CLDSRV-277 bump arsenal to 8.1.72 2022-11-14 11:23:02 +01:00
bert-e 776af747f2 Merge branch 'feature/CLDSRV-295/bumpArsenalVersion' into tmp/octopus/w/8.6/feature/CLDSRV-295/bumpArsenalVersion 2022-11-12 10:40:37 +00:00
Alexander Chan 453fec0cb0 CLDSRV-295: bump arsenal 8.1.71 2022-11-11 22:03:21 -08:00
bert-e f9fd3cae16 Merge branch 'w/8.5/bugfix/CLDSRV-293/bump_cloudserver_version' into tmp/octopus/w/8.6/bugfix/CLDSRV-293/bump_cloudserver_version 2022-11-11 22:01:36 +00:00
bert-e 3662c406ec Merge branch 'w/8.4/bugfix/CLDSRV-293/bump_cloudserver_version' into tmp/octopus/w/8.5/bugfix/CLDSRV-293/bump_cloudserver_version 2022-11-11 22:01:36 +00:00
Taylor McKinnon 243876ef81 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-293/bump_cloudserver_version' into w/8.4/bugfix/CLDSRV-293/bump_cloudserver_version 2022-11-11 14:01:05 -08:00
bert-e f6fe11b763 Merge branch 'w/8.5/bugfix/CLDSRV-293/refactor_olock_checks' into tmp/octopus/w/8.6/bugfix/CLDSRV-293/refactor_olock_checks 2022-11-11 19:54:55 +00:00
Taylor McKinnon 5f94fce344 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-293/refactor_olock_checks' into w/8.5/bugfix/CLDSRV-293/refactor_olock_checks 2022-11-11 11:54:16 -08:00
Taylor McKinnon af8420fe3c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-293/refactor_olock_checks' into w/8.4/bugfix/CLDSRV-293/refactor_olock_checks 2022-11-11 11:52:21 -08:00
bert-e e5c58ecc3d Merge branch 'w/8.5/bugfix/CLDSRV-296-removeGetProductVersion' into tmp/octopus/w/8.6/bugfix/CLDSRV-296-removeGetProductVersion 2022-11-04 20:17:34 +00:00
bert-e 6ef88fd60e Merge branch 'w/8.4/bugfix/CLDSRV-296-removeGetProductVersion' into tmp/octopus/w/8.5/bugfix/CLDSRV-296-removeGetProductVersion 2022-11-04 20:17:34 +00:00
bert-e 483e91a8d6 Merge branch 'w/7.70/bugfix/CLDSRV-296-removeGetProductVersion' into tmp/octopus/w/8.4/bugfix/CLDSRV-296-removeGetProductVersion 2022-11-04 20:17:33 +00:00
Jonathan Gramain 7692d2c376 Merge remote-tracking branch 'origin/w/8.5/feature/CLDSRV-294-bump-7.10.19' into w/8.6/feature/CLDSRV-294-bump-7.10.19 2022-11-03 21:37:43 -07:00
Jonathan Gramain a0d7b07dc6 Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-294-bump-7.10.19' into w/8.5/feature/CLDSRV-294-bump-7.10.19 2022-11-03 21:36:36 -07:00
Jonathan Gramain a9c21b98f9 Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-294-bump-7.10.19' into w/8.4/feature/CLDSRV-294-bump-7.10.19 2022-11-03 21:23:12 -07:00
bert-e 3257f4e905 Merge branch 'w/8.5/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy' into tmp/octopus/w/8.6/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy 2022-11-03 22:35:18 +00:00
bert-e 1d190019f7 Merge branch 'w/8.4/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy' into tmp/octopus/w/8.5/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy 2022-11-03 22:35:17 +00:00
Jonathan Gramain 79e7dc3946 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy' into w/8.4/bugfix/CLDSRV-291-testObjectHeadWithBucketPolicy 2022-11-03 15:31:25 -07:00
Francois Ferrand 7db26fae9a
Release 8.6.1 2022-10-28 15:36:35 +02:00
Francois Ferrand 7faf8c2366
Fix chunk upload/download size
The formula is not statistically accurate, but it gives an estimation,
assuming the distribution of object sizes is somewhat linear.

Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Francois Ferrand e803078952
Add per-operation latency and count
Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Francois Ferrand cfd72f3a38
Fix last report query
Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Francois Ferrand 69a96d3993
Fix rounding of counts
Add the round() operator in the query instead of limiting to 0 decimals, to
allow Grafana to display fractional values when there is a "unit" (like
`1.25K`)

Issue: CLDSRV-288
2022-10-28 15:36:35 +02:00
Taylor McKinnon d5bb8d8ed3 Merge remote-tracking branch 'origin/w/8.5/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning' into w/8.6/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning 2022-10-27 13:43:28 -07:00
Taylor McKinnon aeb8de54db Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning' into w/8.5/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning 2022-10-27 13:37:03 -07:00
Taylor McKinnon 8f62260d70 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning' into w/8.4/bugfix/CLDSRV-289/fix_utapiv2_delete_obj_suspended_versioning 2022-10-27 13:36:19 -07:00
bert-e 29985f8955 Merge branch 'w/8.5/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData' into tmp/octopus/w/8.6/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData 2022-10-25 22:53:59 +00:00
Jonathan Gramain b081918317 Merge remote-tracking branch 'origin/w/8.4/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData' into w/8.5/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData 2022-10-25 15:53:06 -07:00
Jonathan Gramain 9049555887 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData' into w/8.4/bugfix/CLDSRV-290-putMetadataRouteShouldNotRemoveValidData 2022-10-25 10:47:34 -07:00
bert-e b125bcb0b7 Merge branch 'w/8.5/bugfix/CLDSRV-275/bump-utapi' into tmp/octopus/w/8.6/bugfix/CLDSRV-275/bump-utapi 2022-10-21 18:25:46 +00:00
bert-e dd93e2f0be Merge branch 'w/8.4/bugfix/CLDSRV-275/bump-utapi' into tmp/octopus/w/8.5/bugfix/CLDSRV-275/bump-utapi 2022-10-21 18:25:46 +00:00
Taylor McKinnon d8dc35f1cf Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-275/bump-utapi' into w/8.4/bugfix/CLDSRV-275/bump-utapi 2022-10-21 11:25:09 -07:00
Francois Ferrand 4e935dff1a
Release 8.6.0
Issue: CLDSRV-287
2022-10-17 10:10:04 +02:00
Francois Ferrand ecd54df821
Use node:16.17.1 bullseye slim base image
* Use more recent base image to get CVE fixes
* Use separate builder image to minimize the prod image

Issue: CLDSRV-287
2022-10-17 10:09:30 +02:00
bert-e d523b6f1b6 Merge branch 'w/8.4/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended' into tmp/octopus/w/8.5/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended 2022-10-15 02:10:11 +00:00
Artem Bakalov ab95973786 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended' into w/8.4/bugfix/CLDSRV-275-utapi-v1-delete-inconsistency-with-versioning-suspended 2022-10-14 19:09:19 -07:00
bert-e de094c53cd Merge branch 'w/8.4/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-07 20:49:44 +00:00
bert-e 0234ec7461 Merge branch 'w/7.70/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-07 20:49:44 +00:00
bert-e 4f1bd8e634 Merge branch 'w/8.4/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 23:08:24 +00:00
bert-e bcabab454c Merge branch 'w/7.70/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 23:08:23 +00:00
bert-e 47352b1df1 Merge branch 'w/8.4/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 18:52:22 +00:00
bert-e a019e89ebb Merge branch 'w/7.70/bugfix/CLDSRV-285-update-release-dockerfile' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-update-release-dockerfile 2022-10-06 18:52:21 +00:00
bert-e 59c6a9fb2a Merge branches 'w/8.5/bugfix/CLDSRV-285-correct-docker-image' and 'q/4824/8.4/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/q/8.5 2022-10-05 16:29:08 +00:00
bert-e 0c27fbebea Merge branch 'w/8.4/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/q/8.4 2022-10-05 16:29:08 +00:00
bert-e 01afc596e9 Merge branch 'w/8.4/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/w/8.5/bugfix/CLDSRV-285-correct-docker-image 2022-10-05 02:51:42 +00:00
bert-e dff4c42971 Merge branch 'w/7.70/bugfix/CLDSRV-285-correct-docker-image' into tmp/octopus/w/8.4/bugfix/CLDSRV-285-correct-docker-image 2022-10-05 02:51:42 +00:00
williamlardier 3ce13ddde9
Merge remote-tracking branch 'origin/bugfix/CLDSRV-286-bump-arsenal-fix-authz-regression' into w/8.5/bugfix/CLDSRV-286-bump-arsenal-fix-authz-regression 2022-10-04 19:43:21 +02:00
williamlardier a327aa83c1
CLDSRV-286: use latest Arsenal version
2022-10-04 19:39:14 +02:00
williamlardier 667cd471a4
CLDSRV-286: bump dockerfile base image 2022-10-04 14:53:00 +02:00
williamlardier 1b6b2ef4ed
Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-281-complete-missing-permission-checks' into w/8.5/improvement/CLDSRV-281-complete-missing-permission-checks 2022-09-27 14:05:51 +02:00
williamlardier 04b1d6c6a4
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-281-complete-missing-permission-checks' into w/8.4/improvement/CLDSRV-281-complete-missing-permission-checks 2022-09-27 13:55:20 +02:00
bert-e a149336c1a Merge branch 'w/8.4/bugfix/CLDSRV-282/put-bucket-object-lock-regression' into tmp/octopus/w/8.5/bugfix/CLDSRV-282/put-bucket-object-lock-regression 2022-09-23 18:53:07 +00:00
bert-e d3847224a4 Merge branch 'w/7.70/bugfix/CLDSRV-282/put-bucket-object-lock-regression' into tmp/octopus/w/8.4/bugfix/CLDSRV-282/put-bucket-object-lock-regression 2022-09-23 18:53:07 +00:00
bert-e ee090c4f03 Merge branch 'w/8.4/bugfix/CLDSRV-282/put-bucket-object-lock-regression' into tmp/octopus/w/8.5/bugfix/CLDSRV-282/put-bucket-object-lock-regression 2022-09-22 20:58:55 +00:00
bert-e 8f2aa95ec8 Merge branch 'w/7.70/bugfix/CLDSRV-282/put-bucket-object-lock-regression' into tmp/octopus/w/8.4/bugfix/CLDSRV-282/put-bucket-object-lock-regression 2022-09-22 20:58:55 +00:00
bert-e c8cdd8eacb Merge branch 'w/8.4/feature/CLDSRV-278/fix-github-action-typo' into tmp/octopus/w/8.5/feature/CLDSRV-278/fix-github-action-typo 2022-09-21 21:21:35 +00:00
bert-e 30455b9d6f Merge branch 'w/7.70/feature/CLDSRV-278/fix-github-action-typo' into tmp/octopus/w/8.4/feature/CLDSRV-278/fix-github-action-typo 2022-09-21 21:21:35 +00:00
bert-e 73474be2fe Merge branch 'w/8.4/feature/CLDSRV-278/fix-workflow-env-var-usage' into tmp/octopus/w/8.5/feature/CLDSRV-278/fix-workflow-env-var-usage 2022-09-21 04:47:17 +00:00
bert-e 5cdbe049cf Merge branch 'w/7.70/feature/CLDSRV-278/fix-workflow-env-var-usage' into tmp/octopus/w/8.4/feature/CLDSRV-278/fix-workflow-env-var-usage 2022-09-21 04:47:16 +00:00
bert-e d819e9128a Merge branch 'w/8.4/feature/CLDSRV-278/fix-workflow-env-var-usage' into tmp/octopus/w/8.5/feature/CLDSRV-278/fix-workflow-env-var-usage 2022-09-21 01:25:38 +00:00
bert-e d5dcd1f2c1 Merge branch 'w/7.70/feature/CLDSRV-278/fix-workflow-env-var-usage' into tmp/octopus/w/8.4/feature/CLDSRV-278/fix-workflow-env-var-usage 2022-09-21 01:25:38 +00:00
bert-e bc835899d0 Merge branch 'w/8.4/feature/CLDSRV-278/fix-workflow-env-var-usage' into tmp/octopus/w/8.5/feature/CLDSRV-278/fix-workflow-env-var-usage 2022-09-20 23:13:44 +00:00
bert-e 0e741e0b6f Merge branch 'w/7.70/feature/CLDSRV-278/fix-workflow-env-var-usage' into tmp/octopus/w/8.4/feature/CLDSRV-278/fix-workflow-env-var-usage 2022-09-20 23:13:43 +00:00
bert-e bfcfb43999 Merge branch 'w/8.4/bugfix/CLDSRV-280/iam-user-can-put-object-with-retention-when-policy-deny' into tmp/octopus/w/8.5/bugfix/CLDSRV-280/iam-user-can-put-object-with-retention-when-policy-deny 2022-09-20 20:57:08 +00:00
bert-e 00fc3496ac Merge branch 'w/7.70/bugfix/CLDSRV-280/iam-user-can-put-object-with-retention-when-policy-deny' into tmp/octopus/w/8.4/bugfix/CLDSRV-280/iam-user-can-put-object-with-retention-when-policy-deny 2022-09-20 20:57:08 +00:00
bert-e 1d702112f0 Merge branch 'w/8.4/feature/CLDSRV-278/provide-image-for-federation' into tmp/octopus/w/8.5/feature/CLDSRV-278/provide-image-for-federation 2022-09-19 19:59:20 +00:00
Ronnie Smith 1b35948ce2
Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-278/provide-image-for-federation' into w/8.4/feature/CLDSRV-278/provide-image-for-federation 2022-09-19 12:58:31 -07:00
Alexander Chan 113c5c166f Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-276/bump-7.10.12' into w/8.5/improvement/CLDSRV-276/bump-7.10.12 2022-09-12 07:52:21 -07:00
Alexander Chan 53a988b167 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-276/bump-7.10.12' into w/8.4/improvement/CLDSRV-276/bump-7.10.12 2022-09-12 03:02:22 -07:00
bert-e d5b31cb669 Merge branch 'w/8.4/bugfix/CLDSRV-269/policy-checks-for-put-bucket-with-object-lock' into tmp/octopus/w/8.5/bugfix/CLDSRV-269/policy-checks-for-put-bucket-with-object-lock 2022-09-09 22:11:58 +00:00
Ronnie Smith 9bf176b7fb
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-269/policy-checks-for-put-bucket-with-object-lock' into w/8.4/bugfix/CLDSRV-269/policy-checks-for-put-bucket-with-object-lock 2022-09-09 15:11:06 -07:00
bert-e 7c92f34ee0 Merge branches 'w/8.5/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' and 'q/4757/8.4/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' into tmp/octopus/q/8.5 2022-08-31 10:03:38 +00:00
bert-e 81d34525c7 Merge branches 'w/8.4/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' and 'q/4757/7.70/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' into tmp/octopus/q/8.4 2022-08-31 10:03:37 +00:00
bert-e 9af76eb0ce Merge branches 'development/8.5' and 'w/8.4/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' into tmp/octopus/w/8.5/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass 2022-08-31 02:32:21 +00:00
bert-e d3a622ea27 Merge branches 'development/8.4' and 'w/7.70/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' into tmp/octopus/w/8.4/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass 2022-08-31 02:32:21 +00:00
bert-e 76301c9ec4 Merge branches 'w/8.5/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi' and 'q/4772/8.4/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi' into tmp/octopus/q/8.5 2022-08-31 01:32:15 +00:00
bert-e 3b6a2c9a55 Merge branches 'w/8.4/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi' and 'q/4772/7.70/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi' into tmp/octopus/q/8.4 2022-08-31 01:32:15 +00:00
bert-e 5fb3cf0ede Merge branch 'w/8.4/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' into tmp/octopus/w/8.5/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass 2022-08-30 09:04:07 +00:00
Ronnie Smith 3a7c8f920b
Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass' into w/8.4/bugfix/CLDSRV-270/retention-time-cannot-be-increased-without-bypass 2022-08-30 02:03:20 -07:00
bert-e 83cf51a3d0 Merge branch 'w/8.4/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi' into tmp/octopus/w/8.5/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi 2022-08-30 04:19:27 +00:00
Artem Bakalov 9544b18f2e Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi' into w/8.4/bugfix/CLDSRV-264/ignore_lifecycle_event_in_utapi 2022-08-29 21:18:19 -07:00
bert-e 22905c8967 Merge branch 'w/8.4/improvement/CLDSRV-274-release-7-10-10' into tmp/octopus/w/8.5/improvement/CLDSRV-274-release-7-10-10 2022-08-29 11:40:07 +00:00
KillianG d05f027837
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-274-release-7-10-10' into w/8.4/improvement/CLDSRV-274-release-7-10-10 2022-08-29 13:39:22 +02:00
bert-e f0b34678f7 Merge branch 'w/8.4/bugfix/CLDSRV-273/doNotSetEmptyStringDelimiter' into tmp/octopus/w/8.5/bugfix/CLDSRV-273/doNotSetEmptyStringDelimiter 2022-08-26 19:08:09 +00:00
bert-e cddaef2bb9 Merge branch 'w/7.70/bugfix/CLDSRV-273/doNotSetEmptyStringDelimiter' into tmp/octopus/w/8.4/bugfix/CLDSRV-273/doNotSetEmptyStringDelimiter 2022-08-26 19:08:08 +00:00
bert-e fdbfec2bcc Merge branch 'w/8.4/improvement/CLDSRV-271-bump-arsenal-7-10-34' into tmp/octopus/w/8.5/improvement/CLDSRV-271-bump-arsenal-7-10-34 2022-08-26 08:28:39 +00:00
KillianG 3f11dab32b
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-271-bump-arsenal-7-10-34' into w/8.4/improvement/CLDSRV-271-bump-arsenal-7-10-34 2022-08-26 10:27:54 +02:00
Jonathan Gramain 3be5f2633c Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-268-release-7.10.9' into w/8.5/feature/CLDSRV-268-release-7.10.9 2022-08-19 15:21:29 -07:00
Jonathan Gramain d947e4ffb2 Merge remote-tracking branch 'origin/w/7.70/feature/CLDSRV-268-release-7.10.9' into w/8.4/feature/CLDSRV-268-release-7.10.9 2022-08-19 15:18:43 -07:00
bert-e 4407b46d06 Merge branch 'w/8.5/bugfix/CLDSRV-257-callTagConditionKeyAuthOnce' into tmp/octopus/q/8.5 2022-08-18 00:04:22 +00:00
bert-e b1d42091b3 Merge branch 'bugfix/CLDSRV-267/bump' into q/8.5 2022-08-17 23:35:54 +00:00
Nicolas Humbert dbc99acd0d CLDSRV-267 bump arsenal 8.1.65 2022-08-17 19:04:19 -04:00
bert-e 8ec404dc7a Merge branch 'w/8.4/bugfix/CLDSRV-257-callTagConditionKeyAuthOnce' into tmp/octopus/w/8.5/bugfix/CLDSRV-257-callTagConditionKeyAuthOnce 2022-08-17 23:00:24 +00:00
Jonathan Gramain a1a7e4d888 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-257-callTagConditionKeyAuthOnce' into w/8.4/bugfix/CLDSRV-257-callTagConditionKeyAuthOnce 2022-08-17 15:59:43 -07:00
bert-e 1b560fa584 Merge branch 'w/8.4/bugfix/CLDSRV-266-checkCheckTagConditionsBeforeIsAllowed' into tmp/octopus/w/8.5/bugfix/CLDSRV-266-checkCheckTagConditionsBeforeIsAllowed 2022-08-17 22:16:17 +00:00
Jonathan Gramain 8a1828ef4c Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-266-checkCheckTagConditionsBeforeIsAllowed' into w/8.4/bugfix/CLDSRV-266-checkCheckTagConditionsBeforeIsAllowed 2022-08-17 15:15:36 -07:00
bert-e 717d9f844e Merge branch 'w/8.4/bugfix/CLDSRV-256-fixAndTestUpdateRequestContexts' into tmp/octopus/w/8.5/bugfix/CLDSRV-256-fixAndTestUpdateRequestContexts 2022-08-17 21:02:13 +00:00
Jonathan Gramain 04e2396b3b CLDSRV-256 [8.4] disable linter check for ES6 array iteration 2022-08-17 14:01:49 -07:00
bert-e 443f239b8e Merge branch 'w/8.4/bugfix/CLDSRV-256-fixAndTestUpdateRequestContexts' into tmp/octopus/w/8.5/bugfix/CLDSRV-256-fixAndTestUpdateRequestContexts 2022-08-12 20:28:24 +00:00
Jonathan Gramain c57b6ff0e4 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-256-fixAndTestUpdateRequestContexts' into w/8.4/bugfix/CLDSRV-256-fixAndTestUpdateRequestContexts 2022-08-12 13:27:39 -07:00
Jonathan Gramain 43cc84ac9b Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-261-release-7.10.8' into w/8.5/improvement/CLDSRV-261-release-7.10.8 2022-08-11 12:15:52 -07:00
Jonathan Gramain 806c79be7c Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-261-release-7.10.8' into w/8.4/improvement/CLDSRV-261-release-7.10.8 2022-08-11 11:54:56 -07:00
bert-e 760ef6e0d8 Merge branch 'w/8.4/improvement/CLDSRV-260-remove-package-lock-json' into tmp/octopus/w/8.5/improvement/CLDSRV-260-remove-package-lock-json 2022-08-11 08:40:21 +00:00
bert-e dcc1b32049 Merge branch 'w/7.70/improvement/CLDSRV-260-remove-package-lock-json' into tmp/octopus/w/8.4/improvement/CLDSRV-260-remove-package-lock-json 2022-08-11 08:40:21 +00:00
bert-e 7bd3ec9954 Merge branch 'w/8.4/improvement/CLDSRV-259-bump-arsenal-7-10-31' into tmp/octopus/w/8.5/improvement/CLDSRV-259-bump-arsenal-7-10-31 2022-08-10 12:34:55 +00:00
KillianG 0b7c6a76cc
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-259-bump-arsenal-7-10-31' into w/8.4/improvement/CLDSRV-259-bump-arsenal-7-10-31 2022-08-10 14:34:32 +02:00
bert-e f115aeb7c2 Merge branch 'w/8.4/improvement/CLDSRV-259-bump-arsenal-7-10-31' into tmp/octopus/w/8.5/improvement/CLDSRV-259-bump-arsenal-7-10-31 2022-08-10 10:07:27 +00:00
KillianG add9e37712
Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-259-bump-arsenal-7-10-31' into w/8.4/improvement/CLDSRV-259-bump-arsenal-7-10-31 2022-08-10 12:06:35 +02:00
bert-e c0082d495d Merge branch 'w/8.4/bugfix/CLDSRV-258-setOriginOpInTaggingAndACLOps' into tmp/octopus/w/8.5/bugfix/CLDSRV-258-setOriginOpInTaggingAndACLOps 2022-08-08 20:46:14 +00:00
Jonathan Gramain 555b583354 Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-258-setOriginOpInTaggingAndACLOps' into w/8.4/bugfix/CLDSRV-258-setOriginOpInTaggingAndACLOps 2022-08-08 13:45:30 -07:00
bert-e e70753446f Merge branch 'w/8.4/improvement/CLDSRV-253-skipFlakyACLTests' into tmp/octopus/w/8.5/improvement/CLDSRV-253-skipFlakyACLTests 2022-07-29 23:49:15 +00:00
bert-e 29a3a6d845 Merge branch 'w/7.70/improvement/CLDSRV-253-skipFlakyACLTests' into tmp/octopus/w/8.4/improvement/CLDSRV-253-skipFlakyACLTests 2022-07-29 23:49:14 +00:00
Nicolas Humbert f2e7aec6c8 CLDSRV-252 Bump CloudServer 8.5.11 2022-07-28 17:45:32 -04:00
bert-e 9e85e8dd9e Merge branch 'bugfix/CLDSRV-246/objectPutVersion' into q/8.5 2022-07-28 17:39:28 +00:00
Nicolas Humbert f3f4937578 CLDSRV-246 PutObjectVersion always returns 403 2022-07-27 17:11:38 -04:00
bert-e bcb879c2ff Merge branch 'w/8.4/bugfix/CLDSRV-250/fix_acl_replication' into tmp/octopus/w/8.5/bugfix/CLDSRV-250/fix_acl_replication 2022-07-26 21:09:42 +00:00
Taylor McKinnon 615ace071f Merge remote-tracking branch 'origin/w/7.70/bugfix/CLDSRV-250/fix_acl_replication' into w/8.4/bugfix/CLDSRV-250/fix_acl_replication 2022-07-26 14:09:05 -07:00
Taylor McKinnon d4ae083b5a Merge remote-tracking branch 'origin/w/8.4/improvement/CLDSRV-249/release_7_10_6' into w/8.5/improvement/CLDSRV-249/release_7_10_6 2022-07-22 14:39:36 -07:00
Taylor McKinnon e97e410ee4 Merge remote-tracking branch 'origin/w/7.70/improvement/CLDSRV-249/release_7_10_6' into w/8.4/improvement/CLDSRV-249/release_7_10_6 2022-07-22 14:33:33 -07:00
Kerkesni 3d86abd70a
bugfix: CLDSRV-248 bump cloudserver to 8.5.9 2022-07-22 15:49:24 +02:00
Kerkesni dc4dd2595e
bugfix: CLDSRV-248 fix azure mpu replication issue 2022-07-22 15:49:24 +02:00
bert-e b712df6a1f Merge branch 'feature/CLDSRV-243-allow-backbeat-to-update-non-versioned-objects' into q/8.5 2022-07-20 08:14:26 +00:00
Kerkesni 356c9f1e9c
feature: CLDSRV-243 bump cloudserver 8.5.8 2022-07-20 09:45:03 +02:00
Nicolas Humbert 51e28def0e CLDSRV-247 Bump Arsenal 8.1.62 2022-07-19 09:23:05 +02:00
bert-e 55b0400b25 Merge branches 'w/8.5/improvement/CLDSRV-239-version-bump' and 'q/4656/8.4/improvement/CLDSRV-239-version-bump' into tmp/octopus/q/8.5 2022-07-18 11:36:08 +00:00
bert-e 4fc5ac5e58 Merge branches 'w/8.4/improvement/CLDSRV-239-version-bump' and 'q/4656/7.70/improvement/CLDSRV-239-version-bump' into tmp/octopus/q/8.4 2022-07-18 11:36:07 +00:00
bert-e c2f86b63eb Merge branch 'w/8.4/improvement/CLDSRV-239-version-bump' into tmp/octopus/w/8.5/improvement/CLDSRV-239-version-bump 2022-07-18 11:09:11 +00:00
bert-e 35ecf8e556 Merge branch 'w/7.70/improvement/CLDSRV-239-version-bump' into tmp/octopus/w/8.4/improvement/CLDSRV-239-version-bump 2022-07-18 11:09:11 +00:00
bert-e 4e89f4b025 Merge branches 'w/8.5/improvement/CLDSRV-234-bump-arsenal' and 'q/4630/8.4/improvement/CLDSRV-234-bump-arsenal' into tmp/octopus/q/8.5 2022-07-18 09:28:11 +00:00
bert-e fc5bde533c Merge branch 'improvement/CLDSRV-234-bump-arsenal' into q/8.4 2022-07-18 09:28:11 +00:00
Rached Ben Mustapha 4fa5bf7409 CLDSRV-245: temporarily disable objectPutVersion action match 2022-07-14 15:27:19 +00:00
Will Toozs 5effe07ebd
Merge remote-tracking branch 'origin/improvement/CLDSRV-234-bump-arsenal' into w/8.5/improvement/CLDSRV-234-bump-arsenal 2022-07-13 18:30:19 +02:00
Will Toozs dce1f83322
improvement/CLDSRV-234: change build memory 2022-07-13 17:35:47 +02:00
Kerkesni 7e405ff963
feature: CLDSRV-243 allow backbeat to update non versioned objects
When replicating a non-versioned OOB object, Backbeat’s ReplicationStatusProcessor
needs to update only the master version of that object.
2022-07-13 15:29:29 +02:00
bert-e ce705c8e78 Merge branch 'w/8.4/improvement/CLDSRV-237-revert-tagging' into tmp/octopus/w/8.5/improvement/CLDSRV-237-revert-tagging 2022-07-13 09:37:38 +00:00
Will Toozs 2ca6fb2fe6
CLDSRV-237-revert-bucket-tagging 2022-07-13 11:36:42 +02:00
bert-e 7b423666fe Merge branch 'w/7.70/improvement/CLDSRV-237-revert-tagging' into tmp/octopus/w/8.4/improvement/CLDSRV-237-revert-tagging 2022-07-13 09:30:58 +00:00
bert-e ec07bedd0b Merge branch 'w/8.4/improvement/CLDSRV-237-revert-tagging' into tmp/octopus/w/8.5/improvement/CLDSRV-237-revert-tagging 2022-07-13 09:18:08 +00:00
bert-e dc76bbb5c4 Merge branch 'w/7.70/improvement/CLDSRV-237-revert-tagging' into tmp/octopus/w/8.4/improvement/CLDSRV-237-revert-tagging 2022-07-13 09:18:08 +00:00
bert-e e130629ff9 Merge branch 'bugfix/CLDSRV-241/put-metadata' into q/8.5 2022-07-12 16:40:11 +00:00
Nicolas Humbert 8e60c2d300 CLDSRV-241 Allow updating object metadata with empty data location 2022-07-12 18:18:54 +02:00
Will Toozs 5c1b237e9e
CLDSRV-234: lockfile 2022-07-12 11:43:46 +02:00
Will Toozs 8d7bd0809c
CLDSRV-234: bump arsenal 2022-07-12 11:43:45 +02:00
bert-e 31b3469e4a Merge branch 'w/8.4/improvement/CLDSRV-235-bump-package.json' into tmp/octopus/w/8.5/improvement/CLDSRV-235-bump-package.json 2022-07-11 12:12:17 +00:00
Anurag Mittal d841a31bf3
Merge remote-tracking branch 'origin/improvement/CLDSRV-235-bump-package.json' into w/8.4/improvement/CLDSRV-235-bump-package.json 2022-07-11 14:11:43 +02:00
Taylor McKinnon 47c6c7acf3 Merge remote-tracking branch 'origin/development/8.5' into w/8.5/bugfix/CLDSRV-232/dont_send_empty_NextContinuationToken_on_listing_end 2022-07-06 10:56:04 -07:00
Nicolas Humbert 493ba63384 CLDSRV-233 Bump CloudServer 8.5.5 2022-07-06 15:39:51 +02:00
Nicolas Humbert 6f1c3286a9 fix object Lock tests 2022-07-06 10:01:38 +02:00
bert-e b8058920d7 Merge branch 'bugfix/CLDSRV-232/dont_send_empty_NextContinuationToken_on_listing_end' into tmp/octopus/w/8.4/bugfix/CLDSRV-232/dont_send_empty_NextContinuationToken_on_listing_end 2022-07-05 22:59:46 +00:00
bert-e 1138a5952c Merge branch 'w/8.4/bugfix/CLDSRV-232/dont_send_empty_NextContinuationToken_on_listing_end' into tmp/octopus/w/8.5/bugfix/CLDSRV-232/dont_send_empty_NextContinuationToken_on_listing_end 2022-07-05 22:59:46 +00:00
Nicolas Humbert 53708a5197 update arsenal 8.1.58 2022-07-05 22:00:12 +02:00
Nicolas Humbert 4e0a497367 CLDSRV-231 location type DMF prevents cloudserver from starting 2022-07-01 15:34:13 +02:00
williamlardier 7e6d5e5629
CLDSRV-224: return only once 2022-06-21 11:32:30 +02:00
williamlardier 266776650e
CLDSRV-224: bump package.json version 2022-06-21 10:01:24 +02:00
williamlardier 36d910fd97
CLDSRV-224: switch back to callback mode 2022-06-21 10:00:08 +02:00
williamlardier f5781c3609
CLDSRV-224: add monitoring for bucket tagging APIs 2022-06-21 10:00:08 +02:00
williamlardier 3378b6a439
CLDSRV-224: tests fixups 2022-06-21 10:00:07 +02:00
Will Toozs f47687de10
CLDSRV-224: bucket tagging tests 2022-06-21 10:00:07 +02:00
Will Toozs 9899e95cab
CLDSRV-224: add getBucketTagging 2022-06-21 10:00:07 +02:00
williamlardier 4c60757086
CLDSRV-225: switch back to callback mode 2022-06-21 09:57:03 +02:00
Will Toozs 09062e3020
CLDSRV-225: Implement delete bucket tagging 2022-06-21 09:57:01 +02:00
bert-e 2389f36f34 Merge branch 'feature/CLDSRV-223-implement-putbuckettagging' into q/8.5 2022-06-21 07:18:20 +00:00
Ronnie Smith 0852be8a2b
feature: CLDSRV-229 bump to 8.5.3 2022-06-17 11:47:02 -07:00
KillianG 3a5236239e
CLDSRV-167: use arsenal parse tag function 2022-06-17 10:43:23 +02:00
williamlardier a77e558d53
CLDSRV-223: Bump Arsenal 2022-06-17 10:43:02 +02:00
williamlardier 888b760834
CLDSRV-223: Fix comparison and add tests 2022-06-17 10:43:01 +02:00
KillianG a1c8c9adc1
CLDSRV-223: Implement bucket owner check and change check order
The checking order has been changed to match the AWS one, meaning that we
raise the first error we face (see the sketch after this entry):

- Checking every tag one by one
- Checking that the tag key is unique; the error is raised only if we
  have already seen a tag with the same key before
- Checking that the key length does not exceed 128 characters
- Checking that the tag value length does not exceed 256 characters
- After 50 valid tags, if there is one more, the too-many-tags error
  is raised
2022-06-17 10:43:01 +02:00
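A minimal sketch of the validation order described above; the error names and exact limits follow AWS conventions and are illustrative, not the exact cloudserver code:

    // Validate tags one by one and raise the first error encountered,
    // in the same order as AWS.
    function validateBucketTags(tags) {
        const seenKeys = new Set();
        for (let i = 0; i < tags.length; i++) {
            const { Key, Value } = tags[i];
            if (seenKeys.has(Key)) {
                throw new Error('InvalidTag: duplicate tag key');
            }
            seenKeys.add(Key);
            if (Key.length > 128) {
                throw new Error('InvalidTag: tag key too long');
            }
            if (Value.length > 256) {
                throw new Error('InvalidTag: tag value too long');
            }
            // The 51st tag triggers the error, after its own checks passed.
            if (i >= 50) {
                throw new Error('BadRequest: too many tags');
            }
        }
        return tags;
    }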
KillianG 7ac2adb23f
CLDSRV-167: implement put bucket tagging 2022-06-17 10:43:01 +02:00
bert-e 3e42758950 Merge branch 'feature/CLDSRV-220/add-s3-object-restore-operation' into q/8.5 2022-06-15 17:41:43 +00:00
bert-e aa70b840b7 Merge branch 'w/8.4/improvement/CLDSRV-228-bump-buckettagging' into tmp/octopus/w/8.5/improvement/CLDSRV-228-bump-buckettagging 2022-06-15 14:08:00 +00:00
Will Toozs 90b7316043
Merge remote-tracking branch 'origin/improvement/CLDSRV-228-bump-buckettagging' into w/8.4/improvement/CLDSRV-228-bump-buckettagging 2022-06-15 16:07:33 +02:00
bert-e 8daac3c50b Merge branches 'w/8.5/improvement/CLDSRV-169-get-bucket-tagging' and 'q/4481/8.4/improvement/CLDSRV-169-get-bucket-tagging' into tmp/octopus/q/8.5 2022-06-15 08:39:13 +00:00
bert-e 835cd193d4 Merge branches 'w/8.4/improvement/CLDSRV-169-get-bucket-tagging' and 'q/4481/7.10/improvement/CLDSRV-169-get-bucket-tagging' into tmp/octopus/q/8.4 2022-06-15 08:39:12 +00:00
bert-e 27777296bf Merge branch 'w/8.4/improvement/CLDSRV-169-get-bucket-tagging' into tmp/octopus/w/8.5/improvement/CLDSRV-169-get-bucket-tagging 2022-06-15 08:11:55 +00:00
Will Toozs d8f73ce56c
Merge remote-tracking branch 'origin/improvement/CLDSRV-169-get-bucket-tagging' into w/8.4/improvement/CLDSRV-169-get-bucket-tagging 2022-06-15 10:11:29 +02:00
bert-e 8fe33dee76 Merge branches 'w/8.5/improvement/CLDSRV-168-delete-bucket-tagging' and 'q/4480/8.4/improvement/CLDSRV-168-delete-bucket-tagging' into tmp/octopus/q/8.5 2022-06-14 18:53:30 +00:00
bert-e cff15fe737 Merge branches 'w/8.4/improvement/CLDSRV-168-delete-bucket-tagging' and 'q/4480/7.10/improvement/CLDSRV-168-delete-bucket-tagging' into tmp/octopus/q/8.4 2022-06-14 18:53:30 +00:00
bert-e dec7a13106 Merge branch 'w/8.4/improvement/CLDSRV-168-delete-bucket-tagging' into tmp/octopus/w/8.5/improvement/CLDSRV-168-delete-bucket-tagging 2022-06-14 18:29:02 +00:00
Will Toozs 6ce675ce01
Merge remote-tracking branch 'origin/improvement/CLDSRV-168-delete-bucket-tagging' into w/8.4/improvement/CLDSRV-168-delete-bucket-tagging 2022-06-14 20:28:28 +02:00
bert-e 32658b7e3b Merge branch 'w/8.4/improvement/CLDSRV-167-put-bucket-tagging' into tmp/octopus/w/8.5/improvement/CLDSRV-167-put-bucket-tagging 2022-06-14 15:31:55 +00:00
KillianG fcf617acf0
Merge remote-tracking branch 'origin/improvement/CLDSRV-167-put-bucket-tagging' into w/8.4/improvement/CLDSRV-167-put-bucket-tagging 2022-06-14 17:31:29 +02:00
bert-e 67e1611edb Merge branches 'development/8.5' and 'w/8.4/improvement/CLDSRV-167-put-bucket-tagging' into tmp/octopus/w/8.5/improvement/CLDSRV-167-put-bucket-tagging 2022-06-14 12:57:38 +00:00
bert-e 05532878d6 Merge branch 'development/8.4' into tmp/octopus/w/8.4/improvement/CLDSRV-167-put-bucket-tagging 2022-06-14 12:57:37 +00:00
KillianG 68037356f9
Merge remote-tracking branch 'origin/improvement/CLDSRV-167-put-bucket-tagging' into w/8.4/improvement/CLDSRV-167-put-bucket-tagging 2022-06-14 14:57:09 +02:00
Ronnie Smith fdfb76a99a
feature: CLDSRV-220 add origin op for object restore 2022-06-13 15:19:14 -07:00
williamlardier ab59e98977
Merge remote-tracking branch 'origin/bugfix/CLDSRV-219-bump-arsenal-version-with-sproxydclient-fix' into w/8.5/bugfix/CLDSRV-219-bump-arsenal-version-with-sproxydclient-fix 2022-06-10 14:48:14 +02:00
williamlardier 505c421014
CLDSRV-219: bump arsenal version to fix ts related errors 2022-06-10 14:45:08 +02:00
williamlardier e046b87eec
CLDSRV-219: skip NonCurrentTransition obsolete test 2022-06-10 11:42:18 +02:00
williamlardier 1b2f2478c8
CLDSRV-219: bump Arsenal version 2022-06-10 11:41:44 +02:00
bert-e f476a11faf Merge branches 'w/8.5/improvement/CLDSRV-216' and 'q/4575/8.4/improvement/CLDSRV-216' into tmp/octopus/q/8.5 2022-06-08 20:26:59 +00:00
bert-e f61c585184 Merge branch 'improvement/CLDSRV-216' into q/8.4 2022-06-08 20:26:59 +00:00
bert-e 0174252f54 Merge branch 'improvement/CLDSRV-216' into tmp/octopus/w/8.5/improvement/CLDSRV-216 2022-06-08 18:46:13 +00:00
Francois Ferrand 5a13eecd05
Use standard release procedure
Bump version **when** doing the release, not afterwards, as it is done
almost everywhere else:

* It makes the git history harder to read, with a tag on any commit AND
a separate commit (anywhere) which bumps the version
* It does not work with “yarn version”, which creates a misleadingly
named commit “v8.x.x” in our repos
* It requires creating an extra ticket after the release; and this
ticket does not actually represent anything: it is just a placeholder to “please” Bert-e (whereas a ticket fo>
* It creates the risk that this extra ticket/bump is done, and that someone
creates another bump when doing the next release (i.e. skipping a
release)

Issue: CLDSRV-216
2022-06-08 20:00:50 +02:00
bert-e bbd2dfefd7 Merge branch 'feature/CLDSRV-211' into q/8.4 2022-06-07 22:12:29 +00:00
bert-e b491a3e92c Merge branches 'w/8.5/feature/CLDSRV-211' and 'q/4549/8.4/feature/CLDSRV-211' into tmp/octopus/q/8.5 2022-06-07 22:12:29 +00:00
bert-e 9cb550514e Merge branch 'feature/CLDSRV-214' into q/8.5 2022-06-07 21:39:26 +00:00
Nicolas Humbert 74df4fcd65 CLDSRV-217 Bump CloudServer 8.5.2 2022-06-07 08:35:25 -04:00
bert-e 3a842d3b93 Merge branch 'w/8.4/improvement/CLDSRV-167-put-bucket-tagging' into tmp/octopus/w/8.5/improvement/CLDSRV-167-put-bucket-tagging 2022-06-07 11:55:06 +00:00
KillianG 63a790a3d7
Revert "Merge remote-tracking branch 'origin/improvement/CLDSRV-167-put-bucket-tagging' into w/8.4/improvement/CLDSRV-167-put-bucket-tagging"
This reverts commit 518253dc5e, reversing
changes made to ca91044fde.
2022-06-07 13:54:50 +02:00
KillianG 518253dc5e
Merge remote-tracking branch 'origin/improvement/CLDSRV-167-put-bucket-tagging' into w/8.4/improvement/CLDSRV-167-put-bucket-tagging 2022-06-07 13:54:37 +02:00
Francois Ferrand 717c5c6a7d
Remove left-over tiny version id
The tiny version id code has been removed; there is only the short version id,
which is handled fully in Arsenal by setting the S3_VERSION_ID_ENCODING_TYPE
variable.

The `VersionId.encode()` function no longer supports passing
an encoding type, so the code was of no use anymore.

Issue: CLDSRV-214
2022-06-07 10:09:46 +02:00
bert-e 22fdaad636 Merge branches 'w/8.5/bugfix/CLDSRV-215/delete-objects' and 'q/4560/8.4/bugfix/CLDSRV-215/delete-objects' into tmp/octopus/q/8.5 2022-06-02 19:42:44 +00:00
bert-e ca91044fde Merge branch 'q/4560/7.10/bugfix/CLDSRV-215/delete-objects' into tmp/normal/q/8.4 2022-06-02 19:42:44 +00:00
bert-e 266e0f9aa3 Merge branch 'w/8.4/bugfix/CLDSRV-215/delete-objects' into tmp/normal/q/8.4 2022-06-02 19:42:44 +00:00
bert-e b63f210b8f Merge branch 'w/8.4/bugfix/CLDSRV-215/delete-objects' into tmp/octopus/w/8.5/bugfix/CLDSRV-215/delete-objects 2022-06-02 12:53:16 +00:00
Nicolas Humbert 61f30e659c CLDSRV-215 adapt tests for CEPH 2022-06-02 08:52:48 -04:00
bert-e 2c375ee10d Merge branch 'w/8.4/bugfix/CLDSRV-215/delete-objects' into tmp/octopus/w/8.5/bugfix/CLDSRV-215/delete-objects 2022-06-02 12:45:05 +00:00
bert-e 8638f00c95 Merge branch 'bugfix/CLDSRV-215/delete-objects' into tmp/octopus/w/8.4/bugfix/CLDSRV-215/delete-objects 2022-06-02 12:45:05 +00:00
Ronnie Smith 9b4b755cf8
Merge remote-tracking branch 'origin/w/8.4/feature/CLDSRV-174-use-armory-work' into w/8.5/feature/CLDSRV-174-use-armory-work 2022-06-01 22:18:29 -07:00
Ronnie Smith 0f5ab42233
Merge remote-tracking branch 'origin/feature/CLDSRV-174-use-armory-work' into w/8.4/feature/CLDSRV-174-use-armory-work 2022-06-01 19:31:28 -07:00
Nicolas Humbert cb591f06fc CLDSRV-213 Check object has a cold location before putObjectVersion 2022-05-26 14:25:26 -04:00
bert-e abcbb75ad0 Merge branch 'feature/CLDSRV-211' into tmp/octopus/w/8.5/feature/CLDSRV-211 2022-05-25 09:17:32 +00:00
Francois Ferrand 1807abe656
Remove default replication endpoint mandate
The default replication endpoint should not be needed, especially since the
one we use (in Zenko) does not relate to an existing location: it is
used only to let Cloudserver start.

Issue: CLDSRV-211
2022-05-25 10:52:05 +02:00
williamlardier 8c939b3a05
CLDSRV-188: bump package.json to 8.5.1 2022-05-24 09:08:52 +02:00
williamlardier 5795a44e4c
CLDSRV-184: accept list of valid storage classes
Co-authored-by: Naren <54150791+naren-rajendran@users.noreply.github.com>
2022-05-24 08:26:33 +02:00
williamlardier 8c7862b51d
CLDSRV-184: apply changes to CopyObject API 2022-05-24 08:26:15 +02:00
williamlardier bf9d9fe3c5
CLDSRV-184: apply changes to MPU API 2022-05-24 08:26:15 +02:00
williamlardier 127ce9c619
CLDSRV-184: Update PutObject and add unit tests 2022-05-24 08:26:14 +02:00
bert-e a10c705310 Merge branch 'feature/CLDSRV-183-implement-restore-object-api' into q/8.5 2022-05-23 17:14:07 +00:00
Xin LI cb518a8f08 improvement: CLDSRV-183 implement restoreObject api 2022-05-23 18:52:20 +02:00
bert-e dc408bcc3c Merge branch 'feature/CLDSRV-205/update-restore' into q/8.5 2022-05-23 14:54:36 +00:00
Nicolas Humbert 7afced91ba CLDSRV-205 Update restore properties when putting object version 2022-05-20 16:27:55 -04:00
williamlardier 91c1eca7d6
CLDSRV-209: fix dmf checker and default values 2022-05-20 13:43:25 +02:00
williamlardier 78498030ab
CLDSRV-209: switch to guard clause 2022-05-20 10:50:54 +02:00
williamlardier 60a917cc08
CLDSRV-209: allow running cloudserver with empty details for dmf 2022-05-20 09:55:01 +02:00
bert-e 687b8565f9 Merge branch 'improvement/CLDSRV-208' into tmp/octopus/w/8.5/improvement/CLDSRV-208 2022-05-19 17:55:52 +00:00
Francois Ferrand da633f6b49
Use pre-built python 3.7 from distribution
Issue: CLDSRV-208
2022-05-19 19:25:30 +02:00
bert-e f6a66881fd Merge branch 'feature/CLDSRV-185-put-bucket-support-restore-object' into q/8.5 2022-05-18 13:53:03 +00:00
bert-e fda4c099b3 Merge branch 'feature/CLDSRV-204/simple-mpu-version' into q/8.5 2022-05-17 16:21:58 +00:00
williamlardier 3d0ee3fc4b
CLDSRV-185: add functional tests 2022-05-17 09:48:42 +02:00
williamlardier 6c2661eea5
CLDSRV-185: add unit tests 2022-05-17 08:46:19 +02:00
williamlardier 413b52f4d0
CLDSRV-185: add dmf location in json config file 2022-05-17 08:45:17 +02:00
williamlardier bf23f09c41
CLDSRV-185: clean dmf check function 2022-05-17 08:44:50 +02:00
Ronnie Smith 89d030bfe9
CLDSRV-185: add dmf location type 2022-05-17 08:43:52 +02:00
williamlardier 2d91180469
CLDSRV-185: reject bucket put if cold storage location 2022-05-17 08:37:24 +02:00
bert-e 64b83e9f2c Merge branch 'feature/CLDSRV-186-get-object-to-support-restore-object' into q/8.5 2022-05-17 06:34:06 +00:00
Nicolas Humbert 2ab4f3139e clean up putVersion tests 2022-05-16 22:31:51 -04:00
Nicolas Humbert 87b199dc6f CLDSRV-204 Complete MPU with x-scal-s3-version-id header 2022-05-16 20:54:04 -04:00
Nicolas Humbert a0c97d2c06 update the microVersionId 2022-05-16 15:16:11 -04:00
Nicolas Humbert 8fd7b3ed30 CLDSRV-202 Put object version with x-scal-s3-version-id 2022-05-16 11:01:52 -04:00
williamlardier eeb6ff8c3b
CLDSRV-186: comply with AWS errors 2022-05-16 11:44:21 +02:00
williamlardier 9922acf8f9
CLDSRV-186: fixups 2022-05-16 11:44:21 +02:00
williamlardier 9d7c7dedbb
CLDSRV-186: add unit tests for new Get Object 2022-05-16 11:44:21 +02:00
williamlardier 3ff27f488f
CLDSRV-186: return cold storage related errors in Get Object 2022-05-16 11:44:20 +02:00
williamlardier a5df203ca2
CLDSRV-187: update yarn lock 2022-05-16 11:43:02 +02:00
williamlardier 58e2625e3e
CLDSRV-187: move coldStorage file 2022-05-16 11:43:02 +02:00
williamlardier c0acd4dc1b
CLDSRV-187: fixups 2022-05-16 11:43:02 +02:00
williamlardier 78f131df56
CLDSRV-187: remove undefined header from response 2022-05-16 11:43:01 +02:00
williamlardier 721f54cb92
CLDSRV-187: add dmf in test config legacy 2022-05-16 11:43:01 +02:00
Yutaka Oishi a39c3f475c
CLDSRV-187: Use util function to compute returned header 2022-05-16 11:43:01 +02:00
williamlardier dd20b82745
CLDSRV-187: Cleanup 2022-05-16 11:43:00 +02:00
williamlardier 62163eb8aa
CLDSRV-187: Linting 2022-05-16 11:43:00 +02:00
williamlardier 7ae162ee0c
CLDSRV-187: Add tests for new HEAD object feature 2022-05-16 11:43:00 +02:00
williamlardier 54d313fffa
CLDSRV-187: Add util file for bucket and object MD mocking 2022-05-16 11:42:59 +02:00
williamlardier d22b6b6aa5
CLDSRV-187: HEAD to handle object in cold storage 2022-05-16 11:42:59 +02:00
williamlardier 5acdfccdea
CLDSRV-187: update arsenal 2022-05-16 11:42:59 +02:00
Nicolas Humbert 30f6c803af CLDSRV-192 Introduce s3:PutObjectVersion permission 2022-05-12 14:46:45 -07:00
Francois Ferrand a235a1a175
Merge remote-tracking branch 'origin/bugfix/CLDSRV-201' into w/8.5/bugfix/CLDSRV-201 2022-05-12 15:20:18 +02:00
Francois Ferrand d5a9960967
Bump version 8.4.10
Issue: CLDSRV-201
2022-05-12 15:13:33 +02:00
Francois Ferrand fffba0bdbb
Increase memory for cloudserver docker image build
Issue: CLDSRV-201
2022-05-12 11:58:33 +02:00
bert-e 2a165dc0da Merge branch 'w/8.4/bugfix/CLDSRV-196/fixLoggerConfiguration' into tmp/octopus/w/8.5/bugfix/CLDSRV-196/fixLoggerConfiguration 2022-05-11 21:45:38 +00:00
bert-e 1003b77abd Merge branch 'bugfix/CLDSRV-196/fixLoggerConfiguration' into tmp/octopus/w/8.4/bugfix/CLDSRV-196/fixLoggerConfiguration 2022-05-11 21:45:38 +00:00
Alexander Chan 7fd547db24 CLDSRV-196: create new werelogs object over using global werelogs 2022-05-11 14:36:27 -07:00
Rahul Padigela 830ddb6e3c Merge remote-tracking branch 'origin/improvement/CLDSRV-193-bump-cloudserver-8-4-9' into w/8.5/improvement/CLDSRV-193-bump-cloudserver-8-4-9 2022-05-11 11:55:04 -07:00
KillianG 8d00ef947d
CLDSRV-198: bump cloudserver 8.5.0 2022-05-11 16:32:34 +02:00
KillianG fe432a0edb
CLDSRV-193: bump cloudserver 8.4.9 2022-05-11 16:01:48 +02:00
Kerkesni 25a70dddff
feature: CLDSRV-178 match aws putBucketNotification error 2022-05-09 17:52:58 +02:00
Xin LI 03fa7d00a2 improvement: CLDSRV-191 migrate arsenal to cloudserver 8.x 2022-05-09 16:44:53 +02:00
bert-e 90c98b5fae Merge branch 'feature/CLDSRV-175-support-object-acl-put-notification' into q/8.4 2022-04-22 16:56:48 +00:00
bert-e 8d52258ec5 Merge branch 'w/8.2/bugfix/CLDSRV-179-align-with-arsenal-mddb-api-change' into tmp/octopus/w/8.3/bugfix/CLDSRV-179-align-with-arsenal-mddb-api-change 2022-04-21 16:49:33 +00:00
bert-e 44549c79d2 Merge branch 'w/8.3/bugfix/CLDSRV-179-align-with-arsenal-mddb-api-change' into tmp/octopus/w/8.4/bugfix/CLDSRV-179-align-with-arsenal-mddb-api-change 2022-04-21 16:49:33 +00:00
Jordi Bertran de Balanda 9837c0a0a7 Merge remote-tracking branch 'origin/bugfix/CLDSRV-179-align-with-arsenal-mddb-api-change' into w/8.2/bugfix/CLDSRV-179-align-with-arsenal-mddb-api-change 2022-04-21 18:35:28 +02:00
Kerkesni 89f286eab6
feature: CLDSRV-175 add support for s3:ObjectACL:Put notification
The notification event should only be instantiated when a new ACL is created
or when an old one is changed; ACL requests that change nothing don't generate
a notification event
2022-04-21 16:26:29 +02:00
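A minimal sketch of the rule described in the commit above, using hypothetical names (`Acl`, `aclsEqual`, `onAclPut`) rather than cloudserver's actual types or helpers:

```typescript
// Hypothetical ACL shape; not cloudserver's real metadata model.
interface Acl {
    owner: string;
    grants: { grantee: string; permission: string }[];
}

function aclsEqual(oldAcl: Acl, newAcl: Acl): boolean {
    // A real implementation would compare fields explicitly; JSON comparison
    // is enough for this sketch, assuming stable ordering.
    return JSON.stringify(oldAcl) === JSON.stringify(newAcl);
}

function onAclPut(oldAcl: Acl, newAcl: Acl, emit: (event: string) => void): void {
    // Only emit the notification when the PUT actually changes the ACL.
    if (!aclsEqual(oldAcl, newAcl)) {
        emit('s3:ObjectACL:Put');
    }
}
```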
bert-e e83057c75c Merge branch 'feature/CLDSRV-176-support-object-tagging-notifications' into q/8.4 2022-04-21 08:16:39 +00:00
bert-e 48aaede6db Merge branch 'w/8.3/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck' into tmp/octopus/w/8.4/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck 2022-04-20 17:04:59 +00:00
bert-e 0c704913a3 Merge branch 'w/8.2/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck' into tmp/octopus/w/8.3/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck 2022-04-20 17:04:58 +00:00
Jonathan Gramain ad9e34c5fd Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck' into w/8.2/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck 2022-04-20 10:04:40 -07:00
bert-e b486489888 Merge branch 'w/8.3/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck' into tmp/octopus/w/8.4/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck 2022-04-19 22:59:28 +00:00
bert-e 8b0093b56b Merge branch 'w/8.2/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck' into tmp/octopus/w/8.3/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck 2022-04-19 22:59:28 +00:00
Jonathan Gramain a667315a4c Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck' into w/8.2/bugfix/CLDSRV-177-checkEmptyLocationsInLocationsSanityCheck 2022-04-19 15:38:47 -07:00
Kerkesni 53d9f8cf99
feature: CLDSRV-176 support s3:ObjectTagging:* notifications 2022-04-19 16:05:48 +02:00
bert-e fe923f679d Merge branches 'w/8.4/bugfix/CLDSRV-173/delete-marker-lifecycle' and 'q/4444/8.3/bugfix/CLDSRV-173/delete-marker-lifecycle' into tmp/octopus/q/8.4 2022-04-14 21:18:11 +00:00
bert-e 84cbef6210 Merge branches 'w/8.3/bugfix/CLDSRV-173/delete-marker-lifecycle' and 'q/4444/8.2/bugfix/CLDSRV-173/delete-marker-lifecycle' into tmp/octopus/q/8.3 2022-04-14 21:18:11 +00:00
bert-e 59e49ee50d Merge branches 'w/8.2/bugfix/CLDSRV-173/delete-marker-lifecycle' and 'q/4444/7.10/bugfix/CLDSRV-173/delete-marker-lifecycle' into tmp/octopus/q/8.2 2022-04-14 21:18:10 +00:00
bert-e 3b63f5ec27 Merge branch 'w/8.3/bugfix/CLDSRV-173/delete-marker-lifecycle' into tmp/octopus/w/8.4/bugfix/CLDSRV-173/delete-marker-lifecycle 2022-04-14 20:55:54 +00:00
bert-e 8bae222521 Merge branch 'w/8.2/bugfix/CLDSRV-173/delete-marker-lifecycle' into tmp/octopus/w/8.3/bugfix/CLDSRV-173/delete-marker-lifecycle 2022-04-14 20:55:54 +00:00
Nicolas Humbert 95d218c824 Merge remote-tracking branch 'origin/bugfix/CLDSRV-173/delete-marker-lifecycle' into w/8.2/bugfix/CLDSRV-173/delete-marker-lifecycle 2022-04-14 16:35:13 -04:00
Francois Ferrand 2a80c583a6
Rename dashboard
Issue: CLDSRV-172
2022-04-13 12:20:09 +02:00
bert-e 74d1a26dc9 Merge branches 'w/8.3/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' and 'q/4417/8.2/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' into tmp/octopus/q/8.3 2022-04-12 02:10:52 +00:00
bert-e 3fd0b95665 Merge branches 'w/8.4/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' and 'q/4417/8.3/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' into tmp/octopus/q/8.4 2022-04-12 02:10:52 +00:00
bert-e c79597e387 Merge branches 'w/8.2/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' and 'q/4417/7.10/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' into tmp/octopus/q/8.2 2022-04-12 02:10:51 +00:00
bert-e 4c3378d1b9 Merge branch 'w/8.3/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' into tmp/octopus/w/8.4/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart 2022-04-12 01:39:21 +00:00
bert-e 44744ffbe3 Merge branch 'w/8.2/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' into tmp/octopus/w/8.3/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart 2022-04-12 01:39:20 +00:00
Jonathan Gramain 0e4d685957 Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart' into w/8.2/bugfix/CLDSRV-170-raceCompleteMPUAndUploadPart 2022-04-11 18:23:39 -07:00
bert-e 683c7c96af Merge branch 'w/8.3/bugfix/S3C-5390-s3api_head-object_with_part-number_1_on_empty_file_fails' into tmp/octopus/w/8.4/bugfix/S3C-5390-s3api_head-object_with_part-number_1_on_empty_file_fails 2022-04-11 23:00:07 +00:00
bert-e b21ba96907 Merge branch 'w/8.2/bugfix/S3C-5390-s3api_head-object_with_part-number_1_on_empty_file_fails' into tmp/octopus/w/8.3/bugfix/S3C-5390-s3api_head-object_with_part-number_1_on_empty_file_fails 2022-04-11 23:00:07 +00:00
bert-e 2cdd882b42 Merge branch 'w/7.10/bugfix/S3C-5390-s3api_head-object_with_part-number_1_on_empty_file_fails' into tmp/octopus/w/8.2/bugfix/S3C-5390-s3api_head-object_with_part-number_1_on_empty_file_fails 2022-04-11 23:00:07 +00:00
Francois Ferrand 46eda1cc14
Use 'mean' computation for dl/ul chunk size panels
Issue: CLDSRV-166
2022-04-06 18:57:16 +02:00
Francois Ferrand 3366f2339c
Display internal cloudserver metrics in dashboard
Use a templating variable to let the user select which information to
show.

Issue: CLDSRV-166
2022-04-06 18:57:16 +02:00
Francois Ferrand 10e3d4da63
Bump version to 8.4.8 2022-03-31 21:46:07 +02:00
Francois Ferrand 94ba153d7f
Fix computation for http requests panels
Need to use `sum` aggregation to get a meaningful Stat value.

Issue: CLDSRV-164
2022-03-30 10:16:54 +02:00
Francois Ferrand 12408bffa8
Fix success rate gauge
- Filter out periods with no http requests (NaN)
- Display greyed-out "-" when no data
- Compute mean value over current range

Issue: CLDSRV-164
2022-03-30 10:15:01 +02:00
bert-e 229c3eb251 Merge branch 'w/8.3/bugfix/CLDSRV-163-skip-failing-test' into tmp/octopus/w/8.4/bugfix/CLDSRV-163-skip-failing-test 2022-03-29 19:39:43 +00:00
bert-e 6e7fdfd8a9 Merge branch 'w/8.2/bugfix/CLDSRV-163-skip-failing-test' into tmp/octopus/w/8.3/bugfix/CLDSRV-163-skip-failing-test 2022-03-29 19:39:42 +00:00
bert-e 625bc7f5e5 Merge branch 'w/7.10/bugfix/CLDSRV-163-skip-failing-test' into tmp/octopus/w/8.2/bugfix/CLDSRV-163-skip-failing-test 2022-03-29 19:39:42 +00:00
Kerkesni 4b073de34f
improvement: CLDSRV-152 bump version to 8.4.7 2022-03-25 19:22:17 +01:00
bert-e ad257db42d Merge branches 'w/8.4/bugfix/CLDSRV-159-include-listing-phd-bugfix' and 'q/4376/8.3/bugfix/CLDSRV-159-include-listing-phd-bugfix' into tmp/octopus/q/8.4 2022-03-25 15:43:06 +00:00
bert-e 7cc1926fd2 Merge branch 'bugfix/CLDSRV-159-include-listing-phd-bugfix' into q/8.3 2022-03-25 15:43:05 +00:00
Kerkesni 05f99c74e3
Merge remote-tracking branch 'origin/bugfix/CLDSRV-159-include-listing-phd-bugfix' into w/8.4/bugfix/CLDSRV-159-include-listing-phd-bugfix 2022-03-25 15:35:53 +01:00
Kerkesni 84aefc46dd
bugfix: CLDSRV-159 bump arsenal to 8.1.38 2022-03-25 15:33:52 +01:00
bert-e 3bc1150a2a Merge branch 'q/4373/8.3/bugfix/CLDSRV-157' into tmp/normal/q/8.4 2022-03-25 10:00:30 +00:00
bert-e 4617917714 Merge branch 'bugfix/CLDSRV-157' into q/8.3 2022-03-25 10:00:29 +00:00
bert-e a371727022 Merge branch 'w/8.4/bugfix/CLDSRV-157' into tmp/normal/q/8.4 2022-03-25 10:00:29 +00:00
Xin LI 4c400bdc3f improvement: CLDSRV-158 bump version to 8.4.6 2022-03-25 09:38:48 +01:00
Francois Ferrand c6af1e5cea
Set noValue on download/upload chunk size
Issue: CLDSRV-157
2022-03-24 16:50:14 +01:00
Francois Ferrand 806c0ad2dd
Fix upload/download chunk size
Issue: CLDSRV-157
2022-03-24 16:47:02 +01:00
Francois Ferrand cde78ba363
Merge remote-tracking branch 'origin/bugfix/CLDSRV-157' into w/8.4/bugfix/CLDSRV-157 2022-03-24 16:45:38 +01:00
Francois Ferrand ae841997f6
Fix upload/download chunk size
Issue: CLDSRV-157
2022-03-24 16:36:23 +01:00
bert-e 192fd70d53 Merge branch 'improvement/CLDSRV-155-search_bucket-support-list-versions' into q/8.4 2022-03-24 09:19:24 +00:00
bert-e 1499f5e67d Merge branch 'w/8.3/improvement/CLDSRV-156-upgrade-vaultclient' into tmp/octopus/w/8.4/improvement/CLDSRV-156-upgrade-vaultclient 2022-03-24 07:11:57 +00:00
Naren b7059b8329 Merge remote-tracking branch 'origin/w/8.2/improvement/CLDSRV-156-upgrade-vaultclient' into w/8.3/improvement/CLDSRV-156-upgrade-vaultclient 2022-03-23 23:40:54 -07:00
Naren b925fa7c8c Merge remote-tracking branch 'origin/w/7.10/improvement/CLDSRV-156-upgrade-vaultclient' into w/8.2/improvement/CLDSRV-156-upgrade-vaultclient 2022-03-23 23:01:14 -07:00
Francois Ferrand 886dc44ab1
Fix wrong label on reporter's Up panel
Issue: CLDSRV-149
2022-03-23 15:07:43 +01:00
Francois Ferrand 9d4723834d
Use last value for Up panels
Up and Reporter panels must show the instantaneous value, even if null:
i.e. if there are no pods.

Issue: CLDSRV-149
2022-03-23 15:06:32 +01:00
Francois Ferrand e0c1f529a6
Fix last report counter
- Formula must account for the date of the last successful `count-items`
cronjob execution.
- Unit is actually seconds.
- Display last value, even if null.

Issue: CLDSRV-149
2022-03-23 15:06:08 +01:00
Francois Ferrand 8b2523e78c
Merge remote-tracking branch 'origin/bugfix/CLDSRV-149-use-last-count-time' into w/8.4/bugfix/CLDSRV-149-use-last-count-time 2022-03-23 15:04:26 +01:00
Francois Ferrand f528fc9bf9
Use last value for Up panels
Up and Reporter panels must show the instantaneous value, even if null:
i.e. if there are no pods.

Issue: CLDSRV-149
2022-03-23 14:20:34 +01:00
Francois Ferrand 68b27be192
Fix last report counter
- Formula must account for the date of the last successful `count-items`
cronjob execution.
- Unit is actually seconds.
- Display last value, even if null.

Issue: CLDSRV-149
2022-03-23 14:20:34 +01:00
bert-e 0abac6cb61 Merge branch 'bugfix/CLDSRV-149' into q/8.3 2022-03-23 10:36:09 +00:00
bert-e a3bf81e8b7 Merge branches 'w/8.4/bugfix/CLDSRV-149' and 'q/4350/8.3/bugfix/CLDSRV-149' into tmp/octopus/q/8.4 2022-03-23 10:36:09 +00:00
Xin LI 40d67e69a5 improvement: CLDSRV-155-search_bucket support list versions 2022-03-23 09:18:23 +01:00
Francois Ferrand cbf1ee3733
Merge remote-tracking branch 'origin/bugfix/CLDSRV-149' into w/8.4/bugfix/CLDSRV-149 2022-03-21 18:09:00 +01:00
Francois Ferrand fe8fe2c13b
Merge remote-tracking branch 'origin/development/8.4' into w/8.4/bugfix/CLDSRV-149 2022-03-21 18:07:02 +01:00
Xin LI 24a49c2fb8 improvement: CLDSRV-154-bump-version 2022-03-21 16:25:52 +01:00
Xin LI 21871de66e improvement: CLDSRV-153-bump-arsenal 2022-03-21 15:17:35 +01:00
Francois Ferrand 265a212f0e
Show status of report-handler
Issue: CLDSRV-149
2022-03-21 09:22:03 +01:00
Francois Ferrand 3c37a63b75
Fix wrong computation for disk available
Issue: CLDSRV-149
2022-03-21 09:16:18 +01:00
Francois Ferrand f8fc95b778
Remove local data storage graph
Display a bigger panel for buckets and objects count, and add ingestion
rate panels (both obj/s and byte/s).

Issue: CLDSRV-149
2022-03-21 09:13:31 +01:00
Francois Ferrand 170d0b2034
Fix computation for injection rate panel
Use "mean" value, which is more relevant for a "rate", and limit to 1 decimal.

Issue: CLDSRV-149
2022-03-21 09:13:31 +01:00
Francois Ferrand 6a32c8a0cb
Use number of objects/buckets from last report
The value from report is now always exposed. The logic of adding report
and dynamic operations would work in a single-instance scenario; but
things cannot "just" be added when it gets distributed over multiple
pods: there must be a single report handler, all other instances will
report only their own operations, and there is no safe way to combine
all the metrics together.

For now, just use the value from report handler, so the value is roughly
correct, although with some delay. Also expose the timestamp of the
report, so this delay may be displayed (or eventually used to improve
computations).

Issue: CLDSRV-149
2022-03-21 09:13:31 +01:00
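As a hedged illustration of the approach described in the commit above (the metric and field names here are assumptions, not the actual cloudserver metrics), the report values can be exposed as gauges alongside the report timestamp:

```typescript
import { Gauge } from 'prom-client';

// Hypothetical gauges fed from the count-items report, not from live deltas.
const bucketCount = new Gauge({ name: 'report_bucket_count', help: 'Buckets from last report' });
const objectCount = new Gauge({ name: 'report_object_count', help: 'Objects from last report' });
const reportTimestamp = new Gauge({
    name: 'report_timestamp_seconds',
    help: 'Unix time of the last count-items report',
});

function publishReport(report: { buckets: number; objects: number; dateSeconds: number }): void {
    bucketCount.set(report.buckets);
    objectCount.set(report.objects);
    // Exposing the report time lets dashboards show how stale the counts are.
    reportTimestamp.set(report.dateSeconds);
}
```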
Francois Ferrand 0700006207
Use piechart for top10 panels
Issue: CLDSRV-149
2022-03-21 09:13:31 +01:00
Francois Ferrand 403e1484cd
Fix labels & description for overview panels
Issue: CLDSRV-149
2022-03-21 09:13:31 +01:00
Francois Ferrand 5ceba2dd87
Merge remote-tracking branch 'origin/bugfix/CLDSRV-149' into w/8.4/bugfix/CLDSRV-149 2022-03-18 13:36:56 +01:00
Francois Ferrand 79fa3254ae
Show status of report-handler
Issue: CLDSRV-149
2022-03-17 15:54:36 +01:00
Francois Ferrand b69ccf28f7
Remove local data storage graph
Display a bigger panel for buckets and objects count, and add ingestion
rate panels (both obj/s and byte/s).

Issue: CLDSRV-149
2022-03-17 15:43:31 +01:00
Francois Ferrand e854f6ccac
Fix computation for injection rate panel
Use "mean" value, which is more relevant for a "rate", and limit to 1 decimal.

Issue: CLDSRV-149
2022-03-17 14:54:45 +01:00
Francois Ferrand 3274009341
Use number of objects/buckets from last report
The value from report is now always exposed. The logic of adding report
and dynamic operations would work in a single-instance scenario; but
things cannot "just" be added when it gets distributed over multiple
pods: there must be a single report handler, all other instances will
report only their own operations, and there is no safe way to combine
all the metrics together.

For now, just use the value from report handler, so the value is roughly
correct, although with some delay. Also expose the timestamp of the
report, so this delay may be displayed (or eventually used to improve
computations).

Issue: CLDSRV-149
2022-03-17 13:30:11 +01:00
Francois Ferrand bde2df02f6
Use piechart for top10 panels
Issue: CLDSRV-149
2022-03-17 13:29:53 +01:00
Francois Ferrand fc44bc9e97
Fix labels & description for overview panels
Issue: CLDSRV-149
2022-03-17 13:29:53 +01:00
Alexandre Lavigne 6012e5c9b2
CLDSRV-148 - set only required inputs 2022-03-17 13:16:58 +01:00
Alexandre Lavigne 0dafa7c665
CLDSRV-148 - test alerts
Run action `action-prom-render-test` to test alerts
2022-03-17 13:15:42 +01:00
bert-e 2de0086801 Merge branch 'feature/CLDSRV-134-integrate-new-bucket-format' into tmp/octopus/w/8.4/feature/CLDSRV-134-integrate-new-bucket-format 2022-03-16 11:42:17 +00:00
Kerkesni 0d8fe7ba5e
feature: CLDSRV-134 add tests for mixed bucket formats
Added functional tests verifying that S3 operations respect
the bucket vFormat when buckets use different formats
2022-03-16 12:08:05 +01:00
Kerkesni f3db9b8ed3
feature: CLDSRV-134 Added CI steps to run tests in both formats
Added CI steps to run functional tests in both bucket metadata formats
2022-03-16 12:08:05 +01:00
Kerkesni 6ad4f03da0
feature: CLDSRV-134 Bump Arsenal to 8.1.34 2022-03-16 12:08:01 +01:00
KillianG b18b5ba14c
CLDSRV-146: bump cloudserver version 2022-03-11 11:52:30 +01:00
Francois Ferrand ffb8ed7a5b Remove type annotations
The version of flake8 in Eve's worker does not seem to support
type annotations, and fails with: `E999 SyntaxError: invalid syntax`.

Issue: CLDSRV-147
2022-03-10 11:43:22 +01:00
Francois Ferrand c90d36de02 Make flake8 compliant
Issue: CLDSRV-147
2022-03-10 11:43:20 +01:00
Francois Ferrand a76709474d Migrate dashboard to grafanalib
Issue: CLDSRV-147
2022-03-10 11:42:33 +01:00
bert-e 12bcb1088c Merge branch 'improvement/CLDSRV-143-bump-package-to-8.3.12' into q/8.3 2022-03-01 08:03:56 +00:00
bert-e 0c7f4fd832 Merge branches 'w/8.4/improvement/CLDSRV-143-bump-package-to-8.3.12' and 'q/4311/8.3/improvement/CLDSRV-143-bump-package-to-8.3.12' into tmp/octopus/q/8.4 2022-03-01 08:03:56 +00:00
bert-e f945a77e50 Merge branches 'w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' and 'q/4314/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/q/8.3 2022-02-28 18:20:36 +00:00
bert-e 952b98e9c2 Merge branches 'w/8.4/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' and 'q/4314/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/q/8.4 2022-02-28 18:20:36 +00:00
bert-e 17425df1aa Merge branches 'w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' and 'q/4314/7.10/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/q/8.2 2022-02-28 18:20:35 +00:00
bert-e 326b825ba9 Merge branch 'w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-28 17:54:06 +00:00
bert-e 9e8459bd90 Merge branch 'w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.4/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-28 17:54:06 +00:00
Ronnie Smith 921e68f03a
feature: CLDSRV-144 use array iterable over for loop 2022-02-28 09:53:33 -08:00
bert-e 74e2bb251f Merge branch 'w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 03:05:38 +00:00
bert-e d1c5b75aa1 Merge branch 'w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.4/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 03:05:38 +00:00
bert-e c205437587 Merge branch 'w/7.10/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 03:05:37 +00:00
bert-e 32b1369772 Merge branch 'w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 01:42:52 +00:00
bert-e 7f64198565 Merge branch 'w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.4/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 01:42:52 +00:00
bert-e de6d6cf8d2 Merge branch 'w/7.10/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 01:42:51 +00:00
bert-e e54edce1fe Merge branch 'w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.4/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 00:31:26 +00:00
bert-e 59027d1790 Merge branch 'w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.3/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 00:31:25 +00:00
bert-e d540133ab7 Merge branch 'w/7.10/bugfix/CLDSRV-144/do-not-allow-non-printable-chars' into tmp/octopus/w/8.2/bugfix/CLDSRV-144/do-not-allow-non-printable-chars 2022-02-26 00:31:25 +00:00
williamlardier 0e13475285
Merge remote-tracking branch 'origin/improvement/CLDSRV-143-bump-package-to-8.3.12' into w/8.4/improvement/CLDSRV-143-bump-package-to-8.3.12 2022-02-25 14:02:56 +01:00
williamlardier 8760f955c4
CLDSRV-143: bump package.json to v8.3.12 2022-02-25 14:01:28 +01:00
williamlardier a7b436f05c
Merge remote-tracking branch 'origin/bugfix/CLDSRV-139-allow-extending-re-period-in-governance-without-bypass-header' into w/8.4/bugfix/CLDSRV-139-allow-extending-re-period-in-governance-without-bypass-header 2022-02-25 13:36:53 +01:00
williamlardier d6dac34024
CLDSRV-193: bump version in package.json 2022-02-25 12:35:53 +01:00
williamlardier 1b02a4a8d2
CLDSRV-193: allow extension of ret period without header 2022-02-25 12:35:53 +01:00
bert-e 0778f1b342 Merge branch 'w/8.3/feature/CLDSRV-102/aborted_mpu_ghost_put' into tmp/octopus/w/8.4/feature/CLDSRV-102/aborted_mpu_ghost_put 2022-02-24 19:47:57 +00:00
bert-e e3faf94954 Merge branch 'w/8.2/feature/CLDSRV-102/aborted_mpu_ghost_put' into tmp/octopus/w/8.3/feature/CLDSRV-102/aborted_mpu_ghost_put 2022-02-24 19:47:57 +00:00
Taylor McKinnon 80b686eff3 Merge remote-tracking branch 'origin/w/7.10/feature/CLDSRV-102/aborted_mpu_ghost_put' into w/8.2/feature/CLDSRV-102/aborted_mpu_ghost_put 2022-02-24 11:45:00 -08:00
bert-e 793c46936b Merge branch 'feature/CLDSRV-128' into tmp/octopus/w/8.4/feature/CLDSRV-128 2022-02-21 13:41:34 +00:00
Francois Ferrand 6e66deffa0 Introduce Cloudserver alerts tests
Issue: CLDSRV-128
2022-02-17 12:25:31 +01:00
Xin LI 274838a953 improvement: CLDSRV-125-typo-fix 2022-02-09 16:45:10 +01:00
Xin LI 033e3cb725 improvement: CLDSRV-125-modify-search_bucket.js-to-support-sessionUser 2022-02-09 15:34:58 +01:00
bert-e bf1357b3cc Merge branch 'improvement/CLDSRV-123-bump-version-8.4.1' into q/8.4 2022-02-09 10:57:54 +00:00
Xin LI ea864cd432 improvement: CLDSRV-123-bump-version-8.4.2 2022-02-09 11:13:51 +01:00
bert-e 2bba7f22bb Merge branches 'w/8.4/improvement/CLDSRV-49/fix-test' and 'q/4257/8.3/improvement/CLDSRV-49/fix-test' into tmp/octopus/q/8.4 2022-02-09 10:00:42 +00:00
bert-e b0e0d3b43e Merge branch 'improvement/CLDSRV-49/fix-test' into q/8.2 2022-02-09 10:00:41 +00:00
bert-e 8948de1945 Merge branches 'w/8.3/improvement/CLDSRV-49/fix-test' and 'q/4257/8.2/improvement/CLDSRV-49/fix-test' into tmp/octopus/q/8.3 2022-02-09 10:00:41 +00:00
Nicolas Humbert 117ef04e27 metadataSearch implemented 2022-02-09 10:36:43 +01:00
bert-e fb04099921 Merge branch 'w/8.3/improvement/CLDSRV-49/fix-test' into tmp/octopus/w/8.4/improvement/CLDSRV-49/fix-test 2022-02-09 09:34:03 +00:00
Nicolas Humbert 7d8ddc08f1 Merge remote-tracking branch 'origin/improvement/CLDSRV-49/fix-test' into w/8.3/improvement/CLDSRV-49/fix-test 2022-02-09 10:04:09 +01:00
Nicolas Humbert 7bb77208a8 CLDSRV-49 handle metadataSearch before implementation 2022-02-09 09:59:32 +01:00
Xin LI 547562956b improvement: CLDSRV-123-bump-version-8.4.1 2022-02-08 19:13:33 +01:00
Nicolas Humbert a622dc9a07 Merge remote-tracking branch 'origin/w/8.3/feature/CLDSRV-49/UpgradeToNode16-clean' into w/8.4/feature/CLDSRV-49/UpgradeToNode16-clean 2022-02-08 18:32:16 +01:00
Xin LI 3795552534 improvement: CLDSRV-122-bump-version-8.4.0 2022-02-08 17:40:33 +01:00
Nicolas Humbert 964fb0175a fix: event close is always emitted 2022-02-08 17:25:51 +01:00
Nicolas Humbert 0ecfa9a2e2 Merge remote-tracking branch 'origin/w/8.2/feature/CLDSRV-49/UpgradeToNode16-clean' into w/8.3/feature/CLDSRV-49/UpgradeToNode16-clean 2022-02-08 17:20:52 +01:00
bert-e 2f503ac62e Merge branch 'feature/CLDSRV-112-add-mdsearch-api' into q/8.4 2022-02-08 16:10:11 +00:00
Nicolas Humbert 686994075c fix: NodeJS 16 upgrade 2022-02-08 16:10:58 +01:00
Xin LI 7c511c7f76 feature: CLDSRV-112-add-new-mdsearch-api(copy from bucketGet) 2022-02-08 14:38:40 +01:00
Nicolas Humbert 0a4542b990 Merge remote-tracking branch 'origin/w/7.10/feature/CLDSRV-49/UpgradeToNode16-clean' into w/8.2/feature/CLDSRV-49/UpgradeToNode16-clean 2022-02-08 14:03:39 +01:00
bert-e c4093d5c9a Merge branch 'bugfix/CLDSRV-120' into q/8.3 2022-02-04 09:05:02 +00:00
bert-e 457d447dd6 Merge branch 'w/8.2/bugfix/CLDSRV-111-log-client-ip-correctly-stabilization' into tmp/octopus/w/8.3/bugfix/CLDSRV-111-log-client-ip-correctly-stabilization 2022-02-03 19:40:53 +00:00
Naren 95fe7d9a8e Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-111-log-client-ip-correctly-stabilization' into w/8.2/bugfix/CLDSRV-111-log-client-ip-correctly-stabilization 2022-02-03 11:21:34 -08:00
Francois Ferrand d2aa29d03a Display separate in/out scales for bandwidth
Display 'in' bandwidth on the left, and 'out' bandwidth on the right.

Issue: CLDSRV-120
2022-02-03 15:46:12 +01:00
Francois Ferrand 23fa11de6c Fix datasource in success rate panel
Issue: CLDSRV-120
2022-02-03 15:17:05 +01:00
Naren 8fbba4e3c4 Merge remote-tracking branch 'origin/w/8.2/bugfix/CLDSRV-111-log-client-ip-correctly' into w/8.3/bugfix/CLDSRV-111-log-client-ip-correctly 2022-02-02 23:11:35 -08:00
Naren 3fc455ec18 bf: CLDSRV-111 skip buggy gcp test 2022-02-02 22:35:32 -08:00
Naren a6224e87ce bf: CLDSRV-111 use arsenal v8.1.25 2022-02-02 14:40:37 -08:00
Naren 114b4c9560 bf: CLDSRV-111 override dependency resolutions
Override dependency resolutions for ioctl, npmlog and npmcli/fs; this
is done to avoid ioctl and fcntl compilation issues with Node v10.
2022-02-02 12:52:52 -08:00
Naren b05d723640 Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-111-log-client-ip-correctly' into w/8.2/bugfix/CLDSRV-111-log-client-ip-correctly 2022-02-02 12:51:47 -08:00
bert-e 72617f4ab1 Merge branch 'feature/CLDSRV-110' into tmp/octopus/w/8.3/feature/CLDSRV-110 2022-01-27 15:32:52 +00:00
Francois Ferrand f567903ce3 Relax client health check
Fail the check (both on startup and /ready route) only if all backends
are failing: so that the working backends can still be used.

Issue: CLDSRV-110
2022-01-27 15:06:36 +01:00
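A rough sketch of the relaxed rule described above, with hypothetical names (`BackendStatus`, `isReady`); it only illustrates "healthy as long as at least one backend works", not the project's actual health-check code:

```typescript
interface BackendStatus {
    name: string;
    healthy: boolean;
}

// Hypothetical readiness rule: fail only when every backend is down,
// so requests can still be served through the remaining backends.
function isReady(backends: BackendStatus[]): boolean {
    return backends.some(backend => backend.healthy);
}

// Example: one failing backend no longer makes the check fail.
console.log(isReady([{ name: 'gcp', healthy: false }, { name: 'file', healthy: true }])); // true
```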
Alexander Chan 122b4ed89f fix merge conflict 2022-01-24 21:07:40 -08:00
Alexander Chan dd55f09be8 Merge remote-tracking branch 'origin/w/8.2/bugfix/CLDSRV-105/request-times-out-with-big-header' into w/8.3/bugfix/CLDSRV-105/request-times-out-with-big-header 2022-01-24 18:25:28 -08:00
Alexander Chan 8428404fb3 Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-105/request-times-out-with-big-header' into w/8.2/bugfix/CLDSRV-105/request-times-out-with-big-header 2022-01-24 16:59:14 -08:00
Naren 107fc53b68 improvement: CLDSRV-109 bump version to 8.3.10 2022-01-21 16:29:36 -08:00
Naren ffd1fcd2c1 Merge remote-tracking branch 'origin/improvement/CLDSRV-108-bump-to-8-2-19' into w/8.3/improvement/CLDSRV-108-bump-to-8-2-19 2022-01-21 15:30:01 -08:00
Naren 6ebe0d3211 improvement: CLDSRV-108 bump version to 8.2.19 2022-01-21 15:07:21 -08:00
bert-e c3c408688e Merge branch 'w/8.2/bugfix/CLDSRV-106-put-object-retention-with-sub-seconds-fail' into tmp/octopus/w/8.3/bugfix/CLDSRV-106-put-object-retention-with-sub-seconds-fail 2022-01-21 17:11:56 +00:00
Naren b1f013ad5e Merge remote-tracking branch 'origin/bugfix/CLDSRV-106-put-object-retention-with-sub-seconds-fail' into w/8.2/bugfix/CLDSRV-106-put-object-retention-with-sub-seconds-fail 2022-01-21 08:49:16 -08:00
bert-e 3dab88a658 Merge branch 'w/8.2/bugfix/CLDSRV-96-forward-408-errors-to-client-on-put' into tmp/octopus/w/8.3/bugfix/CLDSRV-96-forward-408-errors-to-client-on-put 2022-01-17 17:31:58 +00:00
Artem Bakalov baf4b87b84 Updated Yarn lockfile 2022-01-17 09:30:35 -08:00
bert-e dc6bd4bfab Merge branch 'w/8.2/bugfix/CLDSRV-96-forward-408-errors-to-client-on-put' into tmp/octopus/w/8.3/bugfix/CLDSRV-96-forward-408-errors-to-client-on-put 2022-01-17 17:13:30 +00:00
Artem Bakalov 19b31f16fc Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-96-forward-408-errors-to-client-on-put' into w/8.2/bugfix/CLDSRV-96-forward-408-errors-to-client-on-put 2022-01-17 09:12:42 -08:00
Francois Ferrand cde2a224ef Bump version to 8.3.9
Issue: CLDSRV-99
2022-01-17 13:43:27 +01:00
Francois Ferrand 2bf2df789f Fix type of alerts' config params
Issue: CLDSRV-99
2022-01-17 13:43:27 +01:00
Francois Ferrand 485f5159a5 Fix typo in "aggregated status over time" panel
Issue: CLDSRV-98
2022-01-13 18:00:00 +01:00
Alexandre Lavigne ed8ebb5c7f
CLDSRV-77 - bump version to 8.3.8 2022-01-12 16:58:04 +01:00
Alexandre Lavigne fcb5ae28ed
CLDSRV-89 - revert push server health check route
Revert the route used to check the health of the push server.
This route is not being updated in zenko-operator, so it should
not change, to keep it matching what the operator deploys
2022-01-12 14:29:28 +01:00
Alexandre Lavigne 7b67328f95
CLDSRV-89 - specify health check port in documentation 2022-01-12 14:29:28 +01:00
Alexandre Lavigne 38228044c8
CLDSRV-89 - rename route `healthcheck` to `live`
Rename the route `/healthcheck` to `/live` to match
kubernetes probe name *liveness*.

Update the route `/healthcheck/deep` to `/ready` to match
kubernetes probe name *readiness*
2022-01-12 14:29:28 +01:00
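A hedged sketch of the renamed probe routes; the handlers and the port are placeholders (port 8002 is only the metrics port mentioned later in this history), not the actual cloudserver routing code:

```typescript
import * as http from 'http';

// Placeholder handlers standing in for the real liveness/readiness checks.
// '/healthcheck'      -> '/live'  (kubernetes liveness probe)
// '/healthcheck/deep' -> '/ready' (kubernetes readiness probe)
const routes: Record<string, (res: http.ServerResponse) => void> = {
    '/live': res => res.writeHead(200).end('OK'),
    '/ready': res => res.writeHead(200).end('OK'),
};

http.createServer((req, res) => {
    const handler = routes[req.url ?? ''];
    if (handler) {
        handler(res);
    } else {
        res.writeHead(404).end();
    }
}).listen(8002); // illustrative: probes served on the metrics port
```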
Alexandre Lavigne af872f5452
CLDSRV-89 - update doc, tests to new health check route
Add branch `user/*` to trigger a CI run, in order for everyone
to be able to run CI on personal branches.
2022-01-12 14:29:28 +01:00
Alexandre Lavigne 776367dcf4
CLDSRV-89 - update liveness/readiness path
Use the shorter path `/healthcheck`, together with the dedicated `healthckeckhandler` function.
2022-01-12 14:29:28 +01:00
Alexandre Lavigne af0c42863d
ZENKO-3725 - update kubernetes readiness/liveness probe route
Update the readiness and liveness route for kubernetes.
The operator will configure the pod to check for readiness
and liveness on the same port as for the metrics.

This commit allows the new routes on the metrics code path.
2022-01-12 14:29:28 +01:00
Killian Gardahaut 421c2117cf
Merge remote-tracking branch 'origin/improvement/CLDSRV-91-bump-arsenal-version' into w/8.3/improvement/CLDSRV-91-bump-arsenal-version 2022-01-11 16:42:32 +01:00
Killian Gardahaut c3385a4225
CLDSRV-93 Disable multiple backend tests 2022-01-11 16:40:58 +01:00
Killian Gardahaut 18594575bc
CLDSRV-91 update cloudserver version 2022-01-11 11:43:18 +01:00
Killian Gardahaut b5c94ffce9
CLDSRV-91 bump arsenal version 2022-01-11 11:38:59 +01:00
Xin LI a74a9e6361 improvement: CLDSRV-90-bump-version-8.3.7 2022-01-06 18:54:22 +01:00
bert-e a0f4e31839 Merge branch 'bugfix/CLDSRV-86-fix-versionID' into q/8.3 2022-01-06 14:02:35 +00:00
williamlardier 937778b64c
bugfix: CLDSRV-86 bump arsenal and vaultclient 2022-01-06 11:45:27 +01:00
bert-e 8e252ac5cb Merge branch 'feature/CLDSRV-74' into q/8.3 2022-01-06 09:24:27 +00:00
Francois Ferrand 1997c5831d Display the "current" http requests count stat
Used to display the "total" count, show only the increase over current
time range.

Issue: CLDSRV-74
2022-01-04 12:43:30 +01:00
bert-e ca3ac649a6 Merge branch 'feature/CLDSRV-87' into q/8.3 2022-01-04 11:40:16 +00:00
Xin LI 4fa7efefce bugfix: CLDSRV-88-bump version 8.3.6 2022-01-03 15:28:15 +01:00
Francois Ferrand 8bd3ff5533 Make alerts configurable
Issue: CLDSRV-87
2021-12-31 15:54:56 +01:00
Francois Ferrand e13911a096 Display top 10 buckets with errors
Separate panels for 404, 500 and 50X errors.

Issue: CLDSRV-74
2021-12-29 17:47:02 +01:00
Francois Ferrand 44f0855057 Improve data rate display
- Use non-SI byte unit (kB/s, MB/s...)
- Fix capitalization of label
- Display upload packet size distribution
- Display download packet size distribution

Issue: CLDSRV-74
2021-12-29 17:47:02 +01:00
Francois Ferrand 39b649b3ef Improve latency display
- Display overall and per-action latencies on the same graph
- Add heatmap showing the request duration "repartition" over time

Issue: CLDSRV-74
2021-12-29 17:47:02 +01:00
Francois Ferrand 5844beca12 Improve response code graphs
- Compute the "increase" instead of "rate", to display the actual number
of operations
- Use "multi" tooltips, to allow viewing the value under cursor
- Use separate colors and stack series for aggregated statuses
- Fill graphs & use smooth lines

Issue: CLDSRV-74
2021-12-29 16:01:24 +01:00
Francois Ferrand 9101bb94ae Add overview panels on top
- "up" replicas counter
- "Total requests" counter
- Number of buckets & objects
- Data disk storage
- Data & object injection rate
- Responses counter by category (200, 4xx, 5xx)

Issue: CLDSRV-74
2021-12-29 16:01:21 +01:00
bert-e e426635c1f Merge branch 'bugfix/CLDSRV-83' into q/8.3 2021-12-28 19:15:07 +00:00
Francois Ferrand 96115259c7 Add description to all alerts
Issue: CLDSRV-83
2021-12-28 10:48:49 +01:00
Francois Ferrand df271f7aa9 Fix typo in variable name
Request time is measured in nanoseconds; fix the name to reflect the correct
unit.

Issue: CLDSRV-83
2021-12-28 09:01:57 +01:00
Francois Ferrand f548ea8810 Generate average latency alerts
Issue: CLDSRV-83
2021-12-27 17:54:08 +01:00
Francois Ferrand 653f73e257 Use only deleteObject in DeleteLatencyWarning
Ignore deleteBucket and multiObjectDelete, to better match requirements and
avoid insignificant alerts (as these multi-object delete operations can
take longer, especially when many objects are deleted in a single command).

Issue: CLDSRV-83
2021-12-27 17:43:16 +01:00
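To illustrate the kind of filtering described above, here is a schematic alert definition; the metric name, label values, and threshold are assumptions, not the project's actual rule:

```typescript
// Hypothetical PrometheusRule fragment: the latency average is computed only
// over the deleteObject action, ignoring deleteBucket and multiObjectDelete.
const deleteLatencyWarning = {
    alert: 'DeleteLatencyWarning',
    expr: [
        'sum(rate(http_request_duration_seconds_sum{action="deleteObject"}[5m]))',
        '/',
        'sum(rate(http_request_duration_seconds_count{action="deleteObject"}[5m]))',
        '> 1',
    ].join(' '),
    for: '5m',
    labels: { severity: 'warning' },
};

console.log(deleteLatencyWarning.expr);
```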
Francois Ferrand 37cbb69ab5 Use only listBucket in ListingLatencyWarning
Ignore getService (e.g. listing buckets), to better match requirements.

Issue: CLDSRV-83
2021-12-27 17:41:06 +01:00
Xin LI 2a859a8c7c bugfix: CLDSRV-85-revert some parts 2021-12-27 16:47:57 +01:00
Xin LI 187ed092fa improvement: CLDSRV-84-bump-to-8.3.5 2021-12-27 14:21:04 +01:00
Xin LI b97ce84204 improvement: CLDSRV-84-bump-up-8.3.4 2021-12-27 12:45:08 +01:00
bert-e 9667339bee Merge branch 'improvement/CLDSRV-79-support-checkPolicy-for-session-user' into q/8.3 2021-12-23 12:57:18 +00:00
bert-e 91b1078870 Merge branch 'w/8.3/bugfix/CLDSRV-43/addMissingLifecycleHeaders' into tmp/octopus/q/8.3 2021-12-23 11:26:13 +00:00
Xin LI 7ae4fba220 improvement: CLDSRV-79-replace all iamuser check to nonaccount user check 2021-12-22 15:15:35 +01:00
Alexandre Lavigne d4d8537935
CLDSRV-80 - use git tag to tag dashboard during release 2021-12-20 11:18:57 +01:00
Kerkesni 7c80be1136
improvement: CLDSRV-76 replace vault with cloudserver in release docs 2021-12-17 14:18:46 +01:00
Kerkesni bb89dd5919
improvement: CLDSRV-76 improved release docs 2021-12-17 13:51:10 +01:00
Kerkesni 9d8a2cbe81
improvement: CLDSRV-76 bump cloudserver version 2021-12-17 13:49:59 +01:00
Walid El Ansari d96b098bc1
CLDSRV-78-Add workdir 2021-12-17 10:42:29 +01:00
Walid El Ansari 9c619364fc CLDSRV-78-fix ci release 2021-12-16 18:38:30 +01:00
Francois Ferrand f6dbe23504 De-duplicate list of dashboard image layers
Issue: CLDSRV-72
2021-12-16 15:59:54 +01:00
Alexandre Lavigne e954e03dbb CLDSRV-72 - upload alerts to registry 2021-12-16 15:51:58 +01:00
Francois Ferrand 6e3b2962c8 Add overall cloudserver status alert
Issue: CLDSRV-75
2021-12-16 11:30:59 +01:00
bert-e a64a7a31e9 Merge branch 'feature/CLDSRV-65_create_dashboard' into q/8.3 2021-12-15 13:38:51 +00:00
Alexandre Lavigne 43ed45e264
CLDSRV-65 - push dashboard to registry
Add a script that pushes the dashboard to the registry.
One can override, using env variables:
- the registry URL
- the registry project
2021-12-15 14:00:33 +01:00
Alexandre Lavigne 72817d5f78
CLDSRV-65 - add cloudserver dashboard
Created the dashboard using the Grafana UI,
then went to Settings, JSON model, and copy/pasted the content here.
2021-12-15 14:00:33 +01:00
bert-e cd5231a397 Merge branch 'bugfix/CLDSRV-73_request_time' into q/8.3 2021-12-15 12:09:47 +00:00
Alexandre Lavigne 903c6d9be9
CLDSRV-73 - convert nanoseconds to seconds
During metric measurement we get the request time in nanoseconds.
Convert it to seconds, as per the Prometheus standard, by
dividing it by 1000000000.
2021-12-15 11:53:44 +01:00
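A minimal sketch of the conversion described above, assuming a prom-client histogram; the metric name and label are assumptions, not necessarily cloudserver's actual metric:

```typescript
import { Histogram } from 'prom-client';

const NS_PER_SEC = 1e9;

// Hypothetical histogram; the real cloudserver metric names may differ.
const requestDuration = new Histogram({
    name: 'http_request_duration_seconds',
    help: 'HTTP request duration in seconds',
    labelNames: ['action'],
});

function observeRequest(action: string, durationNs: number): void {
    // Prometheus expects durations in seconds, so divide the
    // nanosecond measurement by 1,000,000,000 before observing.
    requestDuration.observe({ action }, durationNs / NS_PER_SEC);
}
```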
bert-e 844345483a Merge branch 'feature/CLDSRV-70-alerts' into q/8.3 2021-12-10 14:58:30 +00:00
Francois Ferrand 60c149d4b3 Add tests for http request & response size 2021-12-10 14:52:32 +01:00
Francois Ferrand d16e6d9553 Do not use 'or' in unit test assertions
We know the behavior; simply check what we expect.

Issue: CLDSRV-71
2021-12-10 14:07:17 +01:00
Francois Ferrand fd9f8b8507 Remove 'it' from testcase names
Issue: CLDSRV-71
2021-12-10 14:06:33 +01:00
Francois Ferrand 0c5823407e Fix typo in action name
Issue: CLDSRV-71
2021-12-10 12:08:51 +01:00
Francois Ferrand cf6b160744 Add cloudserver alerting rules
The alerts will be deployed by Zenko-Operator as PrometheusRule
objects, after "resolving" the input variables.

Issue: CLDSRV-70
2021-12-10 09:29:21 +01:00
bert-e 96ee301762 Merge branch 'w/8.2/bugfix/CLDSRV-43/addMissingLifecycleHeaders' into tmp/octopus/w/8.3/bugfix/CLDSRV-43/addMissingLifecycleHeaders 2021-12-09 01:08:46 +00:00
saitharunthodupunuru 21be0940fe Merge branch bugfix/CLDSRV-43/addMissingLifecycleHeaders into w/8.2/bugfix/CLDSRV-43/addMissingLifecycleHeaders 2021-12-08 16:29:04 -08:00
williamlardier 271ab5d4bd
improvement: CLDSRV-69 release 8.3.2 2021-12-08 15:36:12 +01:00
bert-e 20a75ab111 Merge branch 'w/8.2/improvement/CLDSRV-60-cleanupReplayIdOfPreviousVersions' into tmp/octopus/w/8.3/improvement/CLDSRV-60-cleanupReplayIdOfPreviousVersions 2021-12-02 02:02:23 +00:00
Jonathan Gramain 57fffbabb7 Merge remote-tracking branch 'origin/w/7.10/improvement/CLDSRV-60-cleanupReplayIdOfPreviousVersions' into w/8.2/improvement/CLDSRV-60-cleanupReplayIdOfPreviousVersions 2021-12-01 17:27:19 -08:00
williamlardier 41635b82c3
improvement: CLDSRV bump arsenal version 2021-11-30 15:32:04 +01:00
bert-e 744d1e5d43 Merge branches 'w/8.2/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' and 'q/4071/7.10/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' into tmp/octopus/q/8.2 2021-11-29 23:15:44 +00:00
bert-e a563245dc8 Merge branches 'w/8.3/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' and 'q/4071/8.2/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' into tmp/octopus/q/8.3 2021-11-29 23:15:44 +00:00
bert-e 63d71560ba Merge branch 'feature/CLDSRV-63-metrics' into q/8.3 2021-11-26 16:00:58 +00:00
Francois Ferrand ba5ded8b30 Use high resolution clock to fix flaky tests
Issue: CLDSRV-63
2021-11-26 15:45:50 +01:00
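For the timing itself, a monotonic high-resolution clock avoids the wall-clock jitter that makes such tests flaky; a small Node.js example (not the project's actual test code):

```typescript
// process.hrtime.bigint() is monotonic with nanosecond resolution,
// unlike Date.now(), so repeated measurements are stable in tests.
function timeNs(fn: () => void): number {
    const start = process.hrtime.bigint();
    fn();
    return Number(process.hrtime.bigint() - start);
}

const elapsedNs = timeNs(() => {
    for (let i = 0; i < 1_000_000; i += 1) { /* busy work */ }
});
console.log(`elapsed: ${elapsedNs / 1e9} s`);
```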
Francois Ferrand 8d0d1370a5 Rename label "route" to "action"
It avoids the confusion with the actual route, and will be easier to
understand when looking at the metrics.

Issue: CLDSRV-63
2021-11-26 15:05:30 +01:00
Francois Ferrand 8ce7a63ea1 fixup! Add http active requests metrics 2021-11-26 13:56:08 +01:00
Francois Ferrand da3cc74f1a fixup! Add functional test 2021-11-25 15:52:56 +01:00
Francois Ferrand 7718d11706 fixup! Add functional test 2021-11-25 14:51:35 +01:00
Francois Ferrand 1285ab7497 fixup! Update prom-client to 14.0.1 2021-11-25 14:05:03 +01:00
Francois Ferrand 706d688985 Fix missing response on malformed requests
The server was not replying to a request (missing "resourceType") made
on backbeat or workflow-engine-operator internal routes.
2021-11-24 17:45:28 +01:00
Francois Ferrand 91fbc0fd61 Add functional test
Issue: CLDSRV-63
2021-11-24 17:18:14 +01:00
Francois Ferrand f3a3e442d1 Fix counting of http requests
All requests must be counted, including rejected ones.

Issue: CLDSRV-63
2021-11-23 23:19:19 +01:00
Francois Ferrand 7f6fa22044 Update prom-client to 14.0.1
This is needed to support 'optional' labels.

Issue: CLDSRV-63
2021-11-23 23:19:16 +01:00
Francois Ferrand d3966151c6 Add unit tests
Issue: CLDSRV-63
2021-11-23 23:18:40 +01:00
Francois Ferrand 5c2076552b Add http active requests metrics
Issue: CLDSRV-63
2021-11-23 23:17:25 +01:00
Francois Ferrand 78f76476bf Fix error handling on prometheus endpoint
Issue: CLDSRV-63
2021-11-23 23:16:19 +01:00
Francois Ferrand 04b1642a04 Add http requests duration metrics
Issue: CLDSRV-63
2021-11-23 23:16:16 +01:00
bert-e 5986e207bb Merge branch 'w/7.10/bugfix/CLDSRV-66/fix_utapiv1_mpu_versioned_bucket' into tmp/octopus/w/8.2/bugfix/CLDSRV-66/fix_utapiv1_mpu_versioned_bucket 2021-11-23 22:02:45 +00:00
bert-e 6dcd86f719 Merge branch 'w/8.2/bugfix/CLDSRV-66/fix_utapiv1_mpu_versioned_bucket' into tmp/octopus/w/8.3/bugfix/CLDSRV-66/fix_utapiv1_mpu_versioned_bucket 2021-11-23 22:02:45 +00:00
bert-e e0801a7b75 Merge branches 'development/8.2' and 'w/7.10/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' into tmp/octopus/w/8.2/improvement/CLDSRV-61-refactorAndTestVersioningHelpers 2021-11-23 17:05:40 +00:00
bert-e 647d8135b5 Merge branches 'development/8.3' and 'w/8.2/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' into tmp/octopus/w/8.3/improvement/CLDSRV-61-refactorAndTestVersioningHelpers 2021-11-23 17:05:40 +00:00
Francois Ferrand 9c3440c954 Tweak http metrics
* Remove `cloud_server` prefix from standard metrics
* Remove `cloud_server` label
* Add a route label, to store the operation/api method

Issue: CLDSRV-63
2021-11-23 10:21:48 +01:00
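A hedged sketch of a metric shaped as described above: no `cloud_server` prefix, and a `route` label carrying the operation/API method (the label was later renamed to `action`). The metric name and label values are illustrative only:

```typescript
import { Counter } from 'prom-client';

// Illustrative counter; the actual cloudserver metric name may differ.
const httpRequests = new Counter({
    name: 'http_requests_total',
    help: 'Number of HTTP requests',
    labelNames: ['route', 'code'],
});

// Every request is counted, including rejected ones.
httpRequests.inc({ route: 'objectPut', code: '200' });
httpRequests.inc({ route: 'objectPut', code: '403' });
```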
bert-e 7d5af27c1e Merge branch 'q/4060/7.10/feature/CLDSRV-55/utapi_event_filtering' into tmp/normal/q/8.2 2021-11-19 01:48:23 +00:00
bert-e 2082cadf82 Merge branches 'w/8.3/feature/CLDSRV-55/utapi_event_filtering' and 'q/4060/8.2/feature/CLDSRV-55/utapi_event_filtering' into tmp/octopus/q/8.3 2021-11-19 01:48:23 +00:00
bert-e dea2204260 Merge branch 'w/8.2/feature/CLDSRV-55/utapi_event_filtering' into tmp/octopus/w/8.3/feature/CLDSRV-55/utapi_event_filtering 2021-11-18 23:33:06 +00:00
Taylor McKinnon 427061deb7 Merge remote-tracking branch 'origin/feature/CLDSRV-55/utapi_event_filtering' into w/8.2/feature/CLDSRV-55/utapi_event_filtering 2021-11-18 15:32:47 -08:00
bert-e d1b6ebf0dd Merge branch 'w/8.2/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' into tmp/octopus/w/8.3/improvement/CLDSRV-61-refactorAndTestVersioningHelpers 2021-11-18 05:13:19 +00:00
bert-e ca4a788dcd Merge branch 'w/7.10/improvement/CLDSRV-61-refactorAndTestVersioningHelpers' into tmp/octopus/w/8.2/improvement/CLDSRV-61-refactorAndTestVersioningHelpers 2021-11-18 05:13:18 +00:00
bert-e 6da6a9acd4 Merge branches 'w/8.3/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete' and 'q/4044/8.2/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete' into tmp/octopus/q/8.3 2021-11-17 19:02:30 +00:00
bert-e 906731ff6d Merge branches 'w/8.2/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete' and 'q/4044/7.10/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete' into tmp/octopus/q/8.2 2021-11-17 19:02:29 +00:00
Rached Ben Mustapha b30265da43
chore: release 8.3.1 2021-11-15 09:37:34 -08:00
Rached Ben Mustapha f8235facdb
chore: bump arsenal version 2021-11-15 09:36:16 -08:00
bert-e 3f07fc3397 Merge branch 'improvement/CLDSRV-59-release-8.3.0' into q/8.3 2021-11-12 16:13:28 +00:00
Francois Ferrand 08fc544481 Bump version to 8.3.0
Issue: CLDSRV-59
2021-11-12 16:00:28 +01:00
bert-e dc2e6af001 Merge branch 'feature/CLDSRV-51-new-http-server-for-metrics' into q/8.3 2021-11-12 14:17:04 +00:00
Francois Ferrand 3e3352b798 Use port 8002 for cloudserver metrics
Issue: CLDSRV-51
2021-11-12 14:23:13 +01:00
Francois Ferrand fc9277589a Use /metrics path
As defined in https://scality.atlassian.net/wiki/spaces/OS/pages/1819148292/Strategy+for+exposing+metrics

Issue: CLDSRV-51
2021-11-12 14:23:00 +01:00
Jonathan Gramain aafc46dc66 Merge remote-tracking branch 'origin/w/8.2/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete' into w/8.3/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete 2021-11-11 17:33:28 -08:00
bert-e 8c0cf823b8 Merge branches 'development/8.2' and 'w/7.10/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete' into tmp/octopus/w/8.2/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete 2021-11-12 00:14:31 +00:00
Jonathan Gramain f50b81be78 CLDSRV-53 fixup path on 8.2 branch 2021-11-11 15:30:37 -08:00
bert-e c5a3d75622 Merge branch 'w/8.3/bugfix/CLDSRV-56_DoNotEncodeTokensAgain' into tmp/octopus/q/8.3 2021-11-11 22:55:07 +00:00
Jonathan Gramain f9d041b1fd Merge remote-tracking branch 'origin/w/7.10/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete' into w/8.2/improvement/CLDSRV-53-passReplayIdOnMPUCompleteAndDelete 2021-11-11 14:16:42 -08:00
bert-e 4a37347c9d Merge branch 'w/8.2/bugfix/CLDSRV-56_DoNotEncodeTokensAgain' into tmp/octopus/w/8.3/bugfix/CLDSRV-56_DoNotEncodeTokensAgain 2021-11-10 23:46:51 +00:00
bert-e 0083b7b719 Merge branch 'w/7.10/bugfix/CLDSRV-56_DoNotEncodeTokensAgain' into tmp/octopus/w/8.2/bugfix/CLDSRV-56_DoNotEncodeTokensAgain 2021-11-10 23:46:50 +00:00
Nicolas Humbert 2bec6c2143 CLDSRV-57 Artesca 1.3 will only support ingestion from S3C 2021-11-09 12:17:09 -05:00
bert-e 12354f20a4 Merge branch 'w/8.2/bugfix/CLDSRV-48-configure-utapi-redis-retry-main' into tmp/octopus/w/8.3/bugfix/CLDSRV-48-configure-utapi-redis-retry-main 2021-11-09 00:52:35 +00:00
Rached Ben Mustapha 92e78e6452 bugfix: utapi unit test 2021-11-09 00:52:24 +00:00
bert-e 65fb1cd9e5 Merge branch 'w/8.2/bugfix/CLDSRV-48-configure-utapi-redis-retry-main' into tmp/octopus/w/8.3/bugfix/CLDSRV-48-configure-utapi-redis-retry-main 2021-11-08 23:15:52 +00:00
Rached Ben Mustapha adc9b7bedd bugfix: utapi unit test 2021-11-08 23:15:33 +00:00
bert-e 998723fcdd Merge branch 'w/8.2/bugfix/CLDSRV-48-configure-utapi-redis-retry-main' into tmp/octopus/w/8.3/bugfix/CLDSRV-48-configure-utapi-redis-retry-main 2021-11-08 20:39:07 +00:00
Rached Ben Mustapha 1eda439539 Merge remote-tracking branch 'origin/bugfix/CLDSRV-48-configure-utapi-redis-retry-main' into w/8.2/bugfix/CLDSRV-48-configure-utapi-redis-retry-main 2021-11-08 20:25:27 +00:00
Francois Ferrand 102fed0403 Merge remote-tracking branch 'origin/development/8.3' into feature/CLDSRV-51-new-http-server-for-metrics 2021-11-08 13:55:50 +01:00
bert-e 9276d722db Merge branch 'w/8.3/bugfix/CLDSRV-54' into tmp/octopus/q/8.3 2021-11-06 08:23:03 +00:00
Ronnie Smith 506913b78d
feature: CLDSRV-46 Use patch locations from Arsenal 2021-11-05 14:05:16 -07:00
Francois Ferrand 203c7ec4e7 Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-54' into w/8.2/bugfix/CLDSRV-54 2021-11-05 10:33:23 +01:00
Francois Ferrand 2a1389666c Ensure to close all servers on shutdown
Issue: CLDSRV-51
2021-11-04 16:56:06 +01:00
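A small, hypothetical sketch of closing every server on shutdown; the signal handling, ports, and server list are assumptions, not the actual cloudserver startup code:

```typescript
import * as http from 'http';

// Hypothetical: the API server and the separate metrics server both need
// to be closed so the process can exit cleanly.
const servers: http.Server[] = [
    http.createServer().listen(8000), // main API port (illustrative)
    http.createServer().listen(8002), // metrics port (illustrative)
];

function shutdown(): void {
    servers.forEach(server => server.close());
}

process.on('SIGTERM', shutdown);
process.on('SIGINT', shutdown);
```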
Francois Ferrand 6ed87b5a67 Make metrics endpoint configurable
Issue: CLDSRV-51
2021-11-04 16:56:06 +01:00
Francois Ferrand 0db92e0a49 Expose metrics port in Docker image
Issue: CLDSRV-51
2021-11-04 16:56:06 +01:00
Francois Ferrand 47f109a5d6 Proper routing of metrics requests
Issue: CLDSRV-51
2021-11-04 16:56:06 +01:00
Jordi Bertran de Balanda 9ae98ce550 feature: CLDSRV-51 - spawn a new HTTP server to serve metrics 2021-11-04 10:39:44 +01:00
Jordi Bertran de Balanda e046884c3c feature: CLDSRV-51 - switch case for prometheus object updates 2021-11-04 10:39:06 +01:00
bert-e 24f9fa6a65 Merge branches 'w/8.2/bugfix/CLDSRV-45/add_crr_metrics_utapiv2' and 'q/4019/7.10/bugfix/CLDSRV-45/add_crr_metrics_utapiv2' into tmp/octopus/q/8.2 2021-11-02 21:51:38 +00:00
Taylor McKinnon c4d49607ae Merge remote-tracking branch 'origin/bugfix/CLDSRV-45/add_crr_metrics_utapiv2' into w/8.2/bugfix/CLDSRV-45/add_crr_metrics_utapiv2 2021-11-02 10:09:20 -07:00
bert-e 103f077176 Merge branch 'w/7.10/bugfix/CLDSRV-37-pushReplicationMetrics' into tmp/octopus/w/8.2/bugfix/CLDSRV-37-pushReplicationMetrics 2021-11-01 18:18:47 +00:00
bert-e e9731164a2 Merge branch 'feature/CLDSRV-41-bumpversion' into q/8.2 2021-10-29 16:07:52 +00:00
bert-e 487e2a1d1d Merge branch 'w/8.2/improvement/CLDSRV-44-upgrade-arsenal' into tmp/octopus/q/8.2 2021-10-29 15:29:03 +00:00
Francois Ferrand e0bfe70b21 Bump cloudserver version to 8.2.17
Issue: CLDSRV-41
2021-10-29 15:24:03 +02:00
Rahul Padigela 88df896458 Merge remote-tracking branch 'origin/w/7.10/improvement/CLDSRV-44-upgrade-arsenal' into w/8.2/improvement/CLDSRV-44-upgrade-arsenal 2021-10-27 16:52:10 -07:00
Francois Ferrand 497188dac5 Do not use JSON.parse() to parse variable
Issue: CLDSRV-41
Issue: ARTESCA-2190
2021-10-27 13:42:57 +02:00
Francois Ferrand 501fbda5c2 Allow configuring CLDSRV-41 capability
Issue: CLDSRV-41
Issue: ARTESCA-2190
2021-10-25 09:23:25 +02:00
Naren 05ff4d91e2 improvement: CLDSRV-42 bump cloudserver version
bump cloudserver version to 8.2.16
2021-10-22 15:21:41 -07:00
Naren 643c201325 improvement: CLDSRV-26 bump arsenal version
bump arsenal version to 8.1.7
2021-10-22 14:05:59 -07:00
Nicolas Humbert c3c82e086b CLDSRV-40 bump version to 8.2.15 2021-10-20 15:28:04 -04:00
bert-e c0b64fa490 Merge branch 'bugfix/CLDSRV-39/arsenal' into q/8.2 2021-10-19 21:39:57 +00:00
Nicolas Humbert 9f84c8c6df CLDSRV-39 interrogate the default region for getBucketLocation 2021-10-19 16:31:12 -04:00
bert-e 2e9d691511 Merge branches 'w/8.2/bugfix/CLDSRV-36-backport-S3C-2775-to-7.4' and 'q/3948/7.10/bugfix/CLDSRV-36-backport-S3C-2775-to-7.4' into tmp/octopus/q/8.2 2021-10-15 20:51:05 +00:00
bert-e 6f84440bdd Merge branch 'w/7.10/bugfix/CLDSRV-36-backport-S3C-2775-to-7.4' into tmp/octopus/w/8.2/bugfix/CLDSRV-36-backport-S3C-2775-to-7.4 2021-10-14 21:06:22 +00:00
Alexandre Lavigne 580b9611df
CLDSRV-35 - update key server to a working key server 2021-10-14 17:11:29 +02:00
bert-e f423854d6b Merge branch 'w/7.10/bugfix/CLDSRV-35_update_aggressor_ci_image' into tmp/octopus/w/8.2/bugfix/CLDSRV-35_update_aggressor_ci_image 2021-10-14 15:08:08 +00:00
bert-e 2b19e2703c Merge branch 'w/8.2/bugfix/CLDSRV-30-deleteOldDataLocationsOnBackbeatPutMetadata' into tmp/octopus/q/8.2 2021-10-13 23:15:46 +00:00
Alexandre Lavigne 829a22fe94
CLDSRV-34 - fix time computation for test comparison
We need a new date that is 30 minutes in the past.
The computation is based on a date that is already 30 minutes in the future,
so take that date and rewind it by 60 minutes:
- 30 to get back to the original date
- 30 to go 30 minutes into the past
2021-10-13 14:37:53 +02:00
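A hedged sketch of the date arithmetic described above (variable names are made up): starting from a date 30 minutes in the future and rewinding 60 minutes lands 30 minutes in the past:

```typescript
const MINUTE_MS = 60 * 1000;

// Assumed starting point: a date already 30 minutes in the future.
const thirtyMinutesAhead = new Date(Date.now() + 30 * MINUTE_MS);

// Rewind 60 minutes: 30 to get back to "now", 30 more to land in the past.
const thirtyMinutesAgo = new Date(thirtyMinutesAhead.getTime() - 60 * MINUTE_MS);

console.log(thirtyMinutesAgo < new Date()); // true
```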
Jonathan Gramain f22f9bc0a1 CLDSRV-30 fix routeBackbeat logic on development/8.2 branch 2021-10-12 16:22:56 -07:00
Jonathan Gramain e27ecd89f5 Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-30-deleteOldDataLocationsOnBackbeatPutMetadata' into w/8.2/bugfix/CLDSRV-30-deleteOldDataLocationsOnBackbeatPutMetadata 2021-10-12 16:22:30 -07:00
Naren 0bb7a918c0 improvement: ARSN-25 update version to 8.1.5 2021-10-12 13:41:02 -07:00
bert-e 6df2b1698d Merge branch 'q/3841/7.10/feature/CLDSRV-22-pin-arsenal' into tmp/normal/q/8.2 2021-10-08 17:45:17 +00:00
Thomas Carmet af00189942 Merge remote-tracking branch 'origin/w/7.10/feature/CLDSRV-22-pin-arsenal' into w/8.2/feature/CLDSRV-22-pin-arsenal 2021-10-08 10:11:19 -07:00
bert-e c5be7e3741 Merge branches 'w/8.2/feature/CLDSRV-33-package-version-set' and 'q/3918/7.10/feature/CLDSRV-33-package-version-set' into tmp/octopus/q/8.2 2021-10-08 16:17:47 +00:00
bert-e bd39a80952 Merge branch 'w/8.2/bugfix/CLDSRV-31-disableKMSHealthcheck' into tmp/octopus/q/8.2 2021-10-07 20:22:35 +00:00
Thomas Carmet e1dd682495 Merge branch 'w/7.10/feature/CLDSRV-33-package-version-set' into w/8.2/feature/CLDSRV-33-package-version-set 2021-10-07 11:01:40 -07:00
Thomas Carmet 05eed1cf18 CLDSRV-22 stop counting on commit hash size for release 2021-10-06 17:20:14 -07:00
bert-e f7ce2931b4 Merge branch 'bugfix/CLDSRV-31-disableKMSHealthcheck' into tmp/octopus/w/8.2/bugfix/CLDSRV-31-disableKMSHealthcheck 2021-10-05 18:33:35 +00:00
bert-e 804f59d5f9 Merge branch 'bugfix/CLDSRV-28-put-bucket-notification-crashes-with-cross-origin-requests' into tmp/octopus/w/8.2/bugfix/CLDSRV-28-put-bucket-notification-crashes-with-cross-origin-requests 2021-09-30 19:28:50 +00:00
bert-e 0bbb40a7da Merge branch 'bugfix/CLDSRV-28-put-bucket-notification-crashes-with-cross-origin-requests' into tmp/octopus/w/8.2/bugfix/CLDSRV-28-put-bucket-notification-crashes-with-cross-origin-requests 2021-09-30 15:25:53 +00:00
Naren 41533ca80f improvement: bump version to 8.2.13 2021-09-28 16:06:08 -07:00
bert-e 8fae0bfa76 Merge branch 'w/7.10/feature/CLDSRV-20-pin-bucketclient' into tmp/normal/w/8.2/feature/CLDSRV-20-pin-bucketclient 2021-09-01 20:26:26 +00:00
bert-e bc2fad37c9 Merge branch 'development/8.2' into tmp/normal/w/8.2/feature/CLDSRV-20-pin-bucketclient 2021-09-01 20:26:26 +00:00
Thomas Carmet 1fc86f78b7 Merge remote-tracking branch 'origin/w/7.10/feature/CLDSRV-20-pin-bucketclient' into w/8.2/feature/CLDSRV-20-pin-bucketclient 2021-08-31 12:59:16 -07:00
Thomas Carmet 8b9b1b0978 Merge remote-tracking branch 'origin/w/7.10/feature/CLDSRV-19-bump-sproxydclient' into w/8.2/feature/CLDSRV-19-bump-sproxydclient 2021-08-30 09:42:01 -07:00
bert-e c03c33162f Merge branches 'w/8.2/bugfix/CLDSRV-18-set-default-region' and 'q/3825/7.10/bugfix/CLDSRV-18-set-default-region' into tmp/octopus/q/8.2 2021-08-27 19:51:26 +00:00
bert-e d10ef350ba Merge branches 'w/8.2/bugfix/CLDSRV-16/fixObjectLockComparisonCheck' and 'q/3818/7.10/bugfix/CLDSRV-16/fixObjectLockComparisonCheck' into tmp/octopus/q/8.2 2021-08-27 19:30:56 +00:00
Naren fa175e413e Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-18-set-default-region' into w/8.2/bugfix/CLDSRV-18-set-default-region 2021-08-27 11:37:16 -07:00
bert-e 0fdcbed429 Merge branch 'w/7.10/bugfix/CLDSRV-18-set-default-region' into tmp/octopus/w/8.2/bugfix/CLDSRV-18-set-default-region 2021-08-27 18:25:03 +00:00
Naren 901cfb18f0 bf: CLDSRV-15 use updated vaultclient
use correct uuid package.
2021-08-26 04:04:58 -07:00
bert-e 16ea051c70 Merge branch 'w/7.10/bugfix/CLDSRV-15-use-updated-vault-client' into tmp/octopus/w/8.2/bugfix/CLDSRV-15-use-updated-vault-client 2021-08-26 10:57:33 +00:00
Naren faf421de17 Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-15-use-updated-vault-client' into w/8.2/bugfix/CLDSRV-15-use-updated-vault-client 2021-08-26 02:40:41 -07:00
bert-e 8e5ee89ceb Merge branch 'w/7.10/bugfix/CLDSRV-16/fixObjectLockComparisonCheck' into tmp/octopus/w/8.2/bugfix/CLDSRV-16/fixObjectLockComparisonCheck 2021-08-26 01:12:17 +00:00
bert-e 977dbab35d Merge branch 'w/7.10/bugfix/CLDSRV-11_StabCherryPickWriteChecks' into tmp/octopus/w/8.2/bugfix/CLDSRV-11_StabCherryPickWriteChecks 2021-08-19 22:58:57 +00:00
Ronnie Smith cf95928e02
Merge branch 'w/8.2/bugfix/CLDSRV-11_RemoveExtraChecksForOwnership' of github.com:scality/cloudserver into w/8.2/bugfix/CLDSRV-11_RemoveExtraChecksForOwnership 2021-08-19 15:11:48 -07:00
Ronnie Smith 6a5aea842b
Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-11_RemoveExtraChecksForOwnership' into w/8.2/bugfix/CLDSRV-11_RemoveExtraChecksForOwnership 2021-08-19 15:09:58 -07:00
Ronnie Smith 7e41e5d85a
Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-11_RemoveExtraChecksForOwnership' into w/8.2/bugfix/CLDSRV-11_RemoveExtraChecksForOwnership 2021-08-19 14:35:35 -07:00
bert-e 772993b2b8 Merge branches 'w/8.2/bugfix/CLDSRV-12-bucket-policy-resource-security-fix' and 'q/3780/7.10/bugfix/CLDSRV-12-bucket-policy-resource-security-fix' into tmp/octopus/q/8.2 2021-08-19 17:49:47 +00:00
naren-scality 384dfd854c Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-12-bucket-policy-resource-security-fix' into w/8.2/bugfix/CLDSRV-12-bucket-policy-resource-security-fix 2021-08-18 23:46:39 -07:00
bert-e 24894f6ed3 Merge branches 'w/8.2/bugfix/CLDSRV-11_PredefinedACLForWrite' and 'q/3782/7.10/bugfix/CLDSRV-11_PredefinedACLForWrite' into tmp/octopus/q/8.2 2021-08-17 23:58:26 +00:00
Ronnie Smith 0a441f8726
bugfix: CLDSRV-11 Check for service account on object delete 2021-08-16 15:51:00 -07:00
bert-e 240151e14d Merge branch 'w/7.10/bugfix/CLDSRV-11_PredefinedACLForWrite' into tmp/octopus/w/8.2/bugfix/CLDSRV-11_PredefinedACLForWrite 2021-08-16 22:35:01 +00:00
bert-e 7bcd88dc49 Merge branch 'w/7.10/bugfix/CLDSRV-10-get-acl-with-predefined-groups-stabilization' into tmp/octopus/w/8.2/bugfix/CLDSRV-10-get-acl-with-predefined-groups-stabilization 2021-08-14 00:45:31 +00:00
bert-e 654edb134a Merge branches 'w/8.2/feature/S3C-4726/log' and 'q/3765/7.10/feature/S3C-4726/log' into tmp/octopus/q/8.2 2021-08-13 20:15:18 +00:00
bert-e e6c83347f5 Merge branch 'w/7.10/feature/S3C-4726/log' into tmp/octopus/w/8.2/feature/S3C-4726/log 2021-08-13 19:36:06 +00:00
bert-e c066be2dab Merge branches 'w/8.2/bugfix/CLDSRV-10-get-acl-with-predefined-groups' and 'q/3769/7.10/bugfix/CLDSRV-10-get-acl-with-predefined-groups' into tmp/octopus/q/8.2 2021-08-13 16:30:31 +00:00
bert-e 6651519557 Merge branch 'w/7.10/bugfix/CLDSRV-10-get-acl-with-predefined-groups' into tmp/octopus/w/8.2/bugfix/CLDSRV-10-get-acl-with-predefined-groups 2021-08-12 09:24:38 +00:00
Thomas Carmet e8b54a707a Merge remote-tracking branch 'origin/w/7.10/feature/CLDSRV-9-upgrade-werelogs' into w/8.2/feature/CLDSRV-9-upgrade-werelogs 2021-08-06 11:16:53 -07:00
Jonathan Gramain 9372af75a3 Merge remote-tracking branch 'origin/improvement/CLDSRV-8-kmipDeepHealthcheck' into w/8.2/improvement/CLDSRV-8-kmipDeepHealthcheck 2021-08-04 18:19:13 -07:00
Ronnie Smith 859a68b158
Merge remote-tracking branch 'origin/bugfix/CLDSRV-7_RemoveHTTPCodeAndMessageFromLogs' into w/8.2/bugfix/CLDSRV-7_RemoveHTTPCodeAndMessageFromLogs 2021-08-03 14:05:48 -07:00
bert-e 19ddf81950 Merge branches 'w/8.2/bugfix/CLDSRV-6/403-errors' and 'q/3750/7.10/bugfix/CLDSRV-6/403-errors' into tmp/octopus/q/8.2 2021-08-03 19:56:37 +00:00
bert-e 2eeeb22138 Merge branch 'bugfix/CLDSRV-6/403-errors' into tmp/octopus/w/8.2/bugfix/CLDSRV-6/403-errors 2021-08-03 17:08:09 +00:00
Ronnie Smith 0739e5c04e
Merge remote-tracking branch 'origin/w/7.10/bugfix/CLDSRV-5_RemoveExtraHeadersFrom304Responses' into w/8.2/bugfix/CLDSRV-5_RemoveExtraHeadersFrom304Responses 2021-07-30 20:59:09 -07:00
bert-e 8b6f5a1ee3 Merge branches 'w/8.2/feature/CLDSRV-2/addLifecycleUID' and 'q/3734/7.10/feature/CLDSRV-2/addLifecycleUID' into tmp/octopus/q/8.2 2021-07-28 22:07:17 +00:00
Alexander Chan 11419e2937 Merge remote-tracking branch 'origin/feature/CLDSRV-2/addLifecycleUID' into w/8.2/feature/CLDSRV-2/addLifecycleUID 2021-07-28 14:16:48 -07:00
Nicolas Humbert 8e9441b923 CLDSRV-4 fix functional tests 2021-07-28 12:38:17 -04:00
bert-e 9cd7ce9278 Merge branch 'bugfix/CLDSRV-4/lifecycle' into tmp/octopus/w/8.2/bugfix/CLDSRV-4/lifecycle 2021-07-27 17:31:49 +00:00
bert-e 60c62d8332 Merge branches 'w/8.2/improvement/S3C-4312-backbeatEncryptionSupport' and 'q/3717/7.10/improvement/S3C-4312-backbeatEncryptionSupport' into tmp/octopus/q/8.2 2021-07-26 22:25:40 +00:00
Jonathan Gramain e52a69e6ef Merge remote-tracking branch 'origin/improvement/S3C-4312-backbeatEncryptionSupport' into w/8.2/improvement/S3C-4312-backbeatEncryptionSupport 2021-07-26 14:51:26 -07:00
bert-e 7054da645c Merge branch 'bugfix/CLDSRV-3-pykmipImageBuildFix' into tmp/octopus/w/8.2/bugfix/CLDSRV-3-pykmipImageBuildFix 2021-07-23 20:05:13 +00:00
bert-e 82efa99a8f Merge branches 'w/8.2/bugfix/S3C-4533-prevent-suspending-versioning-locked-bucket' and 'q/3715/7.10/bugfix/S3C-4533-prevent-suspending-versioning-locked-bucket' into tmp/octopus/q/8.2 2021-07-21 17:00:56 +00:00
anurag4DSB d19d6b4c18 Merge remote-tracking branch 'origin/bugfix/S3C-4533-prevent-suspending-versioning-locked-bucket' into w/8.2/bugfix/S3C-4533-prevent-suspending-versioning-locked-bucket 2021-07-21 18:25:20 +02:00
Jonathan Gramain 0f9bd86b37 ft(S3C-4508) 8.2: use S3KMS=file
set S3KMS=file in the multiple-backend and Ceph tests to persist
encryption parameters in the bucket and avoid exceptions
2021-07-20 18:35:42 -07:00
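A minimal illustration of the S3KMS switch mentioned in the commit above, in plain Node; the fallback value and variable handling are assumptions, not the actual cloudserver configuration code:

    // Illustrative sketch only: choose the KMS backend from the S3KMS
    // environment variable; launching a test run with S3KMS=file keeps
    // encryption parameters in bucket metadata instead of an external KMS.
    const kmsBackend = process.env.S3KMS || 'file'; // assumed default
    console.log(`KMS backend for this run: ${kmsBackend}`);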
Jonathan Gramain df9fa85942 ft(S3C-4508) 8.2: update arsenal dep 2021-07-20 17:40:48 -07:00
Taylor McKinnon d8707561fc Merge remote-tracking branch 'origin/feature/S3C-4508_per_object_encryption_headers' into w/8.2/feature/S3C-4508_per_object_encryption_headers 2021-07-07 10:14:38 -07:00
bert-e 1c45364dc6 Merge branches 'w/8.2/feature/S3C-4551-tiny-version-ids' and 'q/3660/7.10/feature/S3C-4551-tiny-version-ids' into tmp/octopus/q/8.2 2021-07-06 18:35:18 +00:00
bert-e 8cf441cc50 Merge branches 'w/8.2/bugfix/S3C-4420/correctErrorResponse' and 'q/3636/7.10/bugfix/S3C-4420/correctErrorResponse' into tmp/octopus/q/8.2 2021-07-03 00:09:42 +00:00
bert-e f7387d729e Merge branches 'w/8.2/bugfix/S3C-4610_add_missing_utapi_header' and 'q/3676/7.10/bugfix/S3C-4610_add_missing_utapi_header' into tmp/octopus/q/8.2 2021-07-02 17:00:35 +00:00
bert-e 86ad4e4c6a Merge branch 'bugfix/S3C-4420/correctErrorResponse' into tmp/octopus/w/8.2/bugfix/S3C-4420/correctErrorResponse 2021-07-01 17:17:34 +00:00
vrancurel cc081768f6 fix aws test 2021-06-30 19:03:26 -07:00
vrancurel c7725254ce add config line 2021-06-30 17:19:44 -07:00
vrancurel 0ed73e0752 Merge remote-tracking branch 'origin/feature/S3C-4551-tiny-version-ids' into w/8.2/feature/S3C-4551-tiny-version-ids 2021-06-30 09:56:53 -07:00
bert-e 974a54bbe7 Merge branches 'w/8.2/feature/S3C-4569_x-amz-bucket-region_header' and 'q/3682/7.10/feature/S3C-4569_x-amz-bucket-region_header' into tmp/octopus/q/8.2 2021-06-30 00:57:52 +00:00
= ae72ae2c69 Merge remote-tracking branch 'origin/feature/S3C-4569_x-amz-bucket-region_header' into w/8.2/feature/S3C-4569_x-amz-bucket-region_header
merge
2021-06-29 16:43:30 -07:00
bert-e b6d81e2bff Merge branches 'w/8.2/improvement/S3C-4110/backport' and 'q/3659/7.10/improvement/S3C-4110/backport' into tmp/octopus/q/8.2 2021-06-29 17:25:24 +00:00
Nicolas Humbert ad073b2549 Merge remote-tracking branch 'origin/improvement/S3C-4110/backport' into w/8.2/improvement/S3C-4110/backport 2021-06-29 09:26:14 -04:00
Nicolas Humbert aec7068dc2 Merge remote-tracking branch 'origin/development/8.2' into w/8.2/improvement/S3C-4110/backport 2021-06-29 09:21:52 -04:00
bert-e 159d7b7fa4 Merge branch 'bugfix/S3C-4610_add_missing_utapi_header' into tmp/octopus/w/8.2/bugfix/S3C-4610_add_missing_utapi_header 2021-06-23 16:56:46 +00:00
bert-e e89e2ff705 Merge branch 'bugfix/S3C-4324-bucketPolicyAllowsNotificationActions' into tmp/octopus/w/8.2/bugfix/S3C-4324-bucketPolicyAllowsNotificationActions 2021-06-11 01:11:12 +00:00
Nicolas Humbert 39b412dd64 clean yarn.lock 2021-06-10 14:45:02 -05:00
Nicolas Humbert 77c9f485a1 Merge remote-tracking branch 'origin/improvement/S3C-4110/backport' into w/8.2/improvement/S3C-4110/backport 2021-06-10 14:19:27 -05:00
bert-e 9f9291b008 Merge branch 'feature/S3C-4382_add_expected_bucket_owner_support' into tmp/octopus/w/8.2/feature/S3C-4382_add_expected_bucket_owner_support 2021-05-19 22:24:26 +00:00
bert-e e7b27ea528 Merge branch 'w/7.10/bugfix/S3C-2663/correctErrorResponse' into tmp/octopus/w/8.2/bugfix/S3C-2663/correctErrorResponse 2021-05-18 02:03:59 +00:00
Taylor McKinnon 0c15f95932 Merge remote-tracking branch 'origin/feature/S3C-3755_add_bucketDeleteEncryption_handler' into w/8.2/feature/S3C-3755_add_bucketDeleteEncryption_handler 2021-05-17 11:24:17 -07:00
bert-e 1c0ccb51e4 Merge branches 'w/8.2/bugfix/S3C-3061-bucket-acl-using-predefined-groups' and 'q/3584/7.10/bugfix/S3C-3061-bucket-acl-using-predefined-groups' into tmp/octopus/q/8.2 2021-05-15 07:09:47 +00:00
bert-e e14a6e74fb Merge branch 'w/7.10/bugfix/S3C-3061-bucket-acl-using-predefined-groups' into tmp/octopus/w/8.2/bugfix/S3C-3061-bucket-acl-using-predefined-groups 2021-05-15 05:14:23 +00:00
bert-e d0cc88c232 Merge branch 'w/7.10/bugfix/S3C-3061-bucket-acl-using-predefined-groups' into tmp/octopus/w/8.2/bugfix/S3C-3061-bucket-acl-using-predefined-groups 2021-05-15 05:02:19 +00:00
Ilke cb500f6e3d Merge remote-tracking branch 'origin/bugfix/S3C-4358-support-versioned-obj-lock-requests' into w/8.2/bugfix/S3C-4358-support-versioned-obj-lock-requests 2021-05-13 17:08:54 -07:00
bert-e 22417386c1 Merge branches 'w/8.2/improvement/S3C-3727-deprecated-buffer-usage' and 'q/3218/8.1/improvement/S3C-3727-deprecated-buffer-usage' into tmp/octopus/q/8.2 2021-05-12 20:07:45 +00:00
bert-e edffa80691 Merge branches 'w/8.1/improvement/S3C-3727-deprecated-buffer-usage' and 'q/3218/7.10/improvement/S3C-3727-deprecated-buffer-usage' into tmp/octopus/q/8.1 2021-05-12 20:07:45 +00:00
bert-e fc089213b6 Merge branches 'w/8.2/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0' and 'q/3603/8.1/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0' into tmp/octopus/q/8.2 2021-05-12 19:56:46 +00:00
bert-e 01e84295c0 Merge branches 'w/8.1/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0' and 'q/3603/7.10/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0' into tmp/octopus/q/8.1 2021-05-12 19:56:45 +00:00
bert-e 40e329138e Merge branch 'w/8.1/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0' into tmp/octopus/w/8.2/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0 2021-05-12 18:50:37 +00:00
bert-e 208a1ec456 Merge branch 'w/7.10/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0' into tmp/octopus/w/8.1/dependabot/npm_and_yarn/development/7.4/aws-sdk-2.905.0 2021-05-12 18:50:36 +00:00
bert-e ec19cafe48 Merge branch 'w/8.1/improvement/S3C-3727-deprecated-buffer-usage' into tmp/octopus/w/8.2/improvement/S3C-3727-deprecated-buffer-usage 2021-05-12 18:34:18 +00:00
bert-e 67413869a4 Merge branch 'w/7.10/improvement/S3C-3727-deprecated-buffer-usage' into tmp/octopus/w/8.1/improvement/S3C-3727-deprecated-buffer-usage 2021-05-12 18:34:18 +00:00
bert-e 3ea63d364c Merge branches 'w/8.2/improvement/S3C-4363-remove-node-uuid' and 'q/3585/8.1/improvement/S3C-4363-remove-node-uuid' into tmp/octopus/q/8.2 2021-05-11 17:49:52 +00:00
bert-e d963b38f0e Merge branches 'w/8.1/improvement/S3C-4363-remove-node-uuid' and 'q/3585/7.10/improvement/S3C-4363-remove-node-uuid' into tmp/octopus/q/8.1 2021-05-11 17:49:51 +00:00
bert-e bc60e8b08b Merge branches 'w/8.2/bugfix/S3C-3778-deduplicateCompleteMPU' and 'q/3577/8.1/bugfix/S3C-3778-deduplicateCompleteMPU' into tmp/octopus/q/8.2 2021-05-11 01:58:08 +00:00
bert-e a2fb0215fb Merge branches 'w/8.1/bugfix/S3C-3778-deduplicateCompleteMPU' and 'q/3577/7.10/bugfix/S3C-3778-deduplicateCompleteMPU' into tmp/octopus/q/8.1 2021-05-11 01:58:07 +00:00
bert-e 1b332a273c Merge branch 'w/8.1/improvement/S3C-4363-remove-node-uuid' into tmp/octopus/w/8.2/improvement/S3C-4363-remove-node-uuid 2021-05-11 00:33:31 +00:00
Rahul Padigela 6f56a6d215 improvement: S3C-4363 update uuid references 2021-05-10 17:32:57 -07:00
bert-e ec40c8a520 Merge branch 'w/8.1/improvement/S3C-4363-remove-node-uuid' into tmp/octopus/w/8.2/improvement/S3C-4363-remove-node-uuid 2021-05-11 00:07:25 +00:00
Rahul Padigela c9444fdbc4 improvement: S3C-4363 update uuid references 2021-05-10 17:06:12 -07:00
bert-e e534dc15c3 Merge branch 'w/8.1/improvement/dependabot-builds' into tmp/octopus/w/8.2/improvement/dependabot-builds 2021-05-10 23:34:27 +00:00
bert-e fd7fe45046 Merge branch 'w/7.10/improvement/dependabot-builds' into tmp/octopus/w/8.1/improvement/dependabot-builds 2021-05-10 23:34:27 +00:00
bert-e d008ccc3ec Merge branch 'w/8.1/improvement/dependabot-builds' into tmp/octopus/w/8.2/improvement/dependabot-builds 2021-05-10 22:21:05 +00:00
Rahul Padigela 9a69b5c00d Merge remote-tracking branch 'origin/w/7.10/improvement/dependabot-builds' into w/8.1/improvement/dependabot-builds 2021-05-10 15:03:11 -07:00
bert-e c313d201d4 Merge branch 'w/8.1/bugfix/S3C-3778-deduplicateCompleteMPU' into tmp/octopus/w/8.2/bugfix/S3C-3778-deduplicateCompleteMPU 2021-05-10 20:56:15 +00:00
Jonathan Gramain f03f6bf2c9 S3C-3778 8.1+ fix: disable sanity check for external backend
For external backends like AWS, the sanity check fails because it does
not support external locations, so keep the existing behavior unchanged
for external backends.
2021-05-10 13:55:51 -07:00
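A minimal sketch of the behavior described in the commit above, with hypothetical names; the real completeMultipartUpload code in cloudserver is structured differently:

    // Illustrative only: run the duplicate-completion sanity check for
    // internal locations and skip it for external backends (e.g. AWS),
    // which do not expose the location details the check relies on.
    const externalBackends = ['aws_s3', 'azure', 'gcp']; // assumed type names

    function shouldRunSanityCheck(locationType) {
        return !externalBackends.includes(locationType);
    }

    console.log(shouldRunSanityCheck('scality')); // true  -> check runs
    console.log(shouldRunSanityCheck('aws_s3'));  // false -> old behavior kept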
bert-e a7ab9ef522 Merge branch 'w/8.1/bugfix/S3C-3778-deduplicateCompleteMPU' into tmp/octopus/w/8.2/bugfix/S3C-3778-deduplicateCompleteMPU 2021-05-10 18:32:44 +00:00
Jonathan Gramain b98898dfe1 Merge remote-tracking branch 'origin/w/7.10/bugfix/S3C-3778-deduplicateCompleteMPU' into w/8.1/bugfix/S3C-3778-deduplicateCompleteMPU 2021-05-10 11:24:20 -07:00
bert-e 1624a1488b Merge branch 'w/8.1/improvement/S3C-4363-remove-node-uuid' into tmp/octopus/w/8.2/improvement/S3C-4363-remove-node-uuid 2021-05-10 15:12:43 +00:00
Rahul Padigela 6a94c7bf73 Merge remote-tracking branch 'origin/w/7.10/improvement/S3C-4363-remove-node-uuid' into w/8.1/improvement/S3C-4363-remove-node-uuid 2021-05-10 08:07:09 -07:00
Rahul Padigela 9109cd1610 Set theme jekyll-theme-modernist 2021-05-07 17:44:39 -07:00
bert-e bc3dfbd315 Merge branches 'w/8.1/improvement/S3C-4336_move_ModelVersion' and 'q/3573/7.10/improvement/S3C-4336_move_ModelVersion' into tmp/octopus/q/8.1 2021-05-06 00:05:55 +00:00
bert-e 34fa2d6448 Merge branches 'w/8.2/improvement/S3C-4336_move_ModelVersion' and 'q/3573/8.1/improvement/S3C-4336_move_ModelVersion' into tmp/octopus/q/8.2 2021-05-06 00:05:55 +00:00
bert-e 054c92bc8a Merge branch 'w/8.1/improvement/S3C-4336_move_ModelVersion' into tmp/octopus/w/8.2/improvement/S3C-4336_move_ModelVersion 2021-05-05 18:31:54 +00:00
Taylor McKinnon ad1a299b9f Merge remote-tracking branch 'origin/w/7.10/improvement/S3C-4336_move_ModelVersion' into w/8.1/improvement/S3C-4336_move_ModelVersion 2021-05-05 11:31:39 -07:00
bert-e e9adc508b3 Merge branch 'w/8.1/feature/S3C-3749_add_PutBucketEncyption_handler' into tmp/octopus/w/8.2/feature/S3C-3749_add_PutBucketEncyption_handler 2021-05-04 23:47:28 +00:00
Taylor McKinnon 33a79b88a7 Merge remote-tracking branch 'origin/feature/S3C-3749_add_PutBucketEncyption_handler' into w/8.1/feature/S3C-3749_add_PutBucketEncyption_handler 2021-05-04 16:46:54 -07:00
Rached Ben Mustapha 1359292388 feature: bump version to 8.2.12 2021-04-30 16:36:23 -07:00
Rached Ben Mustapha c27c4ef3e7 bugfix: report currently running overlay version 2021-04-29 16:59:11 -07:00
bert-e b5b4743822 Merge branch 'w/8.1/feature/S3C-3752_add_GetBucketEncryption_api' into tmp/octopus/w/8.2/feature/S3C-3752_add_GetBucketEncryption_api 2021-04-27 21:55:04 +00:00
Taylor McKinnon ba3a6fe19f Merge remote-tracking branch 'origin/feature/S3C-3752_add_GetBucketEncryption_api' into w/8.1/feature/S3C-3752_add_GetBucketEncryption_api 2021-04-27 14:52:58 -07:00
Alexander Chan 8ec1cf01c7 bump cloudserver version 2021-04-24 15:40:45 -07:00
Alexander Chan a05b89f707 ZENKO-3368: add auth chain backend 2021-04-24 15:40:21 -07:00
bert-e 3f0d46f706 Merge branch 'documentation/add-go-sample' into q/8.2 2021-04-15 03:32:15 +00:00
bert-e f32f353244 Merge branch 'bugfix/S3C-4245_enforce_bypassgovernancemode_policy' into tmp/octopus/w/8.1/bugfix/S3C-4245_enforce_bypassgovernancemode_policy 2021-04-14 00:37:08 +00:00
bert-e 58b070dec4 Merge branch 'w/8.1/bugfix/S3C-4245_enforce_bypassgovernancemode_policy' into tmp/octopus/w/8.2/bugfix/S3C-4245_enforce_bypassgovernancemode_policy 2021-04-14 00:37:08 +00:00
bert-e 6adf4eb265 Merge branch 'w/8.2/bugfix/S3C-4245_compliance_mode_fixes' into tmp/octopus/q/8.2 2021-04-12 19:14:24 +00:00
Ronnie Smith d934090322 Merge branch 'development/8.2' into documentation/add-go-sample 2021-04-12 10:30:48 -07:00
Ronnie Smith 45fa71e6e5 feature S3C-4213: Add docs for using vault 2021-04-09 16:23:59 -07:00
bert-e 18a68a1778 Merge branch 'bugfix/S3C-4245_compliance_mode_fixes' into tmp/octopus/w/8.1/bugfix/S3C-4245_compliance_mode_fixes 2021-04-09 22:06:22 +00:00
bert-e 6ce0829832 Merge branch 'w/8.1/bugfix/S3C-4245_compliance_mode_fixes' into tmp/octopus/w/8.2/bugfix/S3C-4245_compliance_mode_fixes 2021-04-09 22:06:22 +00:00
bert-e 6e95095943 Merge branch 'w/8.1/bugfix/S3C-4276_update_kmip_certs' into tmp/octopus/w/8.2/bugfix/S3C-4276_update_kmip_certs 2021-04-09 15:56:50 +00:00
bert-e e7b12e2d1b Merge branch 'bugfix/S3C-4276_update_kmip_certs' into tmp/octopus/w/8.1/bugfix/S3C-4276_update_kmip_certs 2021-04-09 15:56:50 +00:00
Ronnie Smith 75b22cbe35 docs: Add go sample to clients 2021-04-06 16:06:18 -07:00
bert-e 9dfd60b629 Merge branch 'feature/ZENKO-3356-release-8.2.10' into q/8.2 2021-04-02 22:10:17 +00:00
Rached Ben Mustapha 029703d5d7 feature: bump version to 8.2.10 2021-04-02 15:05:04 -07:00
bert-e e83c8ec255 Merge branch 'w/8.2/bugfix/S3C-4263-limitConcurrentOpsInTest' into tmp/octopus/q/8.2 2021-04-02 21:50:56 +00:00
bert-e 7b44b994d3 Merge branch 'w/7.10/bugfix/S3C-4263-limitConcurrentOpsInTest' into tmp/octopus/w/8.1/bugfix/S3C-4263-limitConcurrentOpsInTest 2021-04-02 00:46:14 +00:00
bert-e cdc1d8879b Merge branch 'w/8.1/bugfix/S3C-4263-limitConcurrentOpsInTest' into tmp/octopus/w/8.2/bugfix/S3C-4263-limitConcurrentOpsInTest 2021-04-02 00:46:14 +00:00
Rached Ben Mustapha 2c902b188a bf: upgrade arsenal for hdclient fix 2021-03-26 23:58:34 +00:00
Alexander Chan 916cb62e60 update utapi dependency 2021-03-18 15:44:30 -07:00
bert-e 985c811f74 Merge branch 'w/8.1/bugfix/S3C-4166_putDeleteMarkerObject_increments_numberOfObjects' into tmp/octopus/w/8.2/bugfix/S3C-4166_putDeleteMarkerObject_increments_numberOfObjects 2021-03-17 20:50:58 +00:00
bert-e f8591690cb Merge branch 'w/7.10/bugfix/S3C-4166_putDeleteMarkerObject_increments_numberOfObjects' into tmp/octopus/w/8.1/bugfix/S3C-4166_putDeleteMarkerObject_increments_numberOfObjects 2021-03-17 20:50:57 +00:00
Alexander Chan 83ed02ad08 bugfix: ZENKO-3294 use correctly formatted utapi local cache sentinel param 2021-03-17 08:20:29 -07:00
naren-scality c811b7d533 Merge remote-tracking branch 'origin/bugfix/zenko-3284-mongo-tests-failure-corrections' into w/8.2/bugfix/zenko-3284-mongo-tests-failure-corrections 2021-03-17 07:40:24 -07:00
naren-scality 112a08ee92 bf zenko-3284 mongo ft tests failure corrections 2021-03-16 14:15:24 -07:00
bert-e c023939414 Merge branch 'w/8.1/bugfix/S3C-4138_fix_aborted_mpu_metrics' into tmp/octopus/w/8.2/bugfix/S3C-4138_fix_aborted_mpu_metrics 2021-03-16 03:42:52 +00:00
bert-e 337714fe21 Merge branch 'w/7.10/bugfix/S3C-4138_fix_aborted_mpu_metrics' into tmp/octopus/w/8.1/bugfix/S3C-4138_fix_aborted_mpu_metrics 2021-03-16 03:42:51 +00:00
bert-e f2527c2816 Merge branch 'w/8.1/bugfix/S3C-4134_prevent_nan_sizedelta' into tmp/octopus/w/8.2/bugfix/S3C-4134_prevent_nan_sizedelta 2021-03-15 16:30:13 +00:00
bert-e 8ccb3013e7 Merge branch 'w/7.10/bugfix/S3C-4134_prevent_nan_sizedelta' into tmp/octopus/w/8.1/bugfix/S3C-4134_prevent_nan_sizedelta 2021-03-15 16:30:12 +00:00
Rahul Padigela 9ff6364b2e Merge remote-tracking branch 'origin/w/8.1/bugfix/S3C-4055-cloudserver-metric-corrections' into w/8.2/bugfix/S3C-4055-cloudserver-metric-corrections 2021-03-09 21:34:32 -08:00
Rahul Padigela d812970cc8 bugfix: add utapi v2 test cmd 2021-03-09 21:27:06 -08:00
Rahul Padigela a64ad91e61 Merge remote-tracking branch 'origin/w/8.1/bugfix/S3C-4055-cloudserver-metric-corrections' into w/8.2/bugfix/S3C-4055-cloudserver-metric-corrections 2021-03-09 21:20:24 -08:00
Rahul Padigela 651abc372b Merge remote-tracking branch 'origin/w/7.10/bugfix/S3C-4055-cloudserver-metric-corrections' into w/8.1/bugfix/S3C-4055-cloudserver-metric-corrections 2021-03-09 21:14:05 -08:00
Alexander Chan 4a687feaaf ZENKO-3230: fix Orbit instance registration request
Fixes an issue where the registration response was stored as a string; adds the
`json` option to the registration request so that the response body is parsed as an
object.
2021-03-08 08:54:46 -08:00
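A minimal sketch of the fix described in the commit above, assuming the registration call goes through the `request` module; the endpoint URL and payload below are placeholders:

    const request = require('request');

    // With `json: true`, the request module serializes the body and parses the
    // JSON response, so `body` is delivered as an object rather than a string.
    request({
        method: 'POST',
        url: 'https://orbit.example/api/register', // placeholder endpoint
        json: true,
        body: { instanceId: 'example-instance' },  // placeholder payload
    }, (err, response, body) => {
        if (err) {
            console.error('registration failed', err);
            return;
        }
        console.log('parsed registration response:', body);
    });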
Taylor McKinnon 7cd6de8178 Merge remote-tracking branch 'origin/w/8.1/improvement/S3C-3971_reduce_event_footprint' into w/8.2/improvement/S3C-3971_reduce_event_footprint 2021-02-11 13:15:39 -08:00
Taylor McKinnon 09d21ecfdf Merge remote-tracking branch 'origin/w/7.10/improvement/S3C-3971_reduce_event_footprint' into w/8.1/improvement/S3C-3971_reduce_event_footprint 2021-02-11 13:12:18 -08:00
bert-e 2f26577680 Merge branch 'bugfix/ZENKO-2153-fix-tests' into tmp/octopus/w/8.2/bugfix/ZENKO-2153-fix-tests 2021-02-11 10:04:46 +00:00
alexandre merle 1845d9e06d tests: ZENKO-2153: skip retention / object lock / legal hold for ceph 2021-02-11 10:59:10 +01:00
alexandre merle 5d0dc99b80 tests: ZENKO-2153: fix versioning state change if replication 2021-02-11 10:59:10 +01:00
alexandre merle 3619c4c296 bugfix: ZENKO-2153: init metadata wrapper for tests 2021-02-11 10:59:10 +01:00
alexandre merle 237913e604 bugfix: ZENKO-2153: use MPU testing for multiple backend 2021-02-11 10:59:10 +01:00
alexandre merle 9ef5a12af5 bugfix: ZENKO-3243: sanity check on complete upload 2021-02-11 10:59:10 +01:00
alexandre merle 230a4cc2e7 bugfix: ZENKO-3241: considering range start 0 as valid 2021-02-11 10:59:10 +01:00
alexandre merle 798191ac8d tests: ZENKO-2153: activate aws debug logging 2021-02-11 10:59:10 +01:00
alexandre merle b094948064 bugfix: ZENKO-2352: fix aws tests 2021-02-11 10:59:10 +01:00
alexandre merle 7fca140ab9 improv: ZENKO-2153: use agentkeepalive for global agent 2021-02-11 10:59:06 +01:00
alexandre merle a2fb48c9da bugfix: ZENKO-2153: aws sdk upgrade 2021-02-11 10:57:57 +01:00
alexandre merle 01bd8783e9 bugfix: ZENKO-2153: Remove .only() 2021-02-11 10:57:57 +01:00
alexandre merle 3ebb55a59c Merge remote-tracking branch 'origin/w/8.1/bugfix/ZENKO-2153-update-arsenal' into w/8.2/bugfix/ZENKO-2153-update-arsenal 2021-02-11 09:32:52 +01:00
alexandre merle 8d7f242998 Merge remote-tracking branch 'origin/w/7.10/bugfix/ZENKO-2153-update-arsenal' into w/8.1/bugfix/ZENKO-2153-update-arsenal 2021-02-11 09:31:27 +01:00
bert-e 89dde58a58 Merge branch 'w/8.1/improvement/S3C-3974-remove-s3blaster' into tmp/octopus/w/8.2/improvement/S3C-3974-remove-s3blaster 2021-02-11 03:10:48 +00:00
Rahul Padigela 61fa056e70 Merge remote-tracking branch 'origin/w/7.10/improvement/S3C-3974-remove-s3blaster' into w/8.1/improvement/S3C-3974-remove-s3blaster 2021-02-10 18:04:37 -08:00
bert-e 69aabd61b7 Merge branch 'w/8.1/bugfix/S3C-2642-multi-object-delete-xml-decl' into tmp/octopus/w/8.2/bugfix/S3C-2642-multi-object-delete-xml-decl 2021-02-06 08:56:58 +00:00
bert-e 96edc0ff9b Merge branch 'w/7.9/bugfix/S3C-2642-multi-object-delete-xml-decl' into tmp/octopus/w/8.1/bugfix/S3C-2642-multi-object-delete-xml-decl 2021-02-06 08:56:57 +00:00
alexandre merle 9b571ffd4a Merge remote-tracking branch 'origin/w/8.1/bugfix/S3C-3904-better-s3-action-logs' into w/8.2/bugfix/S3C-3904-better-s3-action-logs 2021-02-05 21:58:13 +01:00
alexandre merle c5cc941609 Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-3904-better-s3-action-logs' into w/8.1/bugfix/S3C-3904-better-s3-action-logs 2021-02-05 21:57:22 +01:00
Thomas Carmet e0e3cbc2a5 ZENKO-3106 updating package.json version to 8.2.8 2021-02-04 17:42:32 -08:00
Thomas Carmet 92cf756f78 Merge remote-tracking branch 'origin/feature/ZENKO-3106-add-release-stage' into w/8.2/feature/ZENKO-3106-add-release-stage 2021-02-04 17:42:25 -08:00
Thomas Carmet 9bc11309eb ZENKO-3106 updating package.json to the next version 2021-02-04 17:41:46 -08:00
Thomas Carmet d8dd11101b ZENKO-3106 setting up release stage for cloudserver
Co-authored-by: William Abernathy <william.abernathy@scality.com>
2021-02-04 17:41:46 -08:00
bert-e 890ec70e01 Merge branch 'w/8.1/bugfix/S3C-3860-fix-corner-case-multiple-backend' into tmp/octopus/w/8.2/bugfix/S3C-3860-fix-corner-case-multiple-backend 2021-02-02 02:08:09 +00:00
alexandre merle 5df7ecf776 Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-3860-fix-corner-case-multiple-backend' into w/8.1/bugfix/S3C-3860-fix-corner-case-multiple-backend 2021-02-02 03:07:19 +01:00
bert-e e6fc52f0a7 Merge branch 'w/8.1/feature/S3C-2767-bp-error-code' into tmp/octopus/w/8.2/feature/S3C-2767-bp-error-code 2021-01-28 18:20:46 +00:00
bert-e 75f9ae2ab2 Merge branch 'feature/S3C-2767-bp-error-code' into tmp/octopus/w/8.1/feature/S3C-2767-bp-error-code 2021-01-28 18:20:45 +00:00
alexandre merle 2300ef77ed Merge remote-tracking branch 'origin/w/8.1/improvement/S3C-3897-upgrade-aws-sdk' into w/8.2/improvement/S3C-3897-upgrade-aws-sdk 2021-01-27 20:57:50 +01:00
alexandre merle 37cedc6ade Merge remote-tracking branch 'origin/w/7.9/improvement/S3C-3897-upgrade-aws-sdk' into w/8.1/improvement/S3C-3897-upgrade-aws-sdk 2021-01-27 20:53:51 +01:00
bert-e 862911f745 Merge branch 'w/8.1/bugfix/S3C-2201-econnreset-rest-client-keep-alive' into tmp/octopus/w/8.2/bugfix/S3C-2201-econnreset-rest-client-keep-alive 2021-01-26 10:50:28 +00:00
alexandre merle b553a2a84b Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-2201-econnreset-rest-client-keep-alive' into w/8.1/bugfix/S3C-2201-econnreset-rest-client-keep-alive 2021-01-26 11:49:38 +01:00
bert-e 36feeac832 Merge branches 'w/8.2/bugfix/S3C-3860-put-object-continue-too-early' and 'q/3282/8.1/bugfix/S3C-3860-put-object-continue-too-early' into tmp/octopus/q/8.2 2021-01-26 07:17:54 +00:00
bert-e 223739e2c9 Merge branches 'w/8.1/bugfix/S3C-3860-put-object-continue-too-early' and 'q/3282/7.9/bugfix/S3C-3860-put-object-continue-too-early' into tmp/octopus/q/8.1 2021-01-26 07:17:54 +00:00
bert-e c79ffc5216 Merge branch 'w/8.1/bugfix/S3C-3860-put-object-continue-too-early' into tmp/octopus/w/8.2/bugfix/S3C-3860-put-object-continue-too-early 2021-01-25 20:07:44 +00:00
alexandre merle 23e1235fb0 Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-3860-put-object-continue-too-early' into w/8.1/bugfix/S3C-3860-put-object-continue-too-early 2021-01-25 21:07:16 +01:00
bert-e 2e20d665ae Merge branches 'w/8.2/bugfix/S3C-3425-bump-arsenal-version' and 'q/3223/8.1/bugfix/S3C-3425-bump-arsenal-version' into tmp/octopus/q/8.2 2021-01-22 22:30:06 +00:00
bert-e 3449c5aae2 Merge branches 'w/8.1/bugfix/S3C-3425-bump-arsenal-version' and 'q/3223/7.9/bugfix/S3C-3425-bump-arsenal-version' into tmp/octopus/q/8.1 2021-01-22 22:30:05 +00:00
bert-e 638afaced4 Merge branch 'w/8.1/bugfix/S3C-3425-bump-arsenal-version' into tmp/octopus/w/8.2/bugfix/S3C-3425-bump-arsenal-version 2021-01-22 22:15:12 +00:00
Ilke 14bc466af7 Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-3425-bump-arsenal-version' into w/8.1/bugfix/S3C-3425-bump-arsenal-version 2021-01-22 14:02:57 -08:00
bert-e f29eb7c837 Merge branch 'w/8.1/bugfix/S3C-3416-data-backend-uuid-for-delete-request' into tmp/octopus/w/8.2/bugfix/S3C-3416-data-backend-uuid-for-delete-request 2021-01-21 15:28:53 +00:00
bert-e ce4d1eecf7 Merge branch 'w/7.9/bugfix/S3C-3416-data-backend-uuid-for-delete-request' into tmp/octopus/w/8.1/bugfix/S3C-3416-data-backend-uuid-for-delete-request 2021-01-21 15:28:52 +00:00
bert-e 58d9bf4453 Merge branch 'w/8.1/improvement/S3C-3779-add-parts-count-header' into tmp/octopus/w/8.2/improvement/S3C-3779-add-parts-count-header 2021-01-13 02:18:39 +00:00
Dora Korpar f9fbe2ca15 Merge remote-tracking branch 'origin/w/7.9/improvement/S3C-3779-add-parts-count-header' into w/8.1/improvement/S3C-3779-add-parts-count-header 2021-01-12 18:06:55 -08:00
bert-e 07adb04d6b Merge branch 'w/8.1/feature/S3C-3772_add_utapiv2_client_tls_config_support' into tmp/octopus/w/8.2/feature/S3C-3772_add_utapiv2_client_tls_config_support 2021-01-06 23:04:35 +00:00
Taylor McKinnon c0a2ee190e Merge remote-tracking branch 'origin/feature/S3C-3772_add_utapiv2_client_tls_config_support' into w/8.1/feature/S3C-3772_add_utapiv2_client_tls_config_support 2021-01-06 15:04:01 -08:00
bert-e ad7b370211 Merge branches 'w/8.2/bugfix/S3C-3554-bucket-notif-iam-policy-eval' and 'q/3179/8.1/bugfix/S3C-3554-bucket-notif-iam-policy-eval' into tmp/octopus/q/8.2 2020-12-28 05:43:45 +00:00
bert-e 0821069a91 Merge branch 'q/3179/7.9/bugfix/S3C-3554-bucket-notif-iam-policy-eval' into tmp/normal/q/8.1 2020-12-28 05:43:44 +00:00
bert-e 637e01d7ee Merge branch 'w/8.1/bugfix/S3C-3554-bucket-notif-iam-policy-eval' into tmp/octopus/w/8.2/bugfix/S3C-3554-bucket-notif-iam-policy-eval 2020-12-28 05:16:31 +00:00
Dora Korpar 020e65b548 Merge remote-tracking branch 'origin/bugfix/S3C-3554-bucket-notif-iam-policy-eval' into w/8.1/bugfix/S3C-3554-bucket-notif-iam-policy-eval 2020-12-27 21:14:59 -08:00
bert-e 646238f9ee Merge branch 'w/8.1/bugfix/S3C-2665-backportBatchDeleteBackbeatRoute' into tmp/octopus/w/8.2/bugfix/S3C-2665-backportBatchDeleteBackbeatRoute 2020-12-22 21:36:30 +00:00
Jonathan Gramain d210c45dd4 Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-2665-backportBatchDeleteBackbeatRoute' into w/8.1/bugfix/S3C-2665-backportBatchDeleteBackbeatRoute 2020-12-22 13:18:33 -08:00
bert-e 4db246df92 Merge branch 'w/8.1/bugfix/S3C-3313-fix-bucketdel-abortmpu' into tmp/octopus/w/8.2/bugfix/S3C-3313-fix-bucketdel-abortmpu 2020-12-10 00:33:14 +00:00
bert-e 1b51ac7f94 Merge branch 'w/7.9/bugfix/S3C-3313-fix-bucketdel-abortmpu' into tmp/octopus/w/8.1/bugfix/S3C-3313-fix-bucketdel-abortmpu 2020-12-10 00:33:13 +00:00
bert-e 198f449da6 Merge branch 'w/8.1/bugfix/S3C-3313-fix-storageused' into tmp/octopus/w/8.2/bugfix/S3C-3313-fix-storageused 2020-12-08 08:03:04 +00:00
bert-e 1f5f22cf85 Merge branch 'w/7.9/bugfix/S3C-3313-fix-storageused' into tmp/octopus/w/8.1/bugfix/S3C-3313-fix-storageused 2020-12-08 08:03:03 +00:00
bert-e cd3aac2d8d Merge branch 'w/8.1/improvement/S3C-3653-add-fields' into tmp/octopus/w/8.2/improvement/S3C-3653-add-fields 2020-12-02 01:20:39 +00:00
bert-e 130639ab09 Merge branch 'w/7.9/improvement/S3C-3653-add-fields' into tmp/octopus/w/8.1/improvement/S3C-3653-add-fields 2020-12-02 01:20:39 +00:00
bert-e 22f88f40af Merge branch 'w/8.1/improvement/S3C-3512-bucket-obj-names-logs' into tmp/octopus/w/8.2/improvement/S3C-3512-bucket-obj-names-logs 2020-11-19 04:50:09 +00:00
bert-e 658cbc1c58 Merge branch 'w/7.9/improvement/S3C-3512-bucket-obj-names-logs' into tmp/octopus/w/8.1/improvement/S3C-3512-bucket-obj-names-logs 2020-11-19 04:50:08 +00:00
bert-e f75d044951 Merge branch 'w/8.1/improvement/S3C-3475-add-actions-in-logs' into tmp/octopus/w/8.2/improvement/S3C-3475-add-actions-in-logs 2020-11-19 02:28:14 +00:00
Dora Korpar 3f21abc555 revert mongodb for reportHandler tests 2020-11-18 18:27:55 -08:00
bert-e d35898b7c4 Merge branch 'w/8.1/improvement/S3C-3475-add-actions-in-logs' into tmp/octopus/w/8.2/improvement/S3C-3475-add-actions-in-logs 2020-11-19 00:12:00 +00:00
Dora Korpar 2f55560ea7 Merge remote-tracking branch 'origin/w/7.9/improvement/S3C-3475-add-actions-in-logs' into w/8.1/improvement/S3C-3475-add-actions-in-logs 2020-11-18 16:11:13 -08:00
bert-e 501067f8c2 Merge branch 'bugfix/ZENKO-2866-updateArsenalDep' into tmp/octopus/w/8.2/bugfix/ZENKO-2866-updateArsenalDep 2020-11-11 19:50:19 +00:00
Jonathan Gramain 73b6c3805f bugfix: ZENKO-2866 update arsenal dependency 2020-11-11 11:46:06 -08:00
Jonathan Gramain 0895df0c66 bugfix: ZENKO-2905 update arsenal dependency 2020-11-04 16:58:54 -08:00
bert-e a5c3d2cfe1 Merge branch 'w/8.1/bugfix/S3C-2687-acl-invalid-id' into tmp/octopus/w/8.2/bugfix/S3C-2687-acl-invalid-id 2020-11-02 21:43:15 +00:00
bert-e 1df9d24171 Merge branch 'w/7.9/bugfix/S3C-2687-acl-invalid-id' into tmp/octopus/w/8.1/bugfix/S3C-2687-acl-invalid-id 2020-11-02 21:43:14 +00:00
bert-e deed88b51d Merge branches 'w/8.2/bugfix/S3C-3350-partnumber-exception-port' and 'q/3115/8.1/bugfix/S3C-3350-partnumber-exception-port' into tmp/octopus/q/8.2 2020-10-30 17:19:13 +00:00
bert-e 63a3aa886d Merge branches 'w/8.1/bugfix/S3C-3350-partnumber-exception-port' and 'q/3115/7.9/bugfix/S3C-3350-partnumber-exception-port' into tmp/octopus/q/8.1 2020-10-30 17:19:12 +00:00
bert-e 8ad69c5e35 Merge branch 'w/8.1/bugfix/S3C-3428-skip-obj-copy-tests' into tmp/octopus/w/8.2/bugfix/S3C-3428-skip-obj-copy-tests 2020-10-29 17:46:09 +00:00
bert-e fd395a8672 Merge branch 'w/7.9/bugfix/S3C-3428-skip-obj-copy-tests' into tmp/octopus/w/8.1/bugfix/S3C-3428-skip-obj-copy-tests 2020-10-29 17:46:08 +00:00
bert-e 18f32b56b4 Merge branch 'w/8.1/bugfix/S3C-3350-partnumber-exception-port' into tmp/octopus/w/8.2/bugfix/S3C-3350-partnumber-exception-port 2020-10-29 04:40:42 +00:00
bert-e a6b854d258 Merge branch 'w/7.9/bugfix/S3C-3350-partnumber-exception-port' into tmp/octopus/w/8.1/bugfix/S3C-3350-partnumber-exception-port 2020-10-29 04:40:42 +00:00
bert-e 5534af953e Merge branch 'w/8.1/bugfix/S3C-3460_increment_objDelta_only_on_mpu_completion' into tmp/octopus/w/8.2/bugfix/S3C-3460_increment_objDelta_only_on_mpu_completion 2020-10-27 23:59:40 +00:00
bert-e a45a129e86 Merge branch 'w/7.9/bugfix/S3C-3460_increment_objDelta_only_on_mpu_completion' into tmp/octopus/w/8.1/bugfix/S3C-3460_increment_objDelta_only_on_mpu_completion 2020-10-27 23:59:40 +00:00
bert-e f622e75711 Merge branch 'w/8.1/bugfix/S3C-3428-skip-obj-copy-tests-e2e' into tmp/octopus/w/8.2/bugfix/S3C-3428-skip-obj-copy-tests-e2e 2020-10-27 19:03:49 +00:00
bert-e 0ee82114a9 Merge branch 'w/7.9/bugfix/S3C-3428-skip-obj-copy-tests-e2e' into tmp/octopus/w/8.1/bugfix/S3C-3428-skip-obj-copy-tests-e2e 2020-10-27 19:03:48 +00:00
bert-e ebdec29c88 Merge branch 'w/8.1/bugfix/S3C-3465-remove-extra-ms-digits' into tmp/octopus/w/8.2/bugfix/S3C-3465-remove-extra-ms-digits 2020-10-23 23:18:13 +00:00
bert-e c8067d3206 Merge branch 'w/7.9/bugfix/S3C-3465-remove-extra-ms-digits' into tmp/octopus/w/8.1/bugfix/S3C-3465-remove-extra-ms-digits 2020-10-23 23:18:13 +00:00
bert-e 227873f2e5 Merge branches 'w/8.2/bugfix/S3C-3330-bucket-policy-action-mapping' and 'q/3058/8.1/bugfix/S3C-3330-bucket-policy-action-mapping' into tmp/octopus/q/8.2 2020-10-10 02:53:34 +00:00
bert-e 4dfd3c04ad Merge branches 'w/8.1/bugfix/S3C-3330-bucket-policy-action-mapping' and 'q/3058/7.9/bugfix/S3C-3330-bucket-policy-action-mapping' into tmp/octopus/q/8.1 2020-10-10 02:53:34 +00:00
bert-e c94eadf24b Merge branch 'w/8.1/bugfix/S3C-3350-partnumber-exception' into tmp/octopus/w/8.2/bugfix/S3C-3350-partnumber-exception 2020-10-09 06:15:17 +00:00
Rahul Padigela e40203378f Merge remote-tracking branch 'origin/w/7.9/bugfix/S3C-3350-partnumber-exception' into w/8.1/bugfix/S3C-3350-partnumber-exception 2020-10-08 23:06:56 -07:00
bert-e 834d4c1721 Merge branch 'w/8.1/bugfix/S3C-3330-bucket-policy-action-mapping' into tmp/octopus/w/8.2/bugfix/S3C-3330-bucket-policy-action-mapping 2020-10-08 05:01:55 +00:00
bert-e 06f56dd2f0 Merge branch 'w/7.9/bugfix/S3C-3330-bucket-policy-action-mapping' into tmp/octopus/w/8.1/bugfix/S3C-3330-bucket-policy-action-mapping 2020-10-08 05:01:55 +00:00
bert-e 9519087fe7 Merge branches 'w/8.2/feature/ZENKO-2810-upload-short-version-docker-registry' and 'q/3031/8.1/feature/ZENKO-2810-upload-short-version-docker-registry' into tmp/octopus/q/8.2 2020-10-05 15:44:00 +00:00
bert-e 6cd445849e Merge branch 'feature/ZENKO-2810-upload-short-version-docker-registry' into q/8.1 2020-10-05 15:43:59 +00:00
Dora Korpar 9bea862bdf Merge remote-tracking branch 'origin/w/8.1/feature/S3C-1801-policy-tag-condition-keys' into w/8.2/feature/S3C-1801-policy-tag-condition-keys 2020-09-30 17:02:17 -07:00
Dora Korpar 878102280a Merge remote-tracking branch 'origin/feature/S3C-1801-policy-tag-condition-keys' into w/8.1/feature/S3C-1801-policy-tag-condition-keys 2020-09-30 16:05:14 -07:00
bert-e 4e6f7b1309 Merge branches 'w/8.2/bugfix/S3C-2775-utapiCRRFix' and 'q/3014/8.1/bugfix/S3C-2775-utapiCRRFix' into tmp/octopus/q/8.2 2020-09-16 23:09:25 +00:00
bert-e 1715bc3dce Merge branches 'w/8.1/bugfix/S3C-2775-utapiCRRFix' and 'q/3014/7.8/bugfix/S3C-2775-utapiCRRFix' into tmp/octopus/q/8.1 2020-09-16 23:09:24 +00:00
bert-e 200bfe594e Merge branch 'feature/ZENKO-2810-upload-short-version-docker-registry' into tmp/octopus/w/8.2/feature/ZENKO-2810-upload-short-version-docker-registry 2020-09-16 21:46:53 +00:00
Thomas Carmet e83ec2e1ca ZENKO-2810 upload docker image with short version tag
The goal of this change is to produce docker image tags
in our registry that look like the following: `latest-8.1` or `latest-8.2`

Those tags will match the content of the tip of a given development/x.y branch
2020-09-16 14:44:11 -07:00
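A minimal sketch of how such a short tag maps onto a development branch; the helper name is hypothetical and not part of the actual CI code:

    // Illustrative only: derive the short image tag from a development
    // branch name, e.g. development/8.2 -> latest-8.2.
    function shortTagFor(branch) {
        const version = branch.split('/')[1];
        return `latest-${version}`;
    }

    console.log(shortTagFor('development/8.1')); // latest-8.1
    console.log(shortTagFor('development/8.2')); // latest-8.2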
bert-e 67a1516221 Merge branch 'w/8.1/bugfix/S3C-3042-repl-versioning' into tmp/octopus/w/8.2/bugfix/S3C-3042-repl-versioning 2020-09-15 06:25:57 +00:00
Rahul Padigela 86e8d3412f Merge remote-tracking branch 'origin/w/7.8/bugfix/S3C-3042-repl-versioning' into w/8.1/bugfix/S3C-3042-repl-versioning 2020-09-14 23:25:09 -07:00
bert-e e6efb5816e Merge branch 'w/8.1/bugfix/S3C-2775-utapiCRRFix' into tmp/octopus/w/8.2/bugfix/S3C-2775-utapiCRRFix 2020-09-14 22:44:01 +00:00
Jonathan Gramain 4127cbef21 Merge remote-tracking branch 'origin/bugfix/S3C-2775-utapiCRRFix' into w/8.1/bugfix/S3C-2775-utapiCRRFix 2020-09-14 15:38:59 -07:00
bert-e 3139fa0f9f Merge branch 'w/8.1/bugfix/S3C-1232_add_account_id_validation_for_list_metrics' into tmp/octopus/w/8.2/bugfix/S3C-1232_add_account_id_validation_for_list_metrics 2020-09-04 22:23:42 +00:00
bert-e 211cfa9a4e Merge branch 'w/7.8/bugfix/S3C-1232_add_account_id_validation_for_list_metrics' into tmp/octopus/w/8.1/bugfix/S3C-1232_add_account_id_validation_for_list_metrics 2020-09-04 22:23:41 +00:00
bert-e ac0558fb1f Merge branch 'w/8.1/bugfix/S3C-3303-put-empty-notif-config' into tmp/octopus/w/8.2/bugfix/S3C-3303-put-empty-notif-config 2020-09-03 02:08:43 +00:00
Dora Korpar 3068b6ddf3 Merge remote-tracking branch 'origin/bugfix/S3C-3303-put-empty-notif-config' into w/8.1/bugfix/S3C-3303-put-empty-notif-config 2020-09-02 18:56:39 -07:00
bert-e ae94b94b4c Merge branch 'w/8.1/bugfix/S3C-3301-bucket-notif-config-check-fix' into tmp/octopus/w/8.2/bugfix/S3C-3301-bucket-notif-config-check-fix 2020-09-01 15:32:13 +00:00
bert-e 5510c329ec Merge branch 'bugfix/S3C-3301-bucket-notif-config-check-fix' into tmp/octopus/w/8.1/bugfix/S3C-3301-bucket-notif-config-check-fix 2020-09-01 15:32:13 +00:00
bert-e e7fe656fcf Merge branch 'w/8.1/feature/S3C-2798-get-bucket-notification' into tmp/octopus/w/8.2/feature/S3C-2798-get-bucket-notification 2020-08-27 03:39:37 +00:00
Dora Korpar 11f88abe35 Merge remote-tracking branch 'origin/feature/S3C-2798-get-bucket-notification' into w/8.1/feature/S3C-2798-get-bucket-notification 2020-08-26 20:28:54 -07:00
bert-e 0d322df197 Merge branch 'w/8.1/feature/S3C-2797-put-bucket-notification' into tmp/octopus/w/8.2/feature/S3C-2797-put-bucket-notification 2020-08-26 06:30:22 +00:00
Dora Korpar 885d973766 revert mongo yarn lock upgrade 2020-08-25 23:30:03 -07:00
bert-e a9cc81ad3a Merge branch 'w/8.1/feature/S3C-2797-put-bucket-notification' into tmp/octopus/w/8.2/feature/S3C-2797-put-bucket-notification 2020-08-25 21:32:57 +00:00
Dora Korpar 4ea7b2e496 Merge remote-tracking branch 'origin/feature/S3C-2797-put-bucket-notification' into w/8.1/feature/S3C-2797-put-bucket-notification 2020-08-25 14:19:53 -07:00
bert-e 118926a4a0 Merge branches 'w/8.2/feature/S3C-3238-bucket-notification-config' and 'q/2934/8.1/feature/S3C-3238-bucket-notification-config' into tmp/octopus/q/8.2 2020-08-17 18:37:47 +00:00
bert-e 431cc40a94 Merge branches 'w/8.1/feature/S3C-3238-bucket-notification-config' and 'q/2934/7.8/feature/S3C-3238-bucket-notification-config' into tmp/octopus/q/8.1 2020-08-17 18:37:47 +00:00
bert-e 3172fe2621 Merge branch 'w/8.1/bugfix/S3C-3263_incorrect_objectDelta' into tmp/octopus/w/8.2/bugfix/S3C-3263_incorrect_objectDelta 2020-08-14 21:42:19 +00:00
bert-e 95f2782fa4 Merge branch 'bugfix/S3C-3263_incorrect_objectDelta' into tmp/octopus/w/8.1/bugfix/S3C-3263_incorrect_objectDelta 2020-08-14 21:42:18 +00:00
bert-e 8fb1160deb Merge branch 'w/8.1/bugfix/S3C-3263_incorrect_objectDelta' into tmp/octopus/w/8.2/bugfix/S3C-3263_incorrect_objectDelta 2020-08-14 21:06:21 +00:00
bert-e 1b59974eba Merge branch 'bugfix/S3C-3263_incorrect_objectDelta' into tmp/octopus/w/8.1/bugfix/S3C-3263_incorrect_objectDelta 2020-08-14 21:06:20 +00:00
bert-e a16ceb6586 Merge branch 'w/8.1/bugfix/S3C-3263_incorrect_objectDelta' into tmp/octopus/w/8.2/bugfix/S3C-3263_incorrect_objectDelta 2020-08-14 19:47:23 +00:00
bert-e 6c86fec043 Merge branch 'bugfix/S3C-3263_incorrect_objectDelta' into tmp/octopus/w/8.1/bugfix/S3C-3263_incorrect_objectDelta 2020-08-14 19:47:22 +00:00
bert-e a903862aab Merge branch 'w/8.1/bugfix/ZENKO-2729-raw-node-tests-remove-only' into tmp/octopus/w/8.2/bugfix/ZENKO-2729-raw-node-tests-remove-only 2020-08-12 20:54:08 +00:00
Jonathan Gramain ceb8d3b6f9 Merge remote-tracking branch 'origin/w/7.8/bugfix/ZENKO-2729-raw-node-tests-remove-only' into w/8.1/bugfix/ZENKO-2729-raw-node-tests-remove-only 2020-08-12 13:53:50 -07:00
bert-e 1b7a0182c2 Merge branches 'development/8.2' and 'w/8.1/bugfix/ZENKO-2729-raw-node-tests-remove-only' into tmp/octopus/w/8.2/bugfix/ZENKO-2729-raw-node-tests-remove-only 2020-08-12 19:56:48 +00:00
bert-e c35cac674b Merge branches 'development/8.1' and 'w/7.8/bugfix/ZENKO-2729-raw-node-tests-remove-only' into tmp/octopus/w/8.1/bugfix/ZENKO-2729-raw-node-tests-remove-only 2020-08-12 19:56:47 +00:00
bert-e aecbec4e6c Merge branch 'w/8.1/feature/S3C-3238-bucket-notification-config' into tmp/octopus/w/8.2/feature/S3C-3238-bucket-notification-config 2020-08-12 07:21:37 +00:00
bert-e 9f0023157b Merge branch 'feature/S3C-3238-bucket-notification-config' into tmp/octopus/w/8.1/feature/S3C-3238-bucket-notification-config 2020-08-12 07:21:37 +00:00
bert-e d414c4a5bb Merge branches 'w/8.2/feature/S3C-3229-bucketnotif-objmd-update' and 'q/2929/8.1/feature/S3C-3229-bucketnotif-objmd-update' into tmp/octopus/q/8.2 2020-08-12 07:16:11 +00:00
bert-e a9b2b891f8 Merge branches 'w/8.1/feature/S3C-3229-bucketnotif-objmd-update' and 'q/2929/7.8/feature/S3C-3229-bucketnotif-objmd-update' into tmp/octopus/q/8.1 2020-08-12 07:16:10 +00:00
bert-e 8e247df0c4 Merge branch 'w/8.1/feature/S3C-3229-bucketnotif-objmd-update' into tmp/octopus/w/8.2/feature/S3C-3229-bucketnotif-objmd-update 2020-08-12 04:10:23 +00:00
Dora Korpar 430ddf1cad revert yarn mongo update 2020-08-11 21:09:49 -07:00
bert-e af69c7edc5 Merge branch 'w/8.1/feature/S3C-3238-bucket-notification-config' into tmp/octopus/w/8.2/feature/S3C-3238-bucket-notification-config 2020-08-12 02:05:18 +00:00
bert-e 73636b9af6 Merge branch 'feature/S3C-3238-bucket-notification-config' into tmp/octopus/w/8.1/feature/S3C-3238-bucket-notification-config 2020-08-12 02:05:18 +00:00
bert-e 3d8dc4c83e Merge branch 'w/8.1/feature/S3C-3229-bucketnotif-objmd-update' into tmp/octopus/w/8.2/feature/S3C-3229-bucketnotif-objmd-update 2020-08-11 23:25:47 +00:00
Dora Korpar fe87fbebe6 Merge remote-tracking branch 'origin/feature/S3C-3229-bucketnotif-objmd-update' into w/8.1/feature/S3C-3229-bucketnotif-objmd-update 2020-08-11 16:12:18 -07:00
bert-e 748f2276dc Merge branch 'w/8.1/bugfix/ZENKO-2729-raw-node-tests-remove-only' into tmp/octopus/w/8.2/bugfix/ZENKO-2729-raw-node-tests-remove-only 2020-08-11 19:36:27 +00:00
bert-e 5ea3bbd496 Merge branch 'w/7.8/bugfix/ZENKO-2729-raw-node-tests-remove-only' into tmp/octopus/w/8.1/bugfix/ZENKO-2729-raw-node-tests-remove-only 2020-08-11 19:36:26 +00:00
bert-e b24cb5f367 Merge branch 'w/8.1/bugfix/S3C-3246-pushmetric-getobject' into tmp/octopus/w/8.2/bugfix/S3C-3246-pushmetric-getobject 2020-08-11 17:41:15 +00:00
bert-e 20e58d85f9 Merge branch 'bugfix/S3C-3246-pushmetric-getobject' into tmp/octopus/w/8.1/bugfix/S3C-3246-pushmetric-getobject 2020-08-11 17:41:14 +00:00
bert-e 5d930f52a6 Merge branch 'w/8.1/bugfix/S3C-2801-tagging-functional-tests' into tmp/octopus/w/8.2/bugfix/S3C-2801-tagging-functional-tests 2020-08-08 00:56:40 +00:00
bert-e 7ebd0c9d58 Merge branch 'w/7.8/bugfix/S3C-2801-tagging-functional-tests' into tmp/octopus/w/8.1/bugfix/S3C-2801-tagging-functional-tests 2020-08-08 00:56:40 +00:00
bert-e ab9b0b8de6 Merge branches 'w/8.2/feature/S3C-3132-utapi-v2-push-metric' and 'q/2912/8.1/feature/S3C-3132-utapi-v2-push-metric' into tmp/octopus/q/8.2 2020-08-07 21:45:21 +00:00
bert-e 16a7b44090 Merge branches 'w/8.1/feature/S3C-3132-utapi-v2-push-metric' and 'q/2912/7.8/feature/S3C-3132-utapi-v2-push-metric' into tmp/octopus/q/8.1 2020-08-07 21:45:20 +00:00
bert-e dbf44586a6 Merge branch 'w/8.1/feature/S3C-3132-utapi-v2-push-metric' into tmp/octopus/w/8.2/feature/S3C-3132-utapi-v2-push-metric 2020-08-07 21:26:31 +00:00
Dora Korpar 1c42602831 remove mongo package updates 2020-08-07 13:50:27 -07:00
Dora Korpar 2c968a5cf9 Merge remote-tracking branch 'origin/w/8.1/feature/S3C-3132-utapi-v2-push-metric' into w/8.2/feature/S3C-3132-utapi-v2-push-metric 2020-08-05 01:11:20 -07:00
Dora Korpar 1bd0da04b9 Merge remote-tracking branch 'origin/feature/S3C-3132-utapi-v2-push-metric' into w/8.1/feature/S3C-3132-utapi-v2-push-metric 2020-08-04 22:19:40 -07:00
bert-e 031ffe0344 Merge branches 'w/8.2/dependabot/add-v2-config-file' and 'q/2743/8.1/dependabot/add-v2-config-file' into tmp/octopus/q/8.2 2020-07-31 21:25:03 +00:00
bert-e f77069fd37 Merge branches 'w/8.1/dependabot/add-v2-config-file' and 'q/2743/7.8/dependabot/add-v2-config-file' into tmp/octopus/q/8.1 2020-07-31 21:25:03 +00:00
bert-e b16d0ab46f Merge branch 'w/8.1/dependabot/add-v2-config-file' into tmp/octopus/w/8.2/dependabot/add-v2-config-file 2020-07-29 18:46:11 +00:00
bert-e 8db4db3e96 Merge branch 'w/7.8/dependabot/add-v2-config-file' into tmp/octopus/w/8.1/dependabot/add-v2-config-file 2020-07-29 18:46:10 +00:00
Dora Korpar ca1c3a4287 Merge remote-tracking branch 'origin/w/8.1/feature/S3C-3196-update-node' into w/8.2/feature/S3C-3196-update-node 2020-07-28 14:32:57 -07:00
Dora Korpar 0d23133315 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3196-update-node' into w/8.1/feature/S3C-3196-update-node 2020-07-28 12:38:22 -07:00
Jonathan Gramain 827cfd2745 Merge remote-tracking branch 'origin/w/8.1/bugfix/S3C-2513-fixSproxydLocationConfigParsing' into w/8.2/bugfix/S3C-2513-fixSproxydLocationConfigParsing 2020-07-14 17:18:49 -07:00
Jonathan Gramain 8d9c6924d5 Merge remote-tracking branch 'origin/w/7.8/bugfix/S3C-2513-fixSproxydLocationConfigParsing' into w/8.1/bugfix/S3C-2513-fixSproxydLocationConfigParsing 2020-07-14 17:01:29 -07:00
bert-e abca264313 Merge branch 'w/8.1/improvement/S3C-3119_AddGetHeadObjFuncTests' into tmp/octopus/w/8.2/improvement/S3C-3119_AddGetHeadObjFuncTests 2020-07-10 01:05:25 +00:00
bert-e 4d24149ffd Merge branch 'w/7.8/improvement/S3C-3119_AddGetHeadObjFuncTests' into tmp/octopus/w/8.1/improvement/S3C-3119_AddGetHeadObjFuncTests 2020-07-10 01:05:24 +00:00
bert-e e7f55cbdd3 Merge branches 'w/8.2/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' and 'q/2843/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into tmp/octopus/q/8.2 2020-07-08 01:09:40 +00:00
bert-e fe36fe4e79 Merge branches 'w/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' and 'q/2843/7.8/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into tmp/octopus/q/8.1 2020-07-08 01:09:40 +00:00
bert-e bad637485c Merge branches 'development/8.2' and 'w/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into tmp/octopus/w/8.2/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket 2020-07-08 00:34:46 +00:00
bert-e 540df54c1a Merge branches 'development/8.1' and 'w/7.8/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into tmp/octopus/w/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket 2020-07-08 00:34:45 +00:00
Ilke 79dcc2a3cf update arsenal 2020-07-07 17:34:23 -07:00
bert-e a73843ffa1 Merge branch 'w/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into tmp/octopus/w/8.2/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket 2020-07-07 23:51:53 +00:00
Ilke d2ef79a7f3 Merge remote-tracking branch 'origin/w/7.8/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket' into w/8.1/bugfix/S3C-3130_handleObjectLockDisabledCaseForBucket 2020-07-07 16:46:28 -07:00
bert-e 79c6d2c714 Merge branch 'w/8.1/bugfix/S3C-3145-error-log-to-debug' into tmp/octopus/w/8.2/bugfix/S3C-3145-error-log-to-debug 2020-07-07 23:36:36 +00:00
bert-e 2a54e6ac00 Merge branch 'w/7.8/bugfix/S3C-3145-error-log-to-debug' into tmp/octopus/w/8.1/bugfix/S3C-3145-error-log-to-debug 2020-07-07 23:36:36 +00:00
bert-e cd710415e1 Merge branch 'w/8.1/bugfix/S3C-3145-obj-lock-delete-log-fix' into tmp/octopus/w/8.2/bugfix/S3C-3145-obj-lock-delete-log-fix 2020-07-07 22:19:41 +00:00
bert-e 34146b41b6 Merge branch 'w/7.8/bugfix/S3C-3145-obj-lock-delete-log-fix' into tmp/octopus/w/8.1/bugfix/S3C-3145-obj-lock-delete-log-fix 2020-07-07 22:19:40 +00:00
bert-e f7619ffa62 Merge branch 'w/8.1/bugfix/S3C-3121_fixPutBucketApiObjLock' into tmp/octopus/w/8.2/bugfix/S3C-3121_fixPutBucketApiObjLock 2020-07-03 01:16:50 +00:00
bert-e c8290ccb0c Merge branch 'w/7.8/bugfix/S3C-3121_fixPutBucketApiObjLock' into tmp/octopus/w/8.1/bugfix/S3C-3121_fixPutBucketApiObjLock 2020-07-03 01:16:49 +00:00
bert-e 2cfc03cfd1 Merge branch 'w/8.1/bugfix/S3C-3121_fixPutBucketApiObjLock' into tmp/octopus/w/8.2/bugfix/S3C-3121_fixPutBucketApiObjLock 2020-07-03 01:13:42 +00:00
Ilke 499706b300 Merge remote-tracking branch 'origin/w/7.8/bugfix/S3C-3121_fixPutBucketApiObjLock' into w/8.1/bugfix/S3C-3121_fixPutBucketApiObjLock 2020-07-02 18:04:36 -07:00
bert-e 86cee89dae Merge branch 'w/8.1/bugfix/S3C-3106-multiobj-delete-obj-lock-error' into tmp/octopus/w/8.2/bugfix/S3C-3106-multiobj-delete-obj-lock-error 2020-07-02 15:37:29 +00:00
bert-e b7fac2f4e3 Merge branch 'w/7.8/bugfix/S3C-3106-multiobj-delete-obj-lock-error' into tmp/octopus/w/8.1/bugfix/S3C-3106-multiobj-delete-obj-lock-error 2020-07-02 15:37:28 +00:00
bert-e 9758da31a4 Merge branches 'w/8.2/improvement/S3C-3120_updateBucketGetObjectLockApi' and 'q/2804/8.1/improvement/S3C-3120_updateBucketGetObjectLockApi' into tmp/octopus/q/8.2 2020-07-02 02:42:00 +00:00
bert-e 1cfb8bf663 Merge branches 'w/8.1/improvement/S3C-3120_updateBucketGetObjectLockApi' and 'q/2804/7.8/improvement/S3C-3120_updateBucketGetObjectLockApi' into tmp/octopus/q/8.1 2020-07-02 02:42:00 +00:00
bert-e 555cd1a8ff Merge branches 'w/8.2/feature/S3C-3118-flatten-objmd-retention' and 'q/2794/8.1/feature/S3C-3118-flatten-objmd-retention' into tmp/octopus/q/8.2 2020-07-02 00:22:09 +00:00
bert-e 3e9f96fe39 Merge branches 'w/8.1/feature/S3C-3118-flatten-objmd-retention' and 'q/2794/7.8/feature/S3C-3118-flatten-objmd-retention' into tmp/octopus/q/8.1 2020-07-02 00:22:08 +00:00
bert-e 29ff4f308b Merge branch 'w/8.1/improvement/S3C-3120_updateBucketGetObjectLockApi' into tmp/octopus/w/8.2/improvement/S3C-3120_updateBucketGetObjectLockApi 2020-07-01 23:58:30 +00:00
bert-e ece78c560e Merge branch 'w/7.8/improvement/S3C-3120_updateBucketGetObjectLockApi' into tmp/octopus/w/8.1/improvement/S3C-3120_updateBucketGetObjectLockApi 2020-07-01 23:58:30 +00:00
bert-e 63209c1fd0 Merge branch 'w/8.1/feature/S3C-3118-flatten-objmd-retention' into tmp/octopus/w/8.2/feature/S3C-3118-flatten-objmd-retention 2020-07-01 23:13:10 +00:00
Dora Korpar 5a8ea20a00 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3118-flatten-objmd-retention' into w/8.1/feature/S3C-3118-flatten-objmd-retention 2020-07-01 16:07:17 -07:00
bert-e 1c40688450 Merge branches 'w/8.2/improvement/S3C-3113_PutGetObjectLockConfigFuncTests' and 'q/2761/8.1/improvement/S3C-3113_PutGetObjectLockConfigFuncTests' into tmp/octopus/q/8.2 2020-07-01 19:25:51 +00:00
bert-e 184d4f195c Merge branches 'w/8.1/improvement/S3C-3113_PutGetObjectLockConfigFuncTests' and 'q/2761/7.8/improvement/S3C-3113_PutGetObjectLockConfigFuncTests' into tmp/octopus/q/8.1 2020-07-01 19:25:51 +00:00
bert-e 7e778b0a73 Merge branches 'w/8.2/bugfix/S3C-3115-skip-external-tests' and 'q/2774/8.1/bugfix/S3C-3115-skip-external-tests' into tmp/octopus/q/8.2 2020-07-01 05:53:50 +00:00
bert-e 453631bf99 Merge branches 'w/8.1/bugfix/S3C-3115-skip-external-tests' and 'q/2774/7.8/bugfix/S3C-3115-skip-external-tests' into tmp/octopus/q/8.1 2020-07-01 05:53:49 +00:00
bert-e 469774c107 Merge branch 'w/8.1/bugfix/S3C-3115-skip-external-tests' into tmp/octopus/w/8.2/bugfix/S3C-3115-skip-external-tests 2020-06-30 23:33:53 +00:00
bert-e 8008b452c7 Merge branch 'w/7.8/bugfix/S3C-3115-skip-external-tests' into tmp/octopus/w/8.1/bugfix/S3C-3115-skip-external-tests 2020-06-30 23:33:52 +00:00
bert-e 8a35787e01 Merge branch 'w/8.1/feature/S3C-3112_updateArsenalForObjLockSetter' into tmp/octopus/w/8.2/feature/S3C-3112_updateArsenalForObjLockSetter 2020-06-30 22:02:52 +00:00
Ilke 5e6a451fa3 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-3112_updateArsenalForObjLockSetter' into w/8.1/feature/S3C-3112_updateArsenalForObjLockSetter 2020-06-30 14:49:53 -07:00
bert-e e179468afd Merge branch 'w/8.1/improvement/S3C-3113_PutGetObjectLockConfigFuncTests' into tmp/octopus/w/8.2/improvement/S3C-3113_PutGetObjectLockConfigFuncTests 2020-06-30 17:05:02 +00:00
bert-e 798fb01eb0 Merge branch 'w/7.8/improvement/S3C-3113_PutGetObjectLockConfigFuncTests' into tmp/octopus/w/8.1/improvement/S3C-3113_PutGetObjectLockConfigFuncTests 2020-06-30 17:05:01 +00:00
bert-e e15d1e9b10 Merge branch 'w/8.1/feature/S3C-2944_putObjectLegalHoldFunctionalTests' into tmp/octopus/w/8.2/feature/S3C-2944_putObjectLegalHoldFunctionalTests 2020-06-30 13:19:08 +00:00
bert-e cb26b99281 Merge branch 'w/7.8/feature/S3C-2944_putObjectLegalHoldFunctionalTests' into tmp/octopus/w/8.1/feature/S3C-2944_putObjectLegalHoldFunctionalTests 2020-06-30 13:19:08 +00:00
bert-e c674193a09 Merge branch 'w/8.1/feature/S3C-2788-obj-retention-func-tests' into tmp/octopus/w/8.2/feature/S3C-2788-obj-retention-func-tests 2020-06-30 00:48:07 +00:00
Dora Korpar 75e1dd1847 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2788-obj-retention-func-tests' into w/8.1/feature/S3C-2788-obj-retention-func-tests 2020-06-29 17:42:08 -07:00
bert-e 8ce48bdc61 Merge branch 'w/8.1/feature/S3C-2802_addFuncTestToHeadObj' into tmp/octopus/w/8.2/feature/S3C-2802_addFuncTestToHeadObj 2020-06-24 16:41:17 +00:00
bert-e d9999981c5 Merge branch 'w/7.8/feature/S3C-2802_addFuncTestToHeadObj' into tmp/octopus/w/8.1/feature/S3C-2802_addFuncTestToHeadObj 2020-06-24 16:41:16 +00:00
bert-e 50bbac994c Merge branch 'w/8.1/feature/S3C-2802_addFuncTestToHeadObj' into tmp/octopus/w/8.2/feature/S3C-2802_addFuncTestToHeadObj 2020-06-24 16:18:47 +00:00
bert-e 3bb1c5f4a3 Merge branch 'w/7.8/feature/S3C-2802_addFuncTestToHeadObj' into tmp/octopus/w/8.1/feature/S3C-2802_addFuncTestToHeadObj 2020-06-24 16:18:46 +00:00
bert-e fd38774b90 Merge branch 'w/8.1/feature/S3C-2960-update-utapi' into tmp/octopus/w/8.2/feature/S3C-2960-update-utapi 2020-06-18 01:41:32 +00:00
Dora Korpar c80282bde7 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2960-update-utapi' into w/8.1/feature/S3C-2960-update-utapi 2020-06-17 18:39:43 -07:00
bert-e ada86583a6 Merge branches 'w/8.2/feature/S3C-2974-object-lock-delete-apis' and 'q/2634/8.1/feature/S3C-2974-object-lock-delete-apis' into tmp/octopus/q/8.2 2020-06-18 00:53:37 +00:00
bert-e 4f18682a47 Merge branches 'w/8.1/feature/S3C-2974-object-lock-delete-apis' and 'q/2634/7.8/feature/S3C-2974-object-lock-delete-apis' into tmp/octopus/q/8.1 2020-06-18 00:53:36 +00:00
Dora Korpar e0d0d39e1f Merge remote-tracking branch 'origin/w/8.1/feature/S3C-2974-object-lock-delete-apis' into w/8.2/feature/S3C-2974-object-lock-delete-apis 2020-06-17 16:34:58 -07:00
Dora Korpar d54caa1f1f Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2974-object-lock-delete-apis' into w/8.1/feature/S3C-2974-object-lock-delete-apis 2020-06-17 16:31:40 -07:00
bert-e be6799bb5f Merge branches 'w/8.2/feature/S3C-3040_addObjLockActionsToBucketPolicy' and 'q/2683/8.1/feature/S3C-3040_addObjLockActionsToBucketPolicy' into tmp/octopus/q/8.2 2020-06-17 20:10:06 +00:00
bert-e fdd1331f08 Merge branches 'w/8.1/feature/S3C-3040_addObjLockActionsToBucketPolicy' and 'q/2683/7.8/feature/S3C-3040_addObjLockActionsToBucketPolicy' into tmp/octopus/q/8.1 2020-06-17 20:10:05 +00:00
bert-e 6db95c459b Merge branches 'w/8.2/feature/S3C-2792-mpu-object-retention' and 'q/2635/8.1/feature/S3C-2792-mpu-object-retention' into tmp/octopus/q/8.2 2020-06-17 19:23:09 +00:00
bert-e b925c4cca2 Merge branches 'w/8.1/feature/S3C-2792-mpu-object-retention' and 'q/2635/7.8/feature/S3C-2792-mpu-object-retention' into tmp/octopus/q/8.1 2020-06-17 19:23:09 +00:00
bert-e fb8ec2adc1 Merge branch 'w/8.1/feature/S3C-3040_addObjLockActionsToBucketPolicy' into tmp/octopus/w/8.2/feature/S3C-3040_addObjLockActionsToBucketPolicy 2020-06-17 17:58:25 +00:00
bert-e 063cc79104 Merge branch 'w/7.8/feature/S3C-3040_addObjLockActionsToBucketPolicy' into tmp/octopus/w/8.1/feature/S3C-3040_addObjLockActionsToBucketPolicy 2020-06-17 17:58:25 +00:00
bert-e 403a638356 Merge branch 'w/8.1/feature/S3C-2792-mpu-object-retention' into tmp/octopus/w/8.2/feature/S3C-2792-mpu-object-retention 2020-06-17 08:16:50 +00:00
Dora Korpar 872ac69d72 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2792-mpu-object-retention' into w/8.1/feature/S3C-2792-mpu-object-retention 2020-06-16 17:41:53 -07:00
bert-e 631c0e243a Merge branches 'w/8.2/improvement/S3C-3044-audit-logs-arsenal-hash-update' and 'q/2643/8.1/improvement/S3C-3044-audit-logs-arsenal-hash-update' into tmp/octopus/q/8.2 2020-06-16 19:00:29 +00:00
bert-e 9ec36eb24d Merge branches 'w/8.1/improvement/S3C-3044-audit-logs-arsenal-hash-update' and 'q/2643/7.8/improvement/S3C-3044-audit-logs-arsenal-hash-update' into tmp/octopus/q/8.1 2020-06-16 19:00:28 +00:00
bert-e 259331b579 Merge branch 'w/8.1/improvement/S3C-3044-audit-logs-arsenal-hash-update' into tmp/octopus/w/8.2/improvement/S3C-3044-audit-logs-arsenal-hash-update 2020-06-16 17:49:22 +00:00
bert-e 21e087def0 Merge branch 'w/7.8/improvement/S3C-3044-audit-logs-arsenal-hash-update' into tmp/octopus/w/8.1/improvement/S3C-3044-audit-logs-arsenal-hash-update 2020-06-16 17:49:21 +00:00
bert-e 653445f4cd Merge branch 'w/8.1/feature/S3C-2791_CopyObjectApiSupportsObjectLock' into tmp/octopus/w/8.2/feature/S3C-2791_CopyObjectApiSupportsObjectLock 2020-06-16 05:39:02 +00:00
Ilke 8f19bcd71f Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2791_CopyObjectApiSupportsObjectLock' into w/8.1/feature/S3C-2791_CopyObjectApiSupportsObjectLock 2020-06-15 22:27:21 -07:00
bert-e 3d313728e1 Merge branch 'w/8.1/feature/S3C-2946_GetObjectApiSupportsObjectLock' into tmp/octopus/w/8.2/feature/S3C-2946_GetObjectApiSupportsObjectLock 2020-06-16 03:25:08 +00:00
Ilke 78f060c19d Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2946_GetObjectApiSupportsObjectLock' into w/8.1/feature/S3C-2946_GetObjectApiSupportsObjectLock 2020-06-15 20:17:54 -07:00
bert-e 6196024246 Merge branch 'w/8.1/bugfix/S3C-3062-skip-tests' into tmp/octopus/w/8.2/bugfix/S3C-3062-skip-tests 2020-06-15 21:13:30 +00:00
bert-e cdaafb9b20 Merge branch 'w/7.8/bugfix/S3C-3062-skip-tests' into tmp/octopus/w/8.1/bugfix/S3C-3062-skip-tests 2020-06-15 21:13:29 +00:00
bert-e bc6490fc14 Merge branch 'w/8.2/feature/S3C-2791_PutObjectApiSupportsObjectLock' into tmp/octopus/q/8.2 2020-06-15 16:55:06 +00:00
bert-e f21bca0397 Merge branch 'w/8.1/feature/S3C-2791_PutObjectApiSupportsObjectLock' into tmp/octopus/w/8.2/feature/S3C-2791_PutObjectApiSupportsObjectLock 2020-06-15 16:19:15 +00:00
Ilke 82a7ffd5c1 fix unit test 2020-06-15 09:17:03 -07:00
bert-e e11928a0c7 Merge branch 'w/8.1/feature/S3C-2791_PutObjectApiSupportsObjectLock' into tmp/octopus/w/8.2/feature/S3C-2791_PutObjectApiSupportsObjectLock 2020-06-15 16:13:28 +00:00
Ilke 428632b943 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2791_PutObjectApiSupportsObjectLock' into w/8.1/feature/S3C-2791_PutObjectApiSupportsObjectLock 2020-06-15 09:07:40 -07:00
bert-e 8d9660b9ef Merge branch 'bugfix/ZENKO-2564/noProxyRequests' into q/8.2 2020-06-11 21:48:24 +00:00
bert-e 9ef8a05582 Merge branch 'w/8.1/feature/S3C-2788-get-obj-retention' into tmp/octopus/w/8.2/feature/S3C-2788-get-obj-retention 2020-06-10 07:27:17 +00:00
bert-e 754155c088 Merge branch 'w/7.8/feature/S3C-2788-get-obj-retention' into tmp/octopus/w/8.1/feature/S3C-2788-get-obj-retention 2020-06-10 07:27:16 +00:00
bert-e a8c731334a Merge branch 'w/8.1/bugfix/S3C-3018-multiobj-delete-error' into tmp/octopus/w/8.2/bugfix/S3C-3018-multiobj-delete-error 2020-06-10 01:03:01 +00:00
bert-e 6db9164b1a Merge branch 'w/7.8/bugfix/S3C-3018-multiobj-delete-error' into tmp/octopus/w/8.1/bugfix/S3C-3018-multiobj-delete-error 2020-06-10 01:03:01 +00:00
bert-e 7e9e54dd6b Merge branch 'w/8.1/feature/S3C-2787-put-obj-retention' into tmp/octopus/w/8.2/feature/S3C-2787-put-obj-retention 2020-06-06 05:24:35 +00:00
Dora Korpar 5204fa7964 Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2787-put-obj-retention' into w/8.1/feature/S3C-2787-put-obj-retention 2020-06-05 22:16:19 -07:00
bert-e 8a96f2d971 Merge branch 'w/8.1/feature/S3C-2945_getObjectLegalHoldApi' into tmp/octopus/w/8.2/feature/S3C-2945_getObjectLegalHoldApi 2020-06-05 21:19:30 +00:00
bert-e 24b09ba587 Merge branch 'w/7.8/feature/S3C-2945_getObjectLegalHoldApi' into tmp/octopus/w/8.1/feature/S3C-2945_getObjectLegalHoldApi 2020-06-05 21:19:30 +00:00
bert-e 77ae3346a7 Merge branch 'w/8.1/feature/S3C-2944_putObjectLegalHoldApi' into tmp/octopus/w/8.2/feature/S3C-2944_putObjectLegalHoldApi 2020-06-05 20:11:19 +00:00
Ilke 503698c96d Merge remote-tracking branch 'origin/w/7.8/feature/S3C-2944_putObjectLegalHoldApi' into w/8.1/feature/S3C-2944_putObjectLegalHoldApi 2020-06-05 13:03:28 -07:00
bert-e b9839e00b9 Merge branch 'w/8.1/feature/S3C-2777-object-lock-test-plan' into tmp/octopus/w/8.2/feature/S3C-2777-object-lock-test-plan 2020-06-03 17:17:13 +00:00
bert-e 910342e592 Merge branch 'w/7.8/feature/S3C-2777-object-lock-test-plan' into tmp/octopus/w/8.1/feature/S3C-2777-object-lock-test-plan 2020-06-03 17:17:12 +00:00
Alexander Chan 6b9606a740 bugfix: ZENKO-2564 non-proxied request for metrics
remove the use of `request` module for backbeat metric requests
2020-06-01 20:23:07 +00:00
bert-e b849b74beb Merge branch 'w/8.1/feature/S3C-2790_GetObjectLockConfigurationApi' into tmp/octopus/w/8.2/feature/S3C-2790_GetObjectLockConfigurationApi 2020-05-21 23:58:00 +00:00
bert-e 7f2731f8f2 Merge branch 'feature/S3C-2790_GetObjectLockConfigurationApi' into tmp/octopus/w/8.1/feature/S3C-2790_GetObjectLockConfigurationApi 2020-05-21 23:58:00 +00:00
bert-e 8587a12bd4 Merge branch 'w/8.1/feature/S3C-2790_GetObjectLockConfigurationApi' into tmp/octopus/w/8.2/feature/S3C-2790_GetObjectLockConfigurationApi 2020-05-21 23:45:03 +00:00
bert-e 010bb8c063 Merge branch 'feature/S3C-2790_GetObjectLockConfigurationApi' into tmp/octopus/w/8.1/feature/S3C-2790_GetObjectLockConfigurationApi 2020-05-21 23:45:02 +00:00
bert-e b9feecae7d Merge branch 'w/8.1/feature/S3C-2790_GetObjectLockConfigurationApi' into tmp/octopus/w/8.2/feature/S3C-2790_GetObjectLockConfigurationApi 2020-05-21 23:28:56 +00:00
Ilke b22b836638 Merge remote-tracking branch 'origin/feature/S3C-2790_GetObjectLockConfigurationApi' into w/8.1/feature/S3C-2790_GetObjectLockConfigurationApi 2020-05-21 16:26:34 -07:00
bert-e edfc9d8ea1 Merge branch 'w/8.1/feature/S3C-2789-put-object-lock-configuration' into tmp/octopus/w/8.2/feature/S3C-2789-put-object-lock-configuration 2020-05-20 04:37:59 +00:00
Dora Korpar 192521d597 Merge remote-tracking branch 'origin/feature/S3C-2789-put-object-lock-configuration' into w/8.1/feature/S3C-2789-put-object-lock-configuration 2020-05-19 21:20:09 -07:00
bert-e 96af26c790 Merge branch 'bugfix/S3C-2959_ObjectLockAndNFSEnabledLocations' into tmp/octopus/w/8.2/bugfix/S3C-2959_ObjectLockAndNFSEnabledLocations 2020-05-19 16:49:44 +00:00
Ilke e1cb3c6b9c bf:S3C-2959 fix objectLock and NFSEnabled locations 2020-05-18 19:42:42 -07:00
bert-e 2d8a6bee93 Merge branch 'w/8.1/feature/S3C-2785_AddObjectLockForPutBucket' into tmp/octopus/w/8.2/feature/S3C-2785_AddObjectLockForPutBucket 2020-05-15 05:05:34 +00:00
Ilke e3b98648b8 Merge remote-tracking branch 'origin/feature/S3C-2785_AddObjectLockForPutBucket' into w/8.1/feature/S3C-2785_AddObjectLockForPutBucket 2020-05-14 21:53:10 -07:00
bert-e d33522db6b Merge branch 'w/8.1/bugfix/S3C-2880-update-aws-sdk' into tmp/octopus/w/8.2/bugfix/S3C-2880-update-aws-sdk 2020-05-14 19:24:53 +00:00
Dora Korpar 7287ef5243 Add suffix param in Promisify call 2020-05-14 12:24:23 -07:00
bert-e 0e1bee1d98 Merge branch 'w/8.1/bugfix/S3C-2880-update-aws-sdk' into tmp/octopus/w/8.2/bugfix/S3C-2880-update-aws-sdk 2020-05-14 19:05:41 +00:00
Dora Korpar f3136bfd0c -Async to -Promise 2020-05-14 12:04:48 -07:00
Dora Korpar de08fe3263 -Async to -Promise 2020-05-14 11:54:58 -07:00
bert-e da57d2ca42 Merge branch 'w/8.1/bugfix/S3C-2880-update-aws-sdk' into tmp/octopus/w/8.2/bugfix/S3C-2880-update-aws-sdk 2020-05-14 17:32:23 +00:00
Dora Korpar f23ffa4b01 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2880-update-aws-sdk' into w/8.1/bugfix/S3C-2880-update-aws-sdk 2020-05-14 10:24:19 -07:00
bert-e b3324d9c9c Merge branch 'w/8.1/bugfix/S3C-2726-updateArsenalDependency' into tmp/octopus/w/8.2/bugfix/S3C-2726-updateArsenalDependency 2020-04-24 21:19:02 +00:00
Jonathan Gramain 9e44017d2f Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2726-updateArsenalDependency' into w/8.1/bugfix/S3C-2726-updateArsenalDependency 2020-04-24 14:06:21 -07:00
Rahul Padigela fb0417e383 Merge remote-tracking branch 'origin/w/8.1/improvement/S3C-2804-remove-mongodb-tests' into w/8.2/improvement/S3C-2804-remove-mongodb-tests 2020-04-22 23:30:14 -07:00
Rahul Padigela 5c6c94e80d Merge remote-tracking branch 'origin/w/7.7/improvement/S3C-2804-remove-mongodb-tests' into w/8.1/improvement/S3C-2804-remove-mongodb-tests 2020-04-22 23:24:57 -07:00
bert-e 9a0e1f53d6 Merge branch 'w/8.1/bugfix/S3C-2408/mpu-overwrite' into tmp/octopus/w/8.2/bugfix/S3C-2408/mpu-overwrite 2020-04-22 03:43:36 +00:00
Rahul Padigela f0423bbe11 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2408/mpu-overwrite' into w/8.1/bugfix/S3C-2408/mpu-overwrite 2020-04-21 20:30:47 -07:00
bert-e 848262b6d7 Merge branches 'w/8.2/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix' and 'q/2449/8.1/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix' into tmp/octopus/q/8.2 2020-04-21 21:38:46 +00:00
bert-e 00772ea319 Merge branches 'w/8.1/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix' and 'q/2449/7.7/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix' into tmp/octopus/q/8.1 2020-04-21 21:38:45 +00:00
Rahul Padigela 9dc41db70a Merge remote-tracking branch 'origin/w/8.1/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix' into w/8.2/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix 2020-04-21 13:52:08 -07:00
Rahul Padigela 54daba70e1 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix' into w/8.1/bugfix/S3C-2668_update_arsenal_for_tagging_utf8_fix 2020-04-21 13:45:49 -07:00
bert-e 8af0c52695 Merge branch 'w/8.1/bugfix/S3C-2571_SupportAcceptRangesHeader' into tmp/octopus/w/8.2/bugfix/S3C-2571_SupportAcceptRangesHeader 2020-04-18 06:29:48 +00:00
bert-e 882c91c817 Merge branch 'w/7.7/bugfix/S3C-2571_SupportAcceptRangesHeader' into tmp/octopus/w/8.1/bugfix/S3C-2571_SupportAcceptRangesHeader 2020-04-18 06:29:47 +00:00
bert-e d18b9eb058 Merge branch 'w/8.1/improvement/S3C-2749_TestUrlDuration' into tmp/octopus/w/8.2/improvement/S3C-2749_TestUrlDuration 2020-04-13 22:55:10 +00:00
Ilke 27c064fb46 update arsenal 2020-04-13 15:54:37 -07:00
bert-e 9e7a26cb07 Merge branch 'w/8.1/improvement/S3C-2749_TestUrlDuration' into tmp/octopus/w/8.2/improvement/S3C-2749_TestUrlDuration 2020-04-13 22:27:09 +00:00
Ilke 002416038e update 2020-04-13 15:26:53 -07:00
bert-e a8c37200f6 Merge branch 'w/8.1/improvement/S3C-2749_TestUrlDuration' into tmp/octopus/w/8.2/improvement/S3C-2749_TestUrlDuration 2020-04-13 22:19:45 +00:00
Ilke 7c91b8242c Merge remote-tracking branch 'origin/improvement/S3C-2749_TestUrlDuration' into w/8.1/improvement/S3C-2749_TestUrlDuration 2020-04-13 15:13:39 -07:00
bert-e 28cbcb755c Merge branch 'w/8.1/feature/S3C-2729-arsenal-update-for-7.7' into tmp/octopus/w/8.2/feature/S3C-2729-arsenal-update-for-7.7 2020-04-09 17:43:38 +00:00
bert-e 303f9e04d0 Merge branch 'feature/S3C-2729-arsenal-update-for-7.7' into tmp/octopus/w/8.1/feature/S3C-2729-arsenal-update-for-7.7 2020-04-09 17:43:37 +00:00
bert-e c01d24a7c9 Merge branches 'development/8.2' and 'w/8.1/feature/S3C-2729-arsenal-update-for-7.7' into tmp/octopus/w/8.2/feature/S3C-2729-arsenal-update-for-7.7 2020-04-08 23:35:09 +00:00
ilkescality 663b8fb889
Merge branch 'development/8.1' into w/8.1/feature/S3C-2729-arsenal-update-for-7.7 2020-04-08 16:34:59 -07:00
bert-e 5741387682 Merge branch 'w/8.1/bugfix/S3C-2542_add_default_logger_fields_for_backbeat_routes' into tmp/octopus/w/8.2/bugfix/S3C-2542_add_default_logger_fields_for_backbeat_routes 2020-04-07 20:16:02 +00:00
Taylor McKinnon 84c1ac06da Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2542_add_default_logger_fields_for_backbeat_routes' into w/8.1/bugfix/S3C-2542_add_default_logger_fields_for_backbeat_routes 2020-04-07 13:15:33 -07:00
bert-e 111ccddab9 Merge branch 'w/8.1/bugfix/S3C-2756_update_pykmip_ci_certificates' into tmp/octopus/w/8.2/bugfix/S3C-2756_update_pykmip_ci_certificates 2020-04-07 18:07:08 +00:00
bert-e cc1b341147 Merge branch 'w/7.7/bugfix/S3C-2756_update_pykmip_ci_certificates' into tmp/octopus/w/8.1/bugfix/S3C-2756_update_pykmip_ci_certificates 2020-04-07 18:07:08 +00:00
bert-e 85c02af52c Merge branch 'w/8.1/feature/S3C-2729-arsenal-update-for-7.7' into tmp/octopus/w/8.2/feature/S3C-2729-arsenal-update-for-7.7 2020-04-07 00:36:50 +00:00
Ilke 38ff53c2a0 Fix arsenal version for 8.1 2020-04-06 17:36:36 -07:00
bert-e 985e6567f2 Merge branch 'w/8.1/feature/S3C-2729-arsenal-update-for-7.7' into tmp/octopus/w/8.2/feature/S3C-2729-arsenal-update-for-7.7 2020-04-06 19:42:42 +00:00
Ilke d7a4f9299a Merge remote-tracking branch 'origin/feature/S3C-2729-arsenal-update-for-7.7' into w/8.1/feature/S3C-2729-arsenal-update-for-7.7 2020-04-06 12:36:05 -07:00
bert-e 22d4ae8c04 Merge branch 'w/8.1/bugfix/S3C-2714-fix-permissions' into tmp/octopus/w/8.2/bugfix/S3C-2714-fix-permissions 2020-03-19 22:14:01 +00:00
bert-e 9ec7252d29 Merge branch 'w/7.7/bugfix/S3C-2714-fix-permissions' into tmp/octopus/w/8.1/bugfix/S3C-2714-fix-permissions 2020-03-19 22:14:00 +00:00
bert-e 345d126488 Merge branch 'bugfix/ZENKO-1670-propagate-updates-if-master-version-is-gte' into tmp/octopus/w/8.2/bugfix/ZENKO-1670-propagate-updates-if-master-version-is-gte 2020-03-19 01:30:16 +00:00
Rahul Padigela 7bcf78634d bugfix: ZENKO-1670-propagate-updates-if-master-version 2020-03-18 18:28:34 -07:00
bert-e b20785150c Merge branch 'w/8.1/bugfix/S3C-2697-acl-perms-regression' into tmp/octopus/w/8.2/bugfix/S3C-2697-acl-perms-regression 2020-03-17 17:33:42 +00:00
Dora Korpar 7f50dfee79 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2697-acl-perms-regression' into w/8.1/bugfix/S3C-2697-acl-perms-regression 2020-03-17 10:22:11 -07:00
bert-e bfc2648257 Merge branch 'bugfix/ZENKO-2352-httpError424IfLocationDoesNotExist' into tmp/octopus/w/8.2/bugfix/ZENKO-2352-httpError424IfLocationDoesNotExist 2020-02-27 23:14:02 +00:00
Jonathan Gramain a694f210e6 bugfix: ZENKO-2352 arsenal dependency update
Also remove an error log when LocationNotFound error is returned by
the cloud backend.
2020-02-27 15:13:42 -08:00
bert-e 6d9f3638b3 Merge branches 'w/8.2/bugfix/S3C-2502-vault-req-ip-header-port' and 'q/2378/8.1/bugfix/S3C-2502-vault-req-ip-header-port' into tmp/octopus/q/8.2 2020-02-26 19:43:24 +00:00
bert-e 7f5caa7da6 Merge branches 'w/8.1/bugfix/S3C-2502-vault-req-ip-header-port' and 'q/2378/7.7/bugfix/S3C-2502-vault-req-ip-header-port' into tmp/octopus/q/8.1 2020-02-26 19:43:24 +00:00
bert-e dff6e95e44 Merge branch 'w/8.1/bugfix/S3C-2502-vault-req-ip-header-port' into tmp/octopus/w/8.2/bugfix/S3C-2502-vault-req-ip-header-port 2020-02-26 19:06:21 +00:00
Dora Korpar ff128a1713 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2502-vault-req-ip-header-port' into w/8.1/bugfix/S3C-2502-vault-req-ip-header-port 2020-02-26 11:05:06 -08:00
bert-e 2c7ed368cf Merge branches 'w/8.2/bugfix/S3C-2544-diff-acct-metrics-port-2' and 'q/2363/8.1/bugfix/S3C-2544-diff-acct-metrics-port-2' into tmp/octopus/q/8.2 2020-02-26 00:47:04 +00:00
bert-e b9c96fd052 Merge branches 'w/8.1/bugfix/S3C-2544-diff-acct-metrics-port-2' and 'q/2363/7.7/bugfix/S3C-2544-diff-acct-metrics-port-2' into tmp/octopus/q/8.1 2020-02-26 00:47:03 +00:00
bert-e 513fa24031 Merge branches 'w/8.2/bugfix/S3C-2597-bucket-policy-can-id-port' and 'q/2368/8.1/bugfix/S3C-2597-bucket-policy-can-id-port' into tmp/octopus/q/8.2 2020-02-25 23:37:40 +00:00
bert-e 4644fdc30a Merge branches 'w/8.1/bugfix/S3C-2597-bucket-policy-can-id-port' and 'q/2368/7.7/bugfix/S3C-2597-bucket-policy-can-id-port' into tmp/octopus/q/8.1 2020-02-25 23:37:39 +00:00
bert-e eef183edf2 Merge branch 'w/8.1/bugfix/S3C-2597-bucket-policy-can-id-port' into tmp/octopus/w/8.2/bugfix/S3C-2597-bucket-policy-can-id-port 2020-02-25 06:08:13 +00:00
bert-e e6350ea3e8 Merge branch 'w/7.7/bugfix/S3C-2597-bucket-policy-can-id-port' into tmp/octopus/w/8.1/bugfix/S3C-2597-bucket-policy-can-id-port 2020-02-25 06:08:12 +00:00
bert-e 1137e3a6b6 Merge branch 'w/8.1/bugfix/S3C-2544-diff-acct-metrics-port-2' into tmp/octopus/w/8.2/bugfix/S3C-2544-diff-acct-metrics-port-2 2020-02-25 06:01:11 +00:00
bert-e 5096cbc3e3 Merge branch 'w/7.7/bugfix/S3C-2544-diff-acct-metrics-port-2' into tmp/octopus/w/8.1/bugfix/S3C-2544-diff-acct-metrics-port-2 2020-02-25 06:01:11 +00:00
bert-e e7835064dc Merge branches 'w/8.2/bugfix/S3C-2502-vault-req-ip-header' and 'q/2333/8.1/bugfix/S3C-2502-vault-req-ip-header' into tmp/octopus/q/8.2 2020-02-24 20:43:46 +00:00
bert-e 7720a82fd9 Merge branches 'w/8.1/bugfix/S3C-2502-vault-req-ip-header' and 'q/2333/7.7/bugfix/S3C-2502-vault-req-ip-header' into tmp/octopus/q/8.1 2020-02-24 20:43:46 +00:00
bert-e d72319c732 Merge branches 'development/8.2' and 'w/8.1/bugfix/S3C-2502-vault-req-ip-header' into tmp/octopus/w/8.2/bugfix/S3C-2502-vault-req-ip-header 2020-02-24 20:05:40 +00:00
bert-e 74935b7bb1 Merge branch 'w/7.7/bugfix/S3C-2502-vault-req-ip-header' into tmp/octopus/w/8.1/bugfix/S3C-2502-vault-req-ip-header 2020-02-24 20:05:39 +00:00
Dora Korpar 5b9f414bbb Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2502-vault-req-ip-header' into w/8.1/bugfix/S3C-2502-vault-req-ip-header 2020-02-24 12:05:19 -08:00
Dora Korpar f51b2d373c Merge remote-tracking branch 'origin/development/8.1' into w/8.1/bugfix/S3C-2502-vault-req-ip-header 2020-02-24 12:01:44 -08:00
bert-e c6b1f214c6 Merge branch 'improvement/ZENKO-2340-rename-zenko-registry-namespace' into tmp/octopus/w/8.2/improvement/ZENKO-2340-rename-zenko-registry-namespace 2020-02-24 14:26:10 +00:00
Thomas Carmet 7f65d5f417 ci: ZENKO-2340 rename the docker registry namespace from zenko-dev to zenko 2020-02-24 15:25:16 +01:00
bert-e 3c975e181a Merge branch 'w/8.1/bugfix/S3C-2597-bucket-policy-can-id' into tmp/octopus/w/8.2/bugfix/S3C-2597-bucket-policy-can-id 2020-02-21 07:04:20 +00:00
Dora Korpar 7b74202223 fix test 2020-02-20 23:04:00 -08:00
bert-e 8ecdd07fd6 Merge branch 'w/8.1/bugfix/S3C-2597-bucket-policy-can-id' into tmp/octopus/w/8.2/bugfix/S3C-2597-bucket-policy-can-id 2020-02-20 19:29:43 +00:00
Dora Korpar 21beb7fcdf Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2597-bucket-policy-can-id' into w/8.1/bugfix/S3C-2597-bucket-policy-can-id 2020-02-20 11:26:34 -08:00
bert-e 7af23a1526 Merge branch 'w/8.1/bugfix/S3C-2544-diff-acct-metrics' into tmp/octopus/w/8.2/bugfix/S3C-2544-diff-acct-metrics 2020-02-14 06:23:11 +00:00
bert-e 27ed0058a6 Merge branch 'w/7.7/bugfix/S3C-2544-diff-acct-metrics' into tmp/octopus/w/8.1/bugfix/S3C-2544-diff-acct-metrics 2020-02-14 06:23:11 +00:00
Dora Korpar 86c9f54541 fix arsenal version 2020-02-06 16:42:54 -08:00
Dora Korpar 6091cf1b36 fix arsenal version 2020-02-06 16:41:39 -08:00
bert-e 2f7eb3b9be Merge branch 'w/8.1/bugfix/S3C-2502-vault-req-ip-header' into tmp/octopus/w/8.2/bugfix/S3C-2502-vault-req-ip-header 2020-02-06 23:32:29 +00:00
Dora Korpar 059299f981 Merge remote-tracking branch 'origin/w/7.7/bugfix/S3C-2502-vault-req-ip-header' into w/8.1/bugfix/S3C-2502-vault-req-ip-header 2020-02-06 15:18:47 -08:00
bert-e fdcbd315d0 Merge branch 'w/8.1/bugfix/S3C-2564-kms-remove-ssl-option-quotes' into tmp/octopus/w/8.2/bugfix/S3C-2564-kms-remove-ssl-option-quotes 2020-01-17 08:44:46 +00:00
bert-e 6b4671b291 Merge branch 'w/7.6/bugfix/S3C-2564-kms-remove-ssl-option-quotes' into tmp/octopus/w/8.1/bugfix/S3C-2564-kms-remove-ssl-option-quotes 2020-01-17 08:44:46 +00:00
bert-e 605fdd9254 Merge branch 'w/8.1/bugfix/S3C-2503-allow-same-email-acl' into tmp/octopus/w/8.2/bugfix/S3C-2503-allow-same-email-acl 2020-01-16 21:18:16 +00:00
bert-e e77aeeb3a5 Merge branch 'w/7.6/bugfix/S3C-2503-allow-same-email-acl' into tmp/octopus/w/8.1/bugfix/S3C-2503-allow-same-email-acl 2020-01-16 21:18:15 +00:00
bert-e 4bf24747c8 Merge branch 'w/8.1/bugfix/S3C-2582-update-maven-repo' into tmp/octopus/w/8.2/bugfix/S3C-2582-update-maven-repo 2020-01-16 18:17:09 +00:00
bert-e 85e178d0fa Merge branch 'w/7.6/bugfix/S3C-2582-update-maven-repo' into tmp/octopus/w/8.1/bugfix/S3C-2582-update-maven-repo 2020-01-16 18:17:09 +00:00
bert-e d4a7c1d7c8 Merge branch 'bugfix/ZENKO-2277-fix-cloudserver-image-build' into tmp/octopus/w/8.2/bugfix/ZENKO-2277-fix-cloudserver-image-build 2020-01-13 09:19:53 +00:00
Thomas Carmet 1e73d90126 ZENKO-2277 moving away from a floating tag on Docker image 2020-01-13 10:19:05 +01:00
bert-e 8e5353929c Merge branch 'bugfix/ZENKO-2277-fix-cloudserver-image-build' into tmp/octopus/w/8.2/bugfix/ZENKO-2277-fix-cloudserver-image-build 2020-01-10 16:26:27 +00:00
Thomas Carmet bea2bf9d60 ZENKO-2277 installing curl and gpg due to parent image upgrade
Because the parent image node:10-slim received an update that removed the
curl and gpg commands, we lost the ability to build cloudserver.
We now install them explicitly.
2020-01-10 17:24:21 +01:00
bert-e a240db4e03 Merge branches 'w/8.2/bugfix/ZENKO-2200/report' and 'q/2231/8.1/bugfix/ZENKO-2200/report' into tmp/octopus/q/8.2 2020-01-03 20:21:28 +00:00
bert-e a4de12e331 Merge branch 'bugfix/ZENKO-2200/report' into q/8.1 2020-01-03 20:21:28 +00:00
bert-e 0b7e2e6ed8 Merge branch 'w/8.1/bugfix/ZENKOIO-20/update-aws-sdk' into tmp/octopus/w/8.2/bugfix/ZENKOIO-20/update-aws-sdk 2020-01-02 22:39:34 +00:00
Dora Korpar 1f2174e224 Merge remote-tracking branch 'origin/w/7.6/bugfix/ZENKOIO-20/update-aws-sdk' into w/8.1/bugfix/ZENKOIO-20/update-aws-sdk 2020-01-02 14:25:13 -08:00
bert-e ed4cd8cad0 Merge branches 'w/8.2/bugfix/S3C-2527-sproxydclientDependencyUpdate' and 'q/2293/8.1/bugfix/S3C-2527-sproxydclientDependencyUpdate' into tmp/octopus/q/8.2 2019-12-30 21:48:11 +00:00
bert-e 3239927d1a Merge branch 'w/8.1/bugfix/S3C-2527-sproxydclientDependencyUpdate' into tmp/octopus/q/8.1 2019-12-30 21:48:10 +00:00
bert-e 5e9b57b883 Merge branch 'bugfix/ZENKO-2261-arsenalDependencyUpdate' into tmp/octopus/w/8.2/bugfix/ZENKO-2261-arsenalDependencyUpdate 2019-12-30 20:19:39 +00:00
Jonathan Gramain b6cc788e97 bugfix: ZENKO-2261 effectively reuse sproxyd connections
Dependency update on Arsenal, which transitively depends on updated
sproxydclient.
2019-12-30 12:08:00 -08:00
bert-e 97c95f3bd9 Merge branch 'w/8.1/bugfix/S3C-2527-sproxydclientDependencyUpdate' into tmp/octopus/w/8.2/bugfix/S3C-2527-sproxydclientDependencyUpdate 2019-12-30 19:29:18 +00:00
Jonathan Gramain 9529fda244 Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2527-sproxydclientDependencyUpdate' into w/8.1/bugfix/S3C-2527-sproxydclientDependencyUpdate 2019-12-30 11:22:22 -08:00
bert-e 4332d0a10f Merge branch 'bugfix/ZENKO-2250-reworkChunkedUploadStreamHandling' into tmp/octopus/w/8.2/bugfix/ZENKO-2250-reworkChunkedUploadStreamHandling 2019-12-11 18:49:40 +00:00
Jonathan Gramain 16b65ed208 bugfix: ZENKO-2250 rework chunked upload stream handling
Update Arsenal dependency and re-enable streaming V4 functional test
2019-12-11 10:45:52 -08:00
bert-e a78da8c24c Merge branch 'w/8.1/bugfix/S3C-2504-cleanupBucketAfterTest' into tmp/octopus/w/8.2/bugfix/S3C-2504-cleanupBucketAfterTest 2019-12-11 01:21:09 +00:00
Jonathan Gramain d3d8229664 Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2504-cleanupBucketAfterTest' into w/8.1/bugfix/S3C-2504-cleanupBucketAfterTest 2019-12-10 17:18:28 -08:00
bert-e 4b8b60c7c1 Merge branch 'w/8.1/bugfix/S3C-2504-revertYarnChangeOnUtapiDependencies' into tmp/octopus/w/8.2/bugfix/S3C-2504-revertYarnChangeOnUtapiDependencies 2019-12-10 22:50:24 +00:00
bert-e 985f49b5cf Merge branch 'w/7.6/bugfix/S3C-2504-revertYarnChangeOnUtapiDependencies' into tmp/octopus/w/8.1/bugfix/S3C-2504-revertYarnChangeOnUtapiDependencies 2019-12-10 22:50:23 +00:00
bert-e 7351a778cb Merge branch 'w/8.1/bugfix/S3C-2504-reworkChunkedUploadStreamHandling' into tmp/octopus/w/8.2/bugfix/S3C-2504-reworkChunkedUploadStreamHandling 2019-12-10 19:38:31 +00:00
Jonathan Gramain 3e2d1f7ab3 Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2504-reworkChunkedUploadStreamHandling' into w/8.1/bugfix/S3C-2504-reworkChunkedUploadStreamHandling 2019-12-10 11:26:46 -08:00
bert-e 0f999147e5 Merge branches 'w/8.2/feature/ZENKO-2238-post-merge-deploy-to-new-registry' and 'q/2265/8.1/feature/ZENKO-2238-post-merge-deploy-to-new-registry' into tmp/octopus/q/8.2 2019-12-03 00:54:51 +00:00
bert-e cdfda7c8fb Merge branch 'feature/ZENKO-2238-post-merge-deploy-to-new-registry' into q/8.1 2019-12-03 00:54:50 +00:00
bert-e dbdbf3c2c2 Merge branch 'feature/ZENKO-2238-post-merge-deploy-to-new-registry' into tmp/octopus/w/8.2/feature/ZENKO-2238-post-merge-deploy-to-new-registry 2019-12-02 22:50:30 +00:00
Thomas Carmet 319988c310 ZENKO-2238 changing worker type to kube pod
An opinionated change to the build environment: we may require more
tooling when building cloudserver in the future, so it is better to
stop executing docker commands on eve's backend.
2019-11-27 16:04:25 -08:00
Thomas Carmet 3b111ce5d1 ZENKO-2238 now pushing docker image to registry.scality.com
Creating a zenko-dev workspace for all zenko related images.

We're also removing the pushes that were made to Docker Hub, because
the tagging was done incorrectly; we're letting the Docker Hub
build system handle that part for now.
2019-11-27 16:04:25 -08:00
bert-e 3a21f7fd81 Merge branch 'w/7.6/bugfix/ZENKO-2240-fixup-get-product-version' into tmp/octopus/w/8.1/bugfix/ZENKO-2240-fixup-get-product-version 2019-11-27 19:09:28 +00:00
bert-e 796da8b5ac Merge branch 'w/8.1/bugfix/ZENKO-2240-fixup-get-product-version' into tmp/octopus/w/8.2/bugfix/ZENKO-2240-fixup-get-product-version 2019-11-27 19:09:28 +00:00
bert-e 011c977acb Merge branch 'w/8.1/feature/S3C-2235-injectAccount' into tmp/octopus/w/8.2/feature/S3C-2235-injectAccount 2019-11-25 13:46:50 +00:00
Anurag Mittal 27df131a31
ft: S3C-2235-updated vaultclient 2019-11-25 19:16:24 +05:30
bert-e c2c6e231c6 Merge branch 'w/8.1/feature/S3C-2235-injectAccount' into tmp/octopus/w/8.2/feature/S3C-2235-injectAccount 2019-11-25 13:42:39 +00:00
Anurag Mittal b09655759d
Merge remote-tracking branch 'origin/feature/S3C-2235-injectAccount' into w/8.1/feature/S3C-2235-injectAccount 2019-11-25 19:11:11 +05:30
bert-e f5942e3ee5 Merge branch 'w/8.1/improvement/S3C-2193-sse-s3-2' into tmp/octopus/w/8.2/improvement/S3C-2193-sse-s3-2 2019-11-21 20:04:31 +00:00
naren-scality f3331b3d3b merge changes 2019-11-21 12:03:53 -08:00
bert-e 28797860c6 Merge branch 'w/8.1/improvement/S3C-2193-sse-s3-2' into tmp/octopus/w/8.2/improvement/S3C-2193-sse-s3-2 2019-11-21 19:58:34 +00:00
naren-scality c65312411c Merge remote-tracking branch 'origin/improvement/S3C-2193-sse-s3-2' into w/8.1/improvement/S3C-2193-sse-s3-2 2019-11-21 11:54:13 -08:00
Rached Ben Mustapha adf5ffb1e2 feature: add metrics and secure channel servers
This allows using specialized connections for each usage:
- configuration push
- metrics collection (to be later moved to prom scraping)
- secure channel

This way each connection handler can be restricted in what it can
do for reliability and security purposes.
2019-11-20 12:21:42 -08:00
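A minimal Node.js sketch of the dedicated-listener idea described in the commit above; the ports and handler bodies are illustrative, not cloudserver's actual endpoints:

```js
const http = require('http');

// One small server per concern, so each handler can be restricted
// independently (illustrative ports, not cloudserver's real ones).
function startServer(name, port, handler) {
    const server = http.createServer(handler);
    server.listen(port, () => console.log(`${name} listening on ${port}`));
    return server;
}

startServer('config-push', 9001, (req, res) => res.end('config endpoint\n'));
startServer('metrics', 9002, (req, res) => res.end('# metrics placeholder\n'));
startServer('secure-channel', 9003, (req, res) => res.end('secure channel\n'));
```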
vrancurel 00a2cc43c9 bf: special treatment for MPU MD5 if mdonly
Rclone originally does not handle the MD5 of MPU objects correctly,
because their ETag is structured as "md5chksum-numparts".
Therefore a patch was made to not only transmit an x-amz-meta-md5chksum header
as before, but also to include an additional x-amz-meta-md5numparts header.
This fix checks for the new header and reconstitutes the MD5 correctly.
For non-MPU objects the behavior is unchanged and therefore backward
compatible.
2019-11-15 10:44:39 -08:00
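A small sketch of the reconstitution logic described above; the two header names come from the commit message, while the helper itself and the sample values are illustrative:

```js
// Rebuild an rclone-style MPU ETag ("<md5chksum>-<numparts>") from the two
// user-metadata headers named in the commit; the helper is only a sketch.
function reconstituteEtag(userMetadata) {
    const md5 = userMetadata['x-amz-meta-md5chksum'];
    const numParts = userMetadata['x-amz-meta-md5numparts'];
    if (!md5) {
        return null; // nothing to reconstitute
    }
    // MPU object: append the part count; non-MPU objects keep the bare MD5,
    // so the behavior stays backward compatible.
    return numParts ? `${md5}-${numParts}` : md5;
}

console.log(reconstituteEtag({
    'x-amz-meta-md5chksum': '9b2cf535f27731c974343645a3985328',
    'x-amz-meta-md5numparts': '4',
})); // => 9b2cf535f27731c974343645a3985328-4
```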
Dasha Gurova fa9ca65eb9
Merge branch 'development/8.2' into documentation/ZENKOIO-116-smallUpdatesToDocs 2019-11-08 11:54:48 -08:00
Dasha Gurova 3c015f6de8 Rached's comment 2019-11-08 11:53:55 -08:00
bert-e 3e4d249495 Merge branch 'w/7.6/bugfix/S3C-2517-fixCrashWithInvalidChunkedUpload' into tmp/octopus/w/8.1/bugfix/S3C-2517-fixCrashWithInvalidChunkedUpload 2019-11-07 00:38:33 +00:00
bert-e b65b5d4fb7 Merge branch 'w/8.1/bugfix/S3C-2517-fixCrashWithInvalidChunkedUpload' into tmp/octopus/w/8.2/bugfix/S3C-2517-fixCrashWithInvalidChunkedUpload 2019-11-07 00:38:33 +00:00
Dasha Gurova 3873d1e0d5 suggestions from review 2019-11-06 12:51:56 -08:00
Dasha Gurova 0dd8e5cc14 docs: ZENKOIO-116 small updates 2019-11-06 11:38:50 -08:00
bert-e 90a26d3432 Merge branch 'bugfix/ZENKO-2200/report' into tmp/octopus/w/8.2/bugfix/ZENKO-2200/report 2019-10-22 20:19:56 +00:00
Nicolas Humbert 0c797cf84e ZENKO-2200 Keep the .git/HEAD file to properly report version 2019-10-22 13:18:18 -07:00
bert-e 3dd86aa30a Merge branches 'w/8.2/bugfix/S3C-2392-mpu-tagging' and 'q/2219/8.1/bugfix/S3C-2392-mpu-tagging' into tmp/octopus/q/8.2 2019-10-17 18:29:52 +00:00
bert-e 8c8902cc74 Merge branches 'w/8.1/bugfix/S3C-2392-mpu-tagging' and 'q/2219/7.6/bugfix/S3C-2392-mpu-tagging' into tmp/octopus/q/8.1 2019-10-17 18:29:51 +00:00
bert-e 1ce0a4db92 Merge branches 'w/8.2/bugfix/S3C-2206/x_forwarded_for_ip_extraction' and 'q/2201/8.1/bugfix/S3C-2206/x_forwarded_for_ip_extraction' into tmp/octopus/q/8.2 2019-10-17 16:44:35 +00:00
bert-e 29a174a0bc Merge branches 'w/8.1/bugfix/S3C-2206/x_forwarded_for_ip_extraction' and 'q/2201/7.6/bugfix/S3C-2206/x_forwarded_for_ip_extraction' into tmp/octopus/q/8.1 2019-10-17 16:44:34 +00:00
bert-e 360cf0287a Merge branch 'w/8.1/bugfix/S3C-2206/x_forwarded_for_ip_extraction' into tmp/octopus/w/8.2/bugfix/S3C-2206/x_forwarded_for_ip_extraction 2019-10-16 22:39:25 +00:00
naren-scality c7b6b752fd Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2206/x_forwarded_for_ip_extraction' into w/8.1/bugfix/S3C-2206/x_forwarded_for_ip_extraction 2019-10-16 15:30:20 -07:00
bert-e fc6875d640 Merge branch 'w/8.1/bugfix/S3C-2392-mpu-tagging' into tmp/octopus/w/8.2/bugfix/S3C-2392-mpu-tagging 2019-10-16 21:38:17 +00:00
Dora Korpar 39fcbdcb49 Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2392-mpu-tagging' into w/8.1/bugfix/S3C-2392-mpu-tagging 2019-10-16 14:27:17 -07:00
bert-e 25e28357d1 Merge branches 'w/8.2/bugfix/S3C-2392-update-aws-sdk' and 'q/2212/8.1/bugfix/S3C-2392-update-aws-sdk' into tmp/octopus/q/8.2 2019-10-16 20:11:28 +00:00
bert-e a178a16087 Merge branches 'w/8.1/bugfix/S3C-2392-update-aws-sdk' and 'q/2212/7.6/bugfix/S3C-2392-update-aws-sdk' into tmp/octopus/q/8.1 2019-10-16 20:11:28 +00:00
bert-e 7ca6288ba6 Merge branch 'w/8.1/bugfix/S3C-2392-update-aws-sdk' into tmp/octopus/w/8.2/bugfix/S3C-2392-update-aws-sdk 2019-10-16 17:15:43 +00:00
Dora Korpar 77c0476dd4 Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2392-update-aws-sdk' into w/8.1/bugfix/S3C-2392-update-aws-sdk 2019-10-16 10:02:04 -07:00
bert-e bc2897bf13 Merge branches 'w/8.2/bugfix/S3C-2397/invalid-range' and 'q/2194/8.1/bugfix/S3C-2397/invalid-range' into tmp/octopus/q/8.2 2019-10-16 07:04:33 +00:00
bert-e 99f240b2be Merge branches 'w/8.1/bugfix/S3C-2397/invalid-range' and 'q/2194/7.6/bugfix/S3C-2397/invalid-range' into tmp/octopus/q/8.1 2019-10-16 07:04:33 +00:00
bert-e 8f17fe87e8 Merge branch 'w/8.1/bugfix/S3C-2397/invalid-range' into tmp/octopus/w/8.2/bugfix/S3C-2397/invalid-range 2019-10-09 23:06:21 +00:00
bert-e bf9b8f138e Merge branch 'w/7.6/bugfix/S3C-2397/invalid-range' into tmp/octopus/w/8.1/bugfix/S3C-2397/invalid-range 2019-10-09 23:06:20 +00:00
bert-e eb42e2128a Merge branches 'w/8.2/bugfix/ZENKO-2131' and 'q/2190/8.1/bugfix/ZENKO-2131' into tmp/octopus/q/8.2 2019-10-08 00:00:37 +00:00
bert-e d9fd24e16e Merge branch 'bugfix/ZENKO-2131' into q/8.1 2019-10-08 00:00:37 +00:00
bert-e 206551847e Merge branch 'bugfix/ZENKO-2131' into tmp/octopus/w/8.2/bugfix/ZENKO-2131 2019-10-07 23:21:42 +00:00
Salim a58331bc34 bf: fix mdOnly version ID
ZENKOIO-107
This commit fixes a bug where "metadata only" puts used for ingestion do not
add the server version ID. Without this, getting older versions of an ingested
object would fail.
2019-10-07 15:48:09 -07:00
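A minimal sketch of the fix described above, assuming a hypothetical helper and field names (cloudserver's real metadata schema is not shown here):

```js
// Hypothetical sketch: a metadata-only ingestion put must carry the version
// ID of the ingested object, otherwise GETs of older versions fail.
// Field names are illustrative, not cloudserver's actual metadata schema.
function buildMdOnlyEntry(sourceMd, versionId) {
    return Object.assign({}, sourceMd, { versionId });
}

const entry = buildMdOnlyEntry({ 'content-length': 1024 }, 'example-version-id');
console.log(entry.versionId); // 'example-version-id'
```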
Salim 163563bc80 bf: fix ingest MD5 inconsistencies
ZENKO-2131
This commit fixes a bug where cloudserver can crash if there is an ingestion
request without the x-amz-meta-md5chksum header. Assigning null to the value
when it is undefined prevents the crash.
2019-10-07 15:23:48 -07:00
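A small sketch of the defensive default described above; only the header name comes from the commit message, the function itself is illustrative:

```js
// Defensive default for a possibly missing ingestion header: fall back to
// null instead of leaving the value undefined (which could crash cloudserver).
function getMd5Checksum(headers) {
    const value = headers['x-amz-meta-md5chksum'];
    return value === undefined ? null : value;
}

console.log(getMd5Checksum({}));                                // null
console.log(getMd5Checksum({ 'x-amz-meta-md5chksum': 'abc' })); // 'abc'
```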
bert-e dc5efbafdc Merge branch 'w/8.1/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens' into tmp/octopus/w/8.2/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens 2019-10-07 21:06:09 +00:00
naren-scality de3ff7507e Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens' into w/8.1/bugfix/S3C-1805/bucket_name_with_consecutive_hyphens 2019-10-07 13:51:40 -07:00
bert-e 1f201b8b3a Merge branches 'development/8.2' and 'w/8.1/bugfix/S3C-2410/handle_unsupported_operations' into tmp/octopus/w/8.2/bugfix/S3C-2410/handle_unsupported_operations 2019-10-02 17:28:15 +00:00
bert-e 8bbb93b931 Merge branches 'development/8.1' and 'w/7.6/bugfix/S3C-2410/handle_unsupported_operations' into tmp/octopus/w/8.1/bugfix/S3C-2410/handle_unsupported_operations 2019-10-02 17:28:14 +00:00
bert-e bd8f92ee6f Merge branches 'w/8.2/improvement/ZENKOIO-100' and 'q/2157/8.1/improvement/ZENKOIO-100' into tmp/octopus/q/8.2 2019-10-01 22:03:41 +00:00
bert-e 14023dee2d Merge branch 'improvement/ZENKOIO-100' into q/8.1 2019-10-01 22:03:41 +00:00
Salim 6322e527af Merge branch 'improvement/ZENKOIO-100' into w/8.2/improvement/ZENKOIO-100 2019-10-01 13:26:44 -07:00
bert-e 9c0ef12b10 Merge branch 'w/8.1/bugfix/S3C-2410/handle_unsupported_operations' into tmp/octopus/w/8.2/bugfix/S3C-2410/handle_unsupported_operations 2019-10-01 18:01:37 +00:00
bert-e 82fa88c40e Merge branch 'w/7.6/bugfix/S3C-2410/handle_unsupported_operations' into tmp/octopus/w/8.1/bugfix/S3C-2410/handle_unsupported_operations 2019-10-01 18:01:36 +00:00
vrancurel 2aab4be5d4 feature: workflow engine operator proxy 2019-09-27 09:59:03 -07:00
Salim 0bbe82b5b9 bf: fix putObjectPart typo 2019-09-26 16:18:04 -07:00
Salim ec720c3040 bf: fix metrics reporting of request size
ZENKOIO-101
This fixes a bug where Cloudserver reports the byte size for a PUT request as NewSizeLength - OldSizeLength, causing inaccuracies when putting objects into a non-versioned bucket (e.g. putting a 100MB file into a bucket updates the counter accurately, but putting the same object again leaves the counter unchanged even though 100MB just passed through).
2019-09-26 16:18:04 -07:00
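A short illustration of the accounting difference described above, with hypothetical function and parameter names:

```js
// Illustration of the accounting bug: bandwidth for a PUT should be the
// size of the incoming object, not the change in stored bytes.
function bytesToReport(newSize, oldSize, { buggy = false } = {}) {
    return buggy ? newSize - oldSize : newSize;
}

// Overwriting a 100MB object with another 100MB object in a
// non-versioned bucket:
const MB = 1024 * 1024;
console.log(bytesToReport(100 * MB, 100 * MB, { buggy: true })); // 0 (wrong)
console.log(bytesToReport(100 * MB, 100 * MB));                  // 104857600
```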
Salim d1c59e5c06 improvement: add mdonly ingestion metrics 2019-09-26 16:18:03 -07:00
bert-e f04608a0ce Merge branches 'w/8.2/bugfix/S3C-2440-get-policy-xml-error' and 'q/2154/8.1/bugfix/S3C-2440-get-policy-xml-error' into tmp/octopus/q/8.2 2019-09-25 20:41:21 +00:00
bert-e b1082add04 Merge branches 'w/8.1/bugfix/S3C-2440-get-policy-xml-error' and 'q/2154/7.6/bugfix/S3C-2440-get-policy-xml-error' into tmp/octopus/q/8.1 2019-09-25 20:41:20 +00:00
bert-e 19415e36d5 Merge branch 'w/8.1/bugfix/S3C-2440-get-policy-xml-error' into tmp/octopus/w/8.2/bugfix/S3C-2440-get-policy-xml-error 2019-09-24 20:07:04 +00:00
Dora Korpar 8d5db7da81 Merge remote-tracking branch 'origin/bugfix/S3C-2440-get-policy-xml-error' into w/8.1/bugfix/S3C-2440-get-policy-xml-error 2019-09-24 12:55:06 -07:00
bert-e 589a465c57 Merge branches 'w/8.2/bugfix/S3C-2439-precise-request-types-mpu' and 'q/2151/8.1/bugfix/S3C-2439-precise-request-types-mpu' into tmp/octopus/q/8.2 2019-09-24 01:10:11 +00:00
bert-e fb67c4b6e9 Merge branches 'w/8.1/bugfix/S3C-2439-precise-request-types-mpu' and 'q/2151/7.6/bugfix/S3C-2439-precise-request-types-mpu' into tmp/octopus/q/8.1 2019-09-24 01:10:11 +00:00
bert-e 957631cea0 Merge branch 'w/8.1/bugfix/S3C-2439-precise-request-types-mpu' into tmp/octopus/w/8.2/bugfix/S3C-2439-precise-request-types-mpu 2019-09-23 22:52:39 +00:00
bert-e 0a48822254 Merge branch 'bugfix/S3C-2439-precise-request-types-mpu' into tmp/octopus/w/8.1/bugfix/S3C-2439-precise-request-types-mpu 2019-09-23 22:52:39 +00:00
bert-e 0faa470247 Merge branch 'w/8.1/bugfix/S3C-2399/incorrect_part_number_size' into tmp/octopus/w/8.2/bugfix/S3C-2399/incorrect_part_number_size 2019-09-20 07:15:46 +00:00
naren-scality a3c4dd865c Merge remote-tracking branch 'origin/w/7.6/bugfix/S3C-2399/incorrect_part_number_size' into w/8.1/bugfix/S3C-2399/incorrect_part_number_size 2019-09-20 00:04:48 -07:00
bert-e aa874cf6fd Merge branch 'w/8.1/bugfix/S3C-2435-fix-obj-action-parse' into tmp/octopus/w/8.2/bugfix/S3C-2435-fix-obj-action-parse 2019-09-17 22:55:41 +00:00
Dora Korpar d24aa96014 Merge remote-tracking branch 'origin/bugfix/S3C-2435-fix-obj-action-parse' into w/8.1/bugfix/S3C-2435-fix-obj-action-parse 2019-09-17 15:46:19 -07:00
bert-e 3ab5d23dab Merge branch 'w/8.2/bugfix/S3C-2391/list_objects_v2_no_fetch_owner_corrections' into tmp/octopus/q/8.2 2019-09-17 16:40:48 +00:00
bert-e fa4d3a934e Merge branch 'w/7.6/bugfix/S3C-2391/list_objects_v2_no_fetch_owner_corrections' into tmp/octopus/w/8.1/bugfix/S3C-2391/list_objects_v2_no_fetch_owner_corrections 2019-09-13 22:58:53 +00:00
Salim 6ea92cf9ce ft: enable AWS and Ceph OOB on reportHandler 2019-09-12 15:23:47 -07:00
bert-e 9615d51c9e Merge branch 'w/7.6/bugfix/S3C-2169/invalid_user_metadata_header' into tmp/octopus/w/8.1/bugfix/S3C-2169/invalid_user_metadata_header 2019-09-12 19:03:18 +00:00
bert-e 74d7c53ffb Merge branch 'bugfix/ZENKO-2101/update-utapi-dependency' into q/8.1 2019-09-11 23:59:07 +00:00
bennettbuchanan 0e3dc452c0 bugfix: ZENKO-2101 Update UTAPI dependency 2019-09-11 16:07:56 -07:00
bert-e 9bd78d311c Merge branches 'w/8.1/bugfix/S3C-2169/invalid_user_metadata_header' and 'q/2123/7.6/bugfix/S3C-2169/invalid_user_metadata_header' into tmp/octopus/q/8.1 2019-09-11 22:53:37 +00:00
bert-e dbbadac2ee Merge branch 'feature/ZENKO-1616_Add_fixes_for_update_to_ObjectMD_model' into q/8.1 2019-09-11 22:53:04 +00:00
bert-e 86459b0b5a Merge branch 'bugfix/S3C-2424-bucket-policy-api-perms' into tmp/octopus/w/8.1/bugfix/S3C-2424-bucket-policy-api-perms 2019-09-11 18:21:56 +00:00
bert-e eaa9495b4d Merge branch 'w/7.6/bugfix/S3C-2169/invalid_user_metadata_header' into tmp/octopus/w/8.1/bugfix/S3C-2169/invalid_user_metadata_header 2019-09-11 17:57:16 +00:00
Salim b30263bcc6 improvement: add OOB reporting options for supported locations 2019-09-03 12:34:42 -07:00
bert-e 323885394d Merge branch 'w/7.5/bugfix/S3C-2052/revert-changes-stabilization' into tmp/octopus/w/8.1/bugfix/S3C-2052/revert-changes-stabilization 2019-08-29 20:52:59 +00:00
bert-e 2b72415a83 Merge branches 'w/8.1/bugfix/S3C-2052/revert-changes' and 'q/2107/7.5/bugfix/S3C-2052/revert-changes' into tmp/octopus/q/8.1 2019-08-29 19:29:45 +00:00
bbuchanan9 989530a41b Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2052/revert-changes' into w/8.1/bugfix/S3C-2052/revert-changes 2019-08-29 12:08:18 -07:00
bert-e 6e54403319 Merge branch 'w/7.5/bugfix/S3C-2052/revert-api-changes-stabilization' into tmp/octopus/w/8.1/bugfix/S3C-2052/revert-api-changes-stabilization 2019-08-28 23:53:19 +00:00
bert-e bd38e24ed3 Merge branches 'w/8.1/bugfix/S3C-2052/revert-api-changes' and 'q/2101/7.5/bugfix/S3C-2052/revert-api-changes' into tmp/octopus/q/8.1 2019-08-28 23:01:48 +00:00
bbuchanan9 85285f9356 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2052/revert-api-changes' into w/8.1/bugfix/S3C-2052/revert-api-changes 2019-08-28 15:21:50 -07:00
Taylor McKinnon b3d6cd1253 ft(ZENKO-1616): Add fixes for new ObjectMD properties 2019-08-27 10:21:07 -07:00
bert-e 7f36eff8c3 Merge branch 'w/7.5/bugfix/S3C-2274/exit-process' into tmp/octopus/w/8.1/bugfix/S3C-2274/exit-process 2019-08-27 16:09:24 +00:00
bert-e f85c3ca489 Merge branch 'bugfix/S3C-2412-bucket-pol-principal-eval' into tmp/octopus/w/8.1/bugfix/S3C-2412-bucket-pol-principal-eval 2019-08-22 21:45:52 +00:00
bert-e 2c132289e8 Merge branch 'w/7.5/bugfix/S3C-2052/delete-orphaned-data-remaining-APIs-forwardport' into tmp/octopus/w/8.1/bugfix/S3C-2052/delete-orphaned-data-remaining-APIs-forwardport 2019-08-20 20:46:48 +00:00
bert-e d5e5719843 Merge branches 'w/8.1/bugfix/S3C-2052/delete-orphaned-data-remaining-APIs' and 'q/2074/7.5/bugfix/S3C-2052/delete-orphaned-data-remaining-APIs' into tmp/octopus/q/8.1 2019-08-20 19:56:20 +00:00
bbuchanan9 daf841cf53 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2052/delete-orphaned-data-remaining-APIs' into w/8.1/bugfix/S3C-2052/delete-orphaned-data-remaining-APIs 2019-08-20 11:15:10 -07:00
Dora Korpar 2ba697f540 Merge remote-tracking branch 'origin/bugfix/S3C-2396-fix-bucket-policy-parsing' into w/8.1/bugfix/S3C-2396-fix-bucket-policy-parsing 2019-08-20 10:42:46 -07:00
bert-e b545fc47d8 Merge branches 'w/8.1/bugfix/S3C-2052/delete-orphaned-data-fwdpt' and 'q/2077/7.5/bugfix/S3C-2052/delete-orphaned-data-fwdpt' into tmp/octopus/q/8.1 2019-08-19 23:26:52 +00:00
bert-e dd76b681c9 Merge branches 'w/8.1/improvement/S3C-2365-install-yarn' and 'q/2067/7.5/improvement/S3C-2365-install-yarn' into tmp/octopus/q/8.1 2019-08-19 23:04:28 +00:00
bert-e 726a4b2aa8 Merge branch 'w/7.5/bugfix/S3C-2052/delete-orphaned-data-fwdpt' into tmp/octopus/w/8.1/bugfix/S3C-2052/delete-orphaned-data-fwdpt 2019-08-19 22:48:19 +00:00
anurag4dsb bac9a10c49
updated package.json and yarn.lock 2019-08-16 10:52:23 -07:00
anurag4dsb e4416a9687
Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2365-install-yarn' into w/8.1/improvement/S3C-2365-install-yarn 2019-08-14 16:04:42 -07:00
bbuchanan9 d275a76fea bugfix: S3C-2052 Updates for Arsenal backend 2019-08-13 18:32:01 -07:00
bbuchanan9 6f1619b1cc Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2052/delete-orphaned-data' into w/8.1/bugfix/S3C-2052/delete-orphaned-data 2019-08-13 18:29:56 -07:00
bert-e e21594fb3b Merge branches 'w/8.1/feature/S3C-2371-apis-check-bucket-policy' and 'q/2044/7.5/feature/S3C-2371-apis-check-bucket-policy' into tmp/octopus/q/8.1 2019-08-13 22:27:59 +00:00
Dora Korpar eda89806ba Merge remote-tracking branch 'origin/feature/S3C-2371-apis-check-bucket-policy' into w/8.1/feature/S3C-2371-apis-check-bucket-policy 2019-08-13 14:18:39 -07:00
bert-e 03c3a40f70 Merge branch 'feature/S3C-2027-bucket-policy-design' into tmp/octopus/w/8.1/feature/S3C-2027-bucket-policy-design 2019-08-13 20:51:34 +00:00
bert-e 9820307c97 Merge branch 'w/8.0/bugfix/S3C-2369-limit-batch-delete-backport' into tmp/octopus/w/8.1/bugfix/S3C-2369-limit-batch-delete-backport 2019-08-12 21:18:26 +00:00
bert-e e7f6f3d060 Merge branch 'w/7.5/bugfix/S3C-2369-limit-batch-delete-backport' into tmp/octopus/w/8.0/bugfix/S3C-2369-limit-batch-delete-backport 2019-08-12 21:18:25 +00:00
Rahul Padigela 136cf9c5da bugfix: S3C-2369 bump arsenal 2019-08-09 15:28:39 -07:00
Rahul Padigela 0770fc48fe Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2369-limit-batch-delete' into w/8.1/bugfix/S3C-2369-limit-batch-delete 2019-08-09 14:46:54 -07:00
Rahul Padigela 5588f9fb65 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2369-limit-batch-delete' into w/8.0/bugfix/S3C-2369-limit-batch-delete 2019-08-09 14:12:39 -07:00
bbuchanan9 2fbdc6608b Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2345/allow-config-to-disable-counters' into w/8.1/improvement/S3C-2345/allow-config-to-disable-counters 2019-08-07 11:48:45 -07:00
bbuchanan9 b1cb7c1cc7 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2345/allow-config-to-disable-counters' into w/8.0/improvement/S3C-2345/allow-config-to-disable-counters 2019-08-07 11:22:53 -07:00
bert-e 1284f449b1 Merge branch 'w/8.0/feature/S3C-2284-bucket-policy-delete' into tmp/octopus/w/8.1/feature/S3C-2284-bucket-policy-delete 2019-08-01 18:00:27 +00:00
bert-e 5e6dba28a6 Merge branch 'feature/S3C-2284-bucket-policy-delete' into tmp/octopus/w/8.0/feature/S3C-2284-bucket-policy-delete 2019-08-01 18:00:27 +00:00
bert-e 9590838e82 Merge branch 'w/8.0/feature/S3C-2283-bucket-policy-get' into tmp/octopus/w/8.1/feature/S3C-2283-bucket-policy-get 2019-08-01 01:06:38 +00:00
bert-e d896145d60 Merge branch 'feature/S3C-2283-bucket-policy-get' into tmp/octopus/w/8.0/feature/S3C-2283-bucket-policy-get 2019-08-01 01:06:37 +00:00
bert-e 9a8adc9ad3 Merge branch 'w/8.1/improvement/ZENKO-2031-update-cdmiclient' into tmp/octopus/q/8.1 2019-08-01 00:35:55 +00:00
bert-e 93d50448a1 Merge branch 'w/8.0/improvement/ZENKO-2031-update-cdmiclient' into tmp/octopus/w/8.1/improvement/ZENKO-2031-update-cdmiclient 2019-07-31 23:56:11 +00:00
Rahul Padigela b5517b50f8 Merge remote-tracking branch 'origin/w/7.5/improvement/ZENKO-2031-update-cdmiclient' into w/8.0/improvement/ZENKO-2031-update-cdmiclient 2019-07-31 16:40:36 -07:00
Salim ccefcecc78 ci: enable ft_awssdk_external_backends 2019-07-30 19:01:25 -07:00
Salim aa02034926 tests: md only header put 2019-07-30 19:01:25 -07:00
Salim c6aff6f6d3 ft: higher level meta-mdonly put
This adds the meta-mdonly header check from the client library to a higher-order
function, allowing metadata-only puts to all backend types.
2019-07-30 19:01:22 -07:00
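A hedged sketch of the higher-order pattern the commit describes; the wrapper, handler signature, and exact header value are assumptions for illustration:

```js
// Hypothetical higher-order wrapper: detect the md-only header once, before
// dispatching to whichever backend implements the put.
function withMdOnlyCheck(putHandler) {
    return function put(request, backend, callback) {
        const mdOnly = request.headers['x-amz-meta-mdonly'] === 'true';
        return putHandler(request, backend, mdOnly, callback);
    };
}

// The same wrapper works for any backend's put implementation.
const putToBackend = withMdOnlyCheck((req, backend, mdOnly, cb) => {
    // ... store the data, or only the metadata when mdOnly is true ...
    cb(null, { backend, mdOnly });
});

putToBackend({ headers: { 'x-amz-meta-mdonly': 'true' } }, 'aws_s3',
    (err, result) => console.log(result)); // { backend: 'aws_s3', mdOnly: true }
```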
bert-e 20281be5ef Merge branch 'w/8.0/feature/S3C-2277-bucket-policy-put' into tmp/octopus/w/8.1/feature/S3C-2277-bucket-policy-put 2019-07-30 00:19:09 +00:00
bert-e 538da6ec17 Merge branch 'feature/S3C-2277-bucket-policy-put' into tmp/octopus/w/8.0/feature/S3C-2277-bucket-policy-put 2019-07-30 00:19:08 +00:00
Rahul Padigela 48550bbe89 Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2216-bump-tags-limit' into w/8.1/improvement/S3C-2216-bump-tags-limit 2019-07-29 10:54:50 -07:00
Rahul Padigela 4ce2fc43ed Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2216-bump-tags-limit' into w/8.0/improvement/S3C-2216-bump-tags-limit 2019-07-29 10:32:04 -07:00
Alexander Chan 306dea4616 feature: ZENKO-2004 search for configs 2019-07-25 14:55:32 -07:00
bert-e 7072b365c3 Merge branches 'w/8.1/bugfix/ZENKO-1829_Add_init_container_for_certs' and 'q/1992/8.0/bugfix/ZENKO-1829_Add_init_container_for_certs' into tmp/octopus/q/8.1 2019-07-24 23:57:44 +00:00
bert-e 2069df7ca8 Merge branches 'w/8.0/bugfix/ZENKO-1829_Add_init_container_for_certs' and 'q/1992/7.5/bugfix/ZENKO-1829_Add_init_container_for_certs' into tmp/octopus/q/8.0 2019-07-24 23:57:43 +00:00
bert-e 2c514cb08b Merge branches 'w/8.1/feature/RELENG-2986-the-no-burst-policy' and 'q/1995/8.0/feature/RELENG-2986-the-no-burst-policy' into tmp/octopus/q/8.1 2019-07-24 23:21:44 +00:00
bert-e 41eed2de1f Merge branches 'w/8.0/feature/RELENG-2986-the-no-burst-policy' and 'q/1995/7.5/feature/RELENG-2986-the-no-burst-policy' into tmp/octopus/q/8.0 2019-07-24 23:21:43 +00:00
Thomas Carmet 4c680e1b53 Merge remote-tracking branch 'origin/w/8.0/feature/RELENG-2986-the-no-burst-policy' into w/8.1/feature/RELENG-2986-the-no-burst-policy 2019-07-24 13:23:42 -07:00
Thomas Carmet b7fed1898a Merge remote-tracking branch 'origin/w/7.5/feature/RELENG-2986-the-no-burst-policy' into w/8.0/feature/RELENG-2986-the-no-burst-policy 2019-07-24 13:18:48 -07:00
bert-e febd9803da Merge branch 'w/8.0/bugfix/ZENKO-1829_Add_init_container_for_certs' into tmp/octopus/w/8.1/bugfix/ZENKO-1829_Add_init_container_for_certs 2019-07-24 17:24:26 +00:00
bert-e bb4ecec2a3 Merge branch 'bugfix/ZENKO-1829_Add_init_container_for_certs' into tmp/octopus/w/8.0/bugfix/ZENKO-1829_Add_init_container_for_certs 2019-07-24 17:24:25 +00:00
bert-e c44e000d1f Merge branch 'w/8.0/feature/ZENKOIO-58-upload-junit-reports' into tmp/octopus/w/8.1/feature/ZENKOIO-58-upload-junit-reports 2019-07-24 16:40:06 +00:00
bert-e 88e01f0d8d Merge branch 'w/7.5/feature/ZENKOIO-58-upload-junit-reports' into tmp/octopus/w/8.0/feature/ZENKOIO-58-upload-junit-reports 2019-07-24 16:40:05 +00:00
Thomas Carmet 338d944aeb ZENKOIO-58 add junit upload on ceph stage 2019-07-23 17:56:04 -07:00
bert-e deecb70edf Merge branch 'w/8.0/feature/ZENKOIO-58-upload-junit-reports' into tmp/octopus/w/8.1/feature/ZENKOIO-58-upload-junit-reports 2019-07-24 00:52:25 +00:00
Thomas Carmet 2fa0e3967a ZENKOIO-58 upload junit on kmip stage 2019-07-23 17:51:56 -07:00
Thomas Carmet 7ea47e74fe Merge remote-tracking branch 'origin/w/8.0/feature/ZENKOIO-58-upload-junit-reports' into w/8.1/feature/ZENKOIO-58-upload-junit-reports 2019-07-23 17:34:10 -07:00
Thomas Carmet 78b2f33943 Merge remote-tracking branch 'origin/w/7.5/feature/ZENKOIO-58-upload-junit-reports' into w/8.0/feature/ZENKOIO-58-upload-junit-reports 2019-07-23 17:31:12 -07:00
anurag4dsb e9b87afd11
arrow style 2019-07-19 11:44:44 -07:00
anurag4dsb aafc55396f
trying arrow style linter 2019-07-19 11:12:27 -07:00
anurag4dsb 6ac5122a80
added return in cb 2019-07-19 10:51:31 -07:00
anurag4dsb 84dcd8ff34
hopefully this passes 2019-07-19 10:35:35 -07:00
anurag4dsb 8a7a0128fa
cbonce 2019-07-19 10:26:56 -07:00
anurag4dsb c80147cac2
added console.log 2019-07-19 10:16:41 -07:00
anurag4dsb 550604aa9b
only added 2019-07-19 10:14:53 -07:00
anurag4dsb bb6201d6cf
reverted mocha 2019-07-19 09:49:07 -07:00
anurag4dsb 75c23df174
changed mocha script 2019-07-18 12:45:57 -07:00
anurag4dsb 722cc304c6
updated mocha 2019-07-18 11:25:11 -07:00
anurag4dsb 478a264845
changed mocha minor version 2019-07-18 11:01:03 -07:00
anurag4dsb ec0c7f6eff
reverted everything 2019-07-18 10:35:56 -07:00
anurag4dsb b03bf4e192
[s/d] 100continue test 2019-07-18 10:28:17 -07:00
anurag4dsb 7eb39abc70
[s/d] running specific test 2019-07-18 10:20:53 -07:00
Katherine Laue 57294c8d5c updated internal deps for 8.1 2019-07-17 16:58:03 -07:00
Katherine Laue 6243922a20 Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2293-upgradeNodeJS' into w/8.1/improvement/S3C-2293-upgradeNodeJS 2019-07-17 16:47:57 -07:00
Katherine Laue 5b68f94c48 updated internal deps for 8.0 2019-07-17 16:44:24 -07:00
Katherine Laue a8006a4991 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2293-upgradeNodeJS' into w/8.0/improvement/S3C-2293-upgradeNodeJS 2019-07-17 16:42:27 -07:00
bert-e 453b4e5999 Merge branch 'w/8.0/bugfix/S3C-2157-sanity-check' into tmp/octopus/w/8.1/bugfix/S3C-2157-sanity-check 2019-07-12 06:25:57 +00:00
bert-e 3b16d16ccd Merge branch 'w/7.5/bugfix/S3C-2157-sanity-check' into tmp/octopus/w/8.0/bugfix/S3C-2157-sanity-check 2019-07-12 06:25:57 +00:00
bert-e eb2776b1cd Merge branch 'w/8.0/feature/S3C-2160-eve-backport' into tmp/octopus/w/8.1/feature/S3C-2160-eve-backport 2019-07-12 00:48:57 +00:00
bert-e b59e06a5db Merge branch 'w/7.5/feature/S3C-2160-eve-backport' into tmp/octopus/w/8.0/feature/S3C-2160-eve-backport 2019-07-12 00:48:57 +00:00
bert-e 2a8d541786 Merge branch 'w/8.1/bugfix/S3C-2172-bucket-error' into tmp/octopus/q/8.1 2019-07-11 21:16:38 +00:00
bert-e cf23c3ccf0 Merge branch 'feature/ZENKO-1840-mongo-auth-2' into q/8.1 2019-07-11 18:26:59 +00:00
Rahul Padigela 1861156ff9 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2172-bucket-error' into w/8.1/bugfix/S3C-2172-bucket-error 2019-07-11 10:42:13 -07:00
Rahul Padigela 5a66c65296 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2172-bucket-error' into w/8.0/bugfix/S3C-2172-bucket-error 2019-07-11 09:25:57 -07:00
Salim a7d93b0014 bugfix: allow delete markers on NFS backed objects 2019-07-08 12:03:47 -07:00
Rahul Padigela 20ccab2233 bugfix: ZENKO-1930 update arsenal to fix batchdelete 2019-07-03 17:56:34 -07:00
anurag4dsb ca74e04ac7
bf: ZENKO-1948 fix env variable for vault host check 2019-06-26 16:11:33 -07:00
anurag4dsb 7170b5722e
improvement: ZENKO-1944 Make Vaultd host modular 2019-06-26 11:54:30 -07:00
Nicolas Humbert 3806fe7030 ZENKO-1840 fix secure MongoDB access 2019-06-21 17:46:56 -07:00
Nicolas Humbert 30b0c8ceca update package.json 2019-06-19 15:57:14 -07:00
Nicolas Humbert 6ffaca6953 ZENKO-1840 Secure MongoDb access 2019-06-19 15:57:08 -07:00
Rahul Padigela c1c7fa2a1d bugfix: ZENKO-1908 update arsenal for batchdelete 2019-06-18 16:59:04 -07:00
Rahul Padigela cd665b9715 bugfix: remove unused sproxydclient 2019-06-18 16:48:51 -07:00
philipyoo 68cae12e07 bf: ZENKO-1736 update arsenal countItems
Update arsenal for countItems changes
2019-06-14 16:43:10 -07:00
bert-e 5f137d47b1 Merge branch 'feature/ZENKO-1360/doc-update' into q/8.1 2019-06-13 18:44:27 +00:00
bbuchanan9 320336a9e2 bugfix: ZENKO-1872 Check array length 2019-06-12 14:31:55 -07:00
bert-e 51b76c9803 Merge branch 'bugfix/ZENKO-1883-utapi-redis-config' into q/8.0 2019-06-12 18:48:04 +00:00
bert-e 1aba527523 Merge branches 'w/8.1/bugfix/ZENKO-1883-utapi-redis-config' and 'q/1934/8.0/bugfix/ZENKO-1883-utapi-redis-config' into tmp/octopus/q/8.1 2019-06-12 18:48:04 +00:00
Rahul Padigela 38b9f72607 bugfix: S3C-2243 add arsenal dependency 2019-06-11 19:34:25 -07:00
Rahul Padigela 43bf45df23 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2243-batchdelete-keys' into w/8.1/bugfix/S3C-2243-batchdelete-keys 2019-06-11 18:47:18 -07:00
Rahul Padigela e40be11058 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2243-batchdelete-keys' into w/8.0/bugfix/S3C-2243-batchdelete-keys 2019-06-11 18:14:18 -07:00
bert-e 2325f726b0 Merge branch 'bugfix/ZENKO-1883-utapi-redis-config' into tmp/octopus/w/8.1/bugfix/ZENKO-1883-utapi-redis-config 2019-06-11 18:04:25 +00:00
Jianqin Wang 207b743266 bugfix: fix utapi redis configuration parsing 2019-06-11 11:01:30 -07:00
Salim 3f0ef9a68a Merge branch 'w/8.0/improvement/ZENKO-1867-fix-product-version' into w/8.1/improvement/ZENKO-1867-fix-product-version 2019-06-10 14:22:44 -07:00
Salim e92ba8900f Merge branch 'w/7.5/improvement/ZENKO-1867-fix-product-version' into w/8.0/improvement/ZENKO-1867-fix-product-version 2019-06-10 14:21:57 -07:00
bert-e 9465ecffc1 Merge branch 'w/8.0/bugfix/S3C-2076-utapi-reindex-config' into tmp/octopus/w/8.1/bugfix/S3C-2076-utapi-reindex-config 2019-06-07 05:12:57 +00:00
bert-e c704293b04 Merge branch 'w/7.5/bugfix/S3C-2076-utapi-reindex-config' into tmp/octopus/w/8.0/bugfix/S3C-2076-utapi-reindex-config 2019-06-07 05:12:57 +00:00
Jianqin Wang cb23a7e81f Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2019-redis-authentication' into w/8.1/bugfix/S3C-2019-redis-authentication 2019-06-05 16:58:15 -07:00
Jianqin Wang ca7ea1465c Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2019-redis-authentication' into w/8.0/bugfix/S3C-2019-redis-authentication 2019-06-05 16:32:09 -07:00
bert-e 271c0e3075 Merge branch 'w/8.0/bugfix/S3C-2076/update-utapi-dep' into tmp/octopus/w/8.1/bugfix/S3C-2076/update-utapi-dep 2019-06-05 21:03:14 +00:00
bbuchanan9 0f7a8a59aa Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2076/update-utapi-dep' into w/8.0/bugfix/S3C-2076/update-utapi-dep 2019-06-05 13:58:47 -07:00
bert-e a9bdc8b1ba Merge branch 'w/8.1/bugfix/S3C-2076/add-utapi-reindex' into tmp/octopus/q/8.1 2019-06-05 07:41:27 +00:00
bbuchanan9 7ee1ec28f6 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2076/add-utapi-reindex' into w/8.1/bugfix/S3C-2076/add-utapi-reindex 2019-06-05 00:03:16 -07:00
bbuchanan9 eb67f17397 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2076/add-utapi-reindex' into w/8.0/bugfix/S3C-2076/add-utapi-reindex 2019-06-04 23:29:20 -07:00
bert-e 470f230212 Merge branch 'feature/ZENKO-1718-testsMongoClientInterface' into q/8.1 2019-06-04 22:31:43 +00:00
bert-e a6de663ad4 Merge branch 'feature/ZENKO-1842/azure-info-models' into q/8.1 2019-06-04 21:39:10 +00:00
Dora Korpar 8351555c3d bf: ZENKO 1857 utapi package version 2019-06-04 10:34:37 -07:00
Rahul Padigela 01ad582647 bugfix: S3C-1139 sproxyd batchdelete 2019-06-04 00:17:18 -07:00
Rahul Padigela 1dd0aa3fc3 Merge remote-tracking branch 'origin/w/8.0/feature/S3C-1139-sproxyd-batchdelete' into w/8.1/feature/S3C-1139-sproxyd-batchdelete 2019-06-04 00:12:26 -07:00
Rahul Padigela 7aaf561249 Merge remote-tracking branch 'origin/w/7.5/feature/S3C-1139-sproxyd-batchdelete' into w/8.0/feature/S3C-1139-sproxyd-batchdelete 2019-06-03 23:55:02 -07:00
bert-e 2aa5be05ed Merge branch 'w/8.1/bugfix/S3C-2222-artifact-conflicts' into tmp/octopus/q/8.1 2019-05-31 17:51:16 +00:00
dashagurova 738ddcb56a documentation: ZENKOIO-22 Update Old Links
adding documentation

remove break line
2019-05-30 15:42:48 -07:00
bert-e dbb9a5dc6f Merge branch 'w/8.0/bugfix/S3C-2222-artifact-conflicts' into tmp/octopus/w/8.1/bugfix/S3C-2222-artifact-conflicts 2019-05-30 21:58:30 +00:00
bert-e 30216d7d51 Merge branch 'w/7.5/bugfix/S3C-2222-artifact-conflicts' into tmp/octopus/w/8.0/bugfix/S3C-2222-artifact-conflicts 2019-05-30 21:58:29 +00:00
Guillaume Gimenez dcea59418b feature: ZENKO-1842: azure-info-models
Updated ModelVersion.md to reflect the model changes in arsenal
2019-05-30 10:14:07 -07:00
philipyoo 4faed870d3 ft: ZENKO-1718 MongoClientInterface putObj tests 2019-05-28 09:39:10 -07:00
Dora Korpar db85022738 bf: ZENKO 1728 update arsenal 2019-05-24 15:58:02 -07:00
bert-e efacf39cfe Merge branches 'w/8.0/improvement/S3C-2034-bump-ioredis' and 'q/1836/7.5/improvement/S3C-2034-bump-ioredis' into tmp/octopus/q/8.0 2019-05-23 19:15:17 +00:00
bert-e 5e49e63f17 Merge branches 'w/8.1/improvement/S3C-2034-bump-ioredis' and 'q/1836/8.0/improvement/S3C-2034-bump-ioredis' into tmp/octopus/q/8.1 2019-05-23 19:15:17 +00:00
bert-e 6e6eb92513 Merge branches 'development/8.1' and 'w/8.0/improvement/S3C-2034-bump-ioredis' into tmp/octopus/w/8.1/improvement/S3C-2034-bump-ioredis 2019-05-23 17:19:03 +00:00
Jianqin Wang ef469216ac Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2034-bump-ioredis' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-23 10:18:44 -07:00
Jianqin Wang 6736acb18f Merge remote-tracking branch 'origin/development/8.0' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-23 10:16:38 -07:00
bert-e f52c0056cc Merge branch 'w/8.0/bugfix/S3C-2118-abort-mpu-crash-2' into tmp/octopus/w/8.1/bugfix/S3C-2118-abort-mpu-crash-2 2019-05-21 23:14:47 +00:00
Rahul Padigela 9fd8468e9c Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2118-abort-mpu-crash-2' into w/8.0/bugfix/S3C-2118-abort-mpu-crash-2 2019-05-21 16:00:08 -07:00
Jianqin Wang d07d85c944 Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2034-bump-ioredis' into w/8.1/improvement/S3C-2034-bump-ioredis 2019-05-21 15:30:17 -07:00
Jianqin Wang 91f59f876a update: package-lock.json file 2019-05-21 15:26:05 -07:00
Jianqin Wang 48703f48cf Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2034-bump-ioredis' into w/8.1/improvement/S3C-2034-bump-ioredis 2019-05-21 15:12:42 -07:00
Jianqin Wang 5a09b0226b Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2034-bump-ioredis' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-21 14:52:24 -07:00
bbuchanan9 d0c6d93921 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2195/upload-copy-part-metrics' into w/8.1/bugfix/S3C-2195/upload-copy-part-metrics 2019-05-21 11:22:30 -07:00
bbuchanan9 f3ea4f1103 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2195/upload-copy-part-metrics' into w/8.0/bugfix/S3C-2195/upload-copy-part-metrics 2019-05-21 10:31:23 -07:00
Rahul Padigela 691b75aa18 improvement: ZENKO-1827 move to debian Dockerfile 2019-05-20 15:35:26 -07:00
bert-e b9ec6432cb Merge branch 'bugfix/ZENKO-1506_flaky_s3cmd_test' into q/8.1 2019-05-17 17:16:11 +00:00
bbuchanan9 70d4e010f2 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2105/add-utapi-crr-conditions' into w/8.1/bugfix/S3C-2105/add-utapi-crr-conditions 2019-05-17 09:59:03 +02:00
bert-e 62591df809 Merge branch 'w/7.5/bugfix/S3C-2105/add-utapi-crr-conditions' into tmp/octopus/w/8.0/bugfix/S3C-2105/add-utapi-crr-conditions 2019-05-17 07:55:53 +00:00
Taylor McKinnon e69619f784 bf(ZENKO-1506): Increase pod memory limits 2019-05-15 16:43:47 -07:00
Jonathan Gramain 78efc3f8b8 bugfix: ZENKO-1809 allow replication from md-ingestion account
Allow md-ingestion service account to trigger replication on new
objects, so that NFS ingestion buckets are allowed to replicate to
cloud locations.
2019-05-15 16:21:13 -07:00
Jonathan Gramain c9cc5849b2 bugfix: ZENKO-1809 flag nfs location type as supporting versioning
Change the supportsVersioning flag to true for NFS locations: although
versioning is set initially by cloudserver, these locations should also
accept new putBucketVersioning() calls with Status=Enabled. This
partially fixes replication support by making sure the Orbit workflow
can be applied to NFS locations.
2019-05-15 16:20:46 -07:00
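A hedged sketch of the flag change described above; aside from supportsVersioning (named in the commit), the field values are illustrative and not taken from cloudserver's actual location-constraint schema:

```js
// Illustrative NFS-backed location entry: the fix flips supportsVersioning
// to true so later putBucketVersioning(Status=Enabled) calls are accepted.
const nfsLocation = {
    locationType: 'location-nfs-mount', // assumed type name, for illustration
    supportsVersioning: true,           // was false before this commit
};

console.log(nfsLocation.supportsVersioning); // true
```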
bert-e e7308d4487 Merge branch 'feature/ZENKO-1755-add-support-for-hd' into q/8.1 2019-05-15 16:59:05 +00:00
Taylor McKinnon c9199c0cc3 bf(ZENKO-1506): Fix flaky s3cmd test 2019-05-13 11:34:18 -07:00
bert-e 3148a71091 Merge branches 'w/8.0/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' and 'q/1816/7.5/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' into tmp/octopus/q/8.0 2019-05-10 23:36:24 +00:00
bert-e c0782322a3 Merge branches 'w/8.1/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' and 'q/1816/8.0/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' into tmp/octopus/q/8.1 2019-05-10 23:36:24 +00:00
bbuchanan9 69d28bf0bc Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-1506/upgrade-utapi-dependency' into w/8.1/bugfix/S3C-1506/upgrade-utapi-dependency 2019-05-10 13:47:51 -07:00
bbuchanan9 2cf54d0579 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-1506/upgrade-utapi-dependency' into w/8.0/bugfix/S3C-1506/upgrade-utapi-dependency 2019-05-10 11:52:50 -07:00
Benoit A 00e5b49b55 ZENKO-1755 Add hdclient dependency to package.json
Signed-off-by: Benoit A <benoit@scality.com>
2019-05-10 10:09:59 +02:00
bbuchanan9 a389e9e6d4 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' into w/8.1/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes 2019-05-09 13:21:18 -07:00
bert-e 7e42b43c86 Merge branch 'w/7.5/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' into tmp/octopus/w/8.0/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes 2019-05-08 23:10:25 +00:00
philipyoo 0cc3124999 bf: ZENKO-1585 userMD save hashed instance id
user metadata field for `constants.zenkoIDHeader` uses a
hashed instance id (of the current zenko deployment)
2019-05-02 13:57:45 -07:00
philipyoo fb795d557d bf: ZENKO-1585 apply zenko user metadata field
For objects/versions created in a Zenko deployment, we
apply a user metadata field indicating as so.
Changes apply for following ops:
- objectPut
- multiObjectDelete
- objectCopy
- initiateMPU
2019-05-02 13:57:45 -07:00
bert-e 88abf21f37 Merge branch 'q/1805/8.0/bugfix/S3C-1959-sanity-check' into tmp/normal/q/8.1 2019-05-02 06:39:29 +00:00
bert-e 9ae8e432d5 Merge branch 'w/8.1/bugfix/S3C-1959-sanity-check' into tmp/normal/q/8.1 2019-05-02 06:39:29 +00:00
bert-e b587ff29e2 Merge branches 'w/8.0/bugfix/S3C-1959-sanity-check' and 'q/1805/7.5/bugfix/S3C-1959-sanity-check' into tmp/octopus/q/8.0 2019-05-02 06:39:28 +00:00
Rahul Padigela 1df12f1b0e refactor: move tests to arsenal 2019-05-01 22:25:32 -07:00
Rahul Padigela ecd3d803a4 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-1959-sanity-check' into w/8.1/bugfix/S3C-1959-sanity-check 2019-05-01 21:55:11 -07:00
Rahul Padigela 07339703f5 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-1959-sanity-check' into w/8.0/bugfix/S3C-1959-sanity-check 2019-05-01 17:33:47 -07:00
bert-e 80784c15cf Merge branches 'w/8.1/feature/S3C-1974_PyKMIP_in_CI' and 'q/1775/8.0/feature/S3C-1974_PyKMIP_in_CI' into tmp/octopus/q/8.1 2019-04-30 14:08:25 +00:00
bert-e 38ddb50a1c Merge branches 'w/8.0/feature/S3C-1974_PyKMIP_in_CI' and 'q/1775/7.5/feature/S3C-1974_PyKMIP_in_CI' into tmp/octopus/q/8.0 2019-04-30 14:08:25 +00:00
Dora Korpar 807130100f Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2120-abort-mpu-timeout' into w/8.1/bugfix/S3C-2120-abort-mpu-timeout 2019-04-24 17:09:31 -07:00
Dora Korpar a779e25ca6 fix linter 2019-04-24 16:37:05 -07:00
Dora Korpar 2426e47a16 S3C 2120 update arsenal 2019-04-24 15:38:03 -07:00
Dora Korpar f897998cb9 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2120-abort-mpu-timeout' into w/8.1/bugfix/S3C-2120-abort-mpu-timeout 2019-04-24 15:37:40 -07:00
Dora Korpar 1b958cfa23 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2120-abort-mpu-timeout' into w/8.0/bugfix/S3C-2120-abort-mpu-timeout 2019-04-24 15:20:40 -07:00
bert-e 6e431a7a32 Merge branches 'w/8.1/bugfix/ZENKO-1745-Fix-HTTP-Agent_config_backport' and 'q/1793/8.0/bugfix/ZENKO-1745-Fix-HTTP-Agent_config_backport' into tmp/octopus/q/8.1 2019-04-22 17:32:22 +00:00
bert-e 02cb1a8c57 Merge branch 'bugfix/ZENKO-1745-Fix-HTTP-Agent_config_backport' into q/8.0 2019-04-22 17:32:22 +00:00
bert-e 3b31ad1fb1 Merge branch 'bugfix/ZENKO-1745-Fix-HTTP-Agent_config_backport' into tmp/octopus/w/8.1/bugfix/ZENKO-1745-Fix-HTTP-Agent_config_backport 2019-04-19 23:54:08 +00:00
Taylor McKinnon 559a20c702 bf(ZENKO-1745): Fix HTTP Agent configuration in docker-entrypoint.sh 2019-04-19 16:53:09 -07:00
bert-e fb47185f5a Merge branch 'bugfix/ZENKO-1745-Fix-HTTP-Agent_config' into q/8.1 2019-04-19 20:37:12 +00:00
Taylor McKinnon a6b5965ac0 bf(ZENKO-1745): Fix HTTP Agent configuration in docker-entrypoint.sh 2019-04-19 10:49:16 -07:00
bert-e 0f91054f94 Merge branch 'w/8.1/bugfix/S3C-2099-zenkoUserMDFieldDeleteMarkers' into tmp/octopus/q/8.1 2019-04-19 05:25:02 +00:00
bbuchanan9 61a6a159c8 bugfix: ZENKO-1606 MPU tagging during replication 2019-04-18 15:47:15 -07:00
philipyoo 4c5efef677 Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2099-zenkoUserMDFieldDeleteMarkers' into w/8.1/bugfix/S3C-2099-zenkoUserMDFieldDeleteMarkers 2019-04-18 12:00:54 -07:00
philipyoo 4060341963 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2099-zenkoUserMDFieldDeleteMarkers' into w/8.0/bugfix/S3C-2099-zenkoUserMDFieldDeleteMarkers 2019-04-18 10:37:28 -07:00
Taylor McKinnon 8e2c9ff6da Merge remote-tracking branch 'origin/w/8.0/feature/S3C-1974_PyKMIP_in_CI' into w/8.1/feature/S3C-1974_PyKMIP_in_CI 2019-04-16 10:15:57 -07:00
Taylor McKinnon 1d5199f226 Merge branch 'feature/S3C-1974_PyKMIP_in_CI' into w/8.0/feature/S3C-1974_PyKMIP_in_CI 2019-04-16 10:09:47 -07:00
bbuchanan9 34400be01a bugfix: ZENKO-1583 Add functional tests 2019-04-12 13:22:42 -07:00
bert-e c898505c01 Merge branch 'bugfix/ZENKO-1606/tagging-during-transition' into q/8.1 2019-04-10 18:32:32 +00:00
bbuchanan9 07f7ed1f1e bugfix: ZENKO-1606 Update Arsenal dependency 2019-04-10 09:43:24 -07:00
bbuchanan9 dc96e2ecfe bugfix: ZENKO-1606 Replicate tags from object PUT 2019-04-05 13:21:19 -07:00
bbuchanan9 ad395c568a bugfix: ZENKO-1583 Azure err code, add log detail 2019-04-03 11:15:49 -07:00
bbuchanan9 f68b216eb5 bugfix: ZENKO-1583 Add if-unmodified-since check 2019-04-02 18:02:16 -07:00
philipyoo 59c86b9144 ft: ZENKO-1661 rename ingestion status report var
Rename ingestion status report variable from
`ingestionStatus` to `ingestStatus`
2019-03-27 17:03:53 -07:00
philipyoo 00c60a986f ft: ZENKO-1661 add ingestion data to reporthandler
Add ingestion info (metrics, pause state) to report handler.

Attempt to rename "stats" -> "metrics" since
"stats" and "state" or "status" are very similar.
2019-03-27 12:11:30 -07:00
bert-e e0faa73dce Merge branch 'w/8.0/feature/S3C-2035/tls-files-loading' into tmp/octopus/w/8.1/feature/S3C-2035/tls-files-loading 2019-03-25 17:08:01 +00:00
bert-e 68dca6cddf Merge branch 'feature/S3C-2035/tls-files-loading' into tmp/octopus/w/8.0/feature/S3C-2035/tls-files-loading 2019-03-25 17:08:00 +00:00
bert-e 56fbbe9c34 Merge branch 'bugfix/ZENKO-1610/disable-non-current-version-in-api' into q/8.1 2019-03-21 22:12:29 +00:00
bbuchanan9 51728c2019 bugfix: ZENKO-1610 Non-current version transition 2019-03-21 13:13:22 -07:00
bert-e afb64940de Merge branch 'feature/ZENKO-1585-replication-group-id' into q/8.1 2019-03-21 15:50:15 +00:00
bbuchanan9 fa9f7a768c feature: ZENKO-1529 Add managed transition report 2019-03-19 13:21:01 -07:00
Dora Korpar 655a74f195 bf: ZENKO 1598 bb routes regression 2019-03-18 15:31:12 -07:00
Rahul Padigela c41bf73a5c feature: ZENKO-1585 configure replication group id
Replication group id is used in the calculation of version ids for objects
on buckets that have versioning enabled. It has many benefits including avoiding
collisions in an active-active scenario, identifying distinct entries from an
instance etc.
This commit allows configuring a custom replication group id for an instance,
ensuring that the version ids generated by the instance are unique.
2019-03-18 12:04:59 -07:00
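A minimal sketch of the idea this commit describes, assuming a config field named replicationGroupId and an illustrative version-id layout of timestamp + sequence + group id (neither the field name nor the exact layout is taken from the CloudServer code):

    // Why a per-instance replication group id keeps version ids unique across
    // instances (names and layout below are assumptions, for illustration only).
    const config = { replicationGroupId: 'RG001' }; // hypothetical config entry

    function buildVersionId(timestampMs, seq, groupId) {
        // Embedding the group id makes ids from different instances distinct
        // even if timestamp and sequence happen to collide.
        return `${timestampMs.toString(16)}${seq.toString(16).padStart(6, '0')}${groupId}`;
    }

    console.log(buildVersionId(Date.now(), 42, config.replicationGroupId));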
bert-e ed8838de54 Merge branch 'bugfix/ZENKO-1568-lc-update-crash' into q/8.1 2019-03-15 23:51:45 +00:00
bert-e 11e3b19475 Merge branch 'feature/ZENKO-1420-returnLocationFromMultipleBackendPutObject' into q/8.1 2019-03-15 23:51:16 +00:00
Dora Korpar 5fa4754ba4 bf: ZENKO 1402 location constraint update event 2019-03-15 16:39:49 -07:00
Jonathan Gramain 086250dc82 feature: ZENKO-1420 return locations from backbeat PUT routes
Return the locations array in the API of putObject and completeMPU
multiple backend routes, so that transition policies can use it to
update metadata with the new location as-is.
2019-03-15 16:24:45 -07:00
Guillaume Gimenez 78a5cd8b6e Merge remote-tracking branch 'origin/w/8.0/feature/S3C-2032/update-arsenal-deps' into w/8.1/feature/S3C-2032/update-arsenal-deps 2019-03-15 12:01:42 -07:00
Guillaume Gimenez 6bf16a6d24 Merge remote-tracking branch 'origin/feature/S3C-2032/update-arsenal-deps' into w/8.0/feature/S3C-2032/update-arsenal-deps 2019-03-14 17:40:28 -07:00
Rahul Padigela 842ada6960 feature: ZENKO-1570 enable versioning for ingestion
This commit ensures that versioning is automatically enabled for ingestion
buckets upon creation. It satisfies the requirement that all ingestion buckets
must be versioning enabled.
2019-03-14 10:11:04 -07:00
bert-e 5b6c67a67d Merge branch 'w/8.0/bugfix/S3C-1396-website-redirect-header-response' into tmp/octopus/w/8.1/bugfix/S3C-1396-website-redirect-header-response 2019-03-12 19:27:50 +00:00
Giacomo Guiulfo c83e623f44 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-1396-website-redirect-header-response' into w/8.0/bugfix/S3C-1396-website-redirect-header-response 2019-03-12 12:26:28 -07:00
Rahul Padigela 7a538b012c Merge remote-tracking branch 'origin/w/8.0/bugfix/S3C-2002-restricted-admin-access' into w/8.1/bugfix/S3C-2002-restricted-admin-access 2019-03-11 22:08:37 -07:00
Rahul Padigela 68ab0821b7 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2002-restricted-admin-access' into w/8.0/bugfix/S3C-2002-restricted-admin-access 2019-03-11 17:30:50 -07:00
bert-e 66cd32f08c Merge branch 'w/8.0/feature/S3C-1969/kmip-cloudserver-integration' into tmp/octopus/w/8.1/feature/S3C-1969/kmip-cloudserver-integration 2019-03-07 00:22:16 +00:00
bert-e 460a9cec7e Merge branch 'feature/S3C-1969/kmip-cloudserver-integration' into tmp/octopus/w/8.0/feature/S3C-1969/kmip-cloudserver-integration 2019-03-07 00:22:16 +00:00
bert-e 0ffb1fea7f Merge branch 'feature/ZENKO-1402-move-data-wrapper' into q/8.1 2019-03-04 19:29:15 +00:00
Taylor McKinnon 3b47d0a636 bf(ZENKO-1511): Update Ceph CI Image 2019-03-01 15:11:27 -08:00
anurag4DSB 1823d56afd Merge remote-tracking branch 'origin/w/8.0/improvement/S3C-2005-updateVaultClient' into w/8.1/improvement/S3C-2005-updateVaultClient
Conflicts:
	package-lock.json
2019-02-27 22:06:02 +00:00
anurag4DSB 00023f9274 Merge remote-tracking branch 'origin/improvement/S3C-2005-updateVaultClient' into w/8.0/improvement/S3C-2005-updateVaultClient
Conflicts:
	package-lock.json
	package.json
2019-02-27 21:56:09 +00:00
Dora Korpar 26004e59fa [chore] update arsenal 2019-02-26 14:54:37 -08:00
Dora Korpar 5aa25752a8 ft: ZENKO 1402 move data wrapper to arsenal 2019-02-26 14:54:11 -08:00
Taylor McKinnon 279c71382f Spawn file operations synchronously 2019-02-19 16:23:13 -08:00
Taylor McKinnon f1d2cd5d00 Bump test pod memory limits 2019-02-19 11:00:03 -08:00
Taylor McKinnon ceec8edae3 merge 2019-02-19 10:50:23 -08:00
Taylor McKinnon ae04387989 feat(ZENKO-1508): Add Ceph Orbit support 2019-02-15 17:41:47 -08:00
Taylor McKinnon 74b9369a93 feat(ZENKO-1508): Add Ceph Orbit support 2019-02-15 15:23:23 -08:00
Salim b95f6da45b ci: update mongo to 3.6.8 2019-02-13 17:34:38 -08:00
bert-e 20393b891e Merge branch 'w/8.1/bugfix/ZENKO-1460-maven-fix' into tmp/octopus/q/8.1 2019-02-12 01:45:23 +00:00
bert-e 0ce57a3d34 Merge branch 'w/7.5/bugfix/ZENKO-1460-maven-fix' into tmp/octopus/w/8.0/bugfix/ZENKO-1460-maven-fix 2019-02-11 19:42:46 +00:00
bert-e aa2459fdaf Merge branch 'w/8.0/bugfix/ZENKO-1460-maven-fix' into tmp/octopus/w/8.1/bugfix/ZENKO-1460-maven-fix 2019-02-11 19:42:46 +00:00
Dora Korpar 5c7667f90c [chore] increase test aggressor mem 2019-02-11 10:21:53 -08:00
Dora Korpar ac8490c2cd update Arsenal version 2019-02-11 10:21:53 -08:00
Dora Korpar fb19b61d69 ft: ZENKO 1402 move data backends to arsenal 2019-02-11 10:21:53 -08:00
bert-e 7d1f0f8884 Merge branch 'bugfix/ZENKO-1453-ci-proxyvars' into tmp/octopus/w/8.1/bugfix/ZENKO-1453-ci-proxyvars 2019-02-08 23:20:47 +00:00
Rahul Padigela 766eb10980 bugfix: ZENKO-1452 avoid proxy conflicts in ci 2019-02-08 15:05:01 -08:00
William Abernathy 8970e5ef23 bugfix: S3C-1922_Remove_scality/s3server_refs
S3Server references were out of date (changed to CloudServer).
Fixing them revealed most of the links were broken, a lot of the
language was inelegant, and the organization made the document
hard to follow. Hence the voluminous corrections.

changed all instances of "Cloudserver" to "CloudServer"
2019-02-06 10:14:15 -08:00
Guillaume Gimenez c6362f22f3 feature: ZENKO-1360 fix typo in doc
(this is an external contribution)
2019-02-05 22:22:32 -08:00
William Abernathy b16e4574b6 feature: ZENKO-1466 convert md search doc to rst 2019-02-05 21:54:32 -08:00
Rahul Padigela 44c45374cb improvement: ZENKO-1465 avoid overlogging 2019-02-05 17:47:04 -08:00
Taylor McKinnon 961bddd9c1 Disable failing (or possibly flaky) tests ZENKO-1445 2019-01-28 10:55:28 -08:00
Taylor McKinnon 0cd34a7739 Buffer.byteLength doesn't accept null or undefined 2019-01-28 10:54:23 -08:00
Taylor McKinnon be04c145a5 Move to new docker image based on node:8-alpine 2019-01-28 10:54:19 -08:00
bert-e 91c19e522f Merge branch 'bugfix/ZENKO-1435-pathstyle-https' into q/8.1 2019-01-10 01:07:07 +00:00
Rahul Padigela 16d953d251 bugfix: ZENKO-1435 use CI_CEPH as test flag
The usage of CI env as a test flag for configuring https, pathStyle
options breaks other tests, so the env is being changed to be CI_CEPH
confining the scope to only CEPH backend tests.
2019-01-09 16:28:58 -08:00
Rahul Padigela 39a271d9b3 bugfix: ZENKO-1435 remove coverage commands
This replaces the istanbul coverage commands with plain
mocha output for more accurate test results.
2019-01-09 15:32:07 -08:00
bert-e d8183f95bf Merge branch 'w/8.0/bugfix/S3C-1933-remove-pod-ttl' into tmp/octopus/w/8.1/bugfix/S3C-1933-remove-pod-ttl 2019-01-09 15:24:37 +00:00
bert-e e5f208b8d2 Merge branch 'bugfix/S3C-1933-remove-pod-ttl' into tmp/octopus/w/8.0/bugfix/S3C-1933-remove-pod-ttl 2019-01-09 15:24:36 +00:00
Giacomo Guiulfo 61c99fb4bb bugfix(pfsd): add noCache and noSync options 2019-01-07 17:27:24 -08:00
Giacomo Guiulfo 7d1ea8698f Merge remote-tracking branch 'origin/bugfix/ZENKO-1369-no-cache-option' into w/8.1/bugfix/ZENKO-1369-no-cache-option 2019-01-07 16:58:58 -08:00
Giacomo Guiulfo dd56a3c25c bugfix(s3-data): add noCache option 2019-01-07 16:52:28 -08:00
Rahul Padigela 7e9068a293 improvement: ZENKO-1421 ignore new rules for now 2019-01-06 23:22:25 -08:00
Rahul Padigela 9ef5bb9a41 improvement: ZENKO-1421 update eslint modules 2019-01-06 23:21:59 -08:00
Giacomo Guiulfo f3e403f31c feat: add nfs-mount-v1 support 2018-12-28 10:32:05 -08:00
Bennett Buchanan 8913851975 feature: ZENKO-1399 Update Arsenal dependency 2018-12-20 13:21:45 -08:00
bert-e 624b1ff73d Merge branch 'w/8.1/feature/S3C-1903-public-read' into tmp/octopus/q/8.1 2018-12-20 01:53:44 +00:00
bert-e 480e8f21f7 Merge branch 'improvement/ZENKO-1362-bump-arsenal' into q/8.1 2018-12-20 01:46:19 +00:00
bert-e 4122b85bc6 Merge branch 'w/8.0/feature/S3C-1903-public-read' into tmp/octopus/w/8.1/feature/S3C-1903-public-read 2018-12-20 01:31:36 +00:00
Rahul Padigela d4e7091c09 Merge remote-tracking branch 'origin/feature/S3C-1903-public-read' into w/8.0/feature/S3C-1903-public-read 2018-12-19 17:09:50 -08:00
JianqinWang 20b487ecf0 ZENKO-1343: include md-ingestion as valid bb acc
- move list of service accounts to constants.js
2018-12-19 16:29:29 -08:00
Giacomo Guiulfo 11b20c7302 chore: bump arsenal 2018-12-19 14:58:43 -05:00
Bennett Buchanan df1afd5252 feature: ZENKO-1388 Versioning for MD updates 2018-12-18 09:37:04 -08:00
bert-e 74fa5138c0 Merge branch 'feature/ZENKO-1317/aws-lifecycle-compat' into q/8.1 2018-12-17 21:56:34 +00:00
Salim 33a8e1c479 Merge remote-tracking branch 'origin/w/8.0/bugfix/ZENKO-1337-escape-char' into w/8.1/bugfix/ZENKO-1337-escape-char 2018-12-17 13:33:46 -08:00
bert-e 32e505e7b6 Merge branch 'bugfix/ZENKO-1337-escape-char' into tmp/octopus/w/8.0/bugfix/ZENKO-1337-escape-char 2018-12-17 21:31:46 +00:00
Bennett Buchanan c59fc739a5 feature: ZENKO-1317 AWS lifecycle compat 2018-12-17 12:49:08 -08:00
bert-e a365c63b1c Merge branch 'bugfix/ZENKO-1365-orbit-ring-s3-https' into tmp/octopus/w/8.1/bugfix/ZENKO-1365-orbit-ring-s3-https 2018-12-14 20:06:59 +00:00
Dora Korpar f20b32fbe8 bugfix: ZENKO-1365 Orbit set https for RING+S3 2018-12-14 12:04:38 -08:00
Bennett Buchanan d47b71d612 feature: ZENKO-733 Lifecycle non-versioned buckets 2018-12-12 15:47:57 -08:00
bert-e e4fc624855 Merge branch 'feature/ZENKO-557-gcCloudLocations' into q/8.1 2018-12-12 19:28:00 +00:00
bert-e 0c0bfd75f6 Merge branch 'bugfix/ZENKO-1290-Add-CEPH-copy-object-support' into q/8.1 2018-12-12 19:21:07 +00:00
Bennett Buchanan de0115553c feature: ZENKO-732 Lifecycle transition policies 2018-12-11 16:58:18 -08:00
Jonathan Gramain 9bf709839b ZENKO-557 pass 'deleteVersion' param to backend
Cloud backends require the 'deleteVersion' param to be set to true in
order to delete a specific version.

Add a functional test to GC a versioned AWS location (all cloud
locations use the same interface from multipleBackend, hence should
"just work", assuming backends delete() is tested individually).

Note that backbeat route tests have been moved to
tests/multipleBackends because they require a real AWS location.

Fixed existing backbeat DELETE route test: this test has been moved to
multipleBackend along with other backbeat route tests, which seems to
have enabled it while it used to be disabled; it now fails because of
extra params passed to the listMultipartUploads AWS SDK call. Removing
the extra params fixes the test.

Also removed unused left-over file "tests.bash".
2018-12-11 15:54:45 -08:00
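A rough sketch of the parameter described above; the deleteVersion name comes from the commit message, while the surrounding call shape is an assumption:

    // Build delete params for a cloud backend: a specific version is only
    // removed when deleteVersion is set to true (shape is illustrative).
    function buildBackendDeleteParams(objectKey, reqVersionId) {
        const params = { key: objectKey };
        if (reqVersionId) {
            params.versionId = reqVersionId;
            params.deleteVersion = true; // required by cloud backends
        }
        return params;
    }

    console.log(buildBackendDeleteParams('photo.jpg', '98765'));
    // -> { key: 'photo.jpg', versionId: '98765', deleteVersion: true }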
Taylor McKinnon 65b0514d5e bugfix(ZENKO-1290): Add CEPH copy object support 2018-12-11 14:47:59 -08:00
bert-e 7f25974c08 Merge branch 'feature/ZENKO-1351-pfs-client-delete' into q/8.1 2018-12-11 01:44:35 +00:00
Giacomo Guiulfo 8606524b0e feat(PfsClient): add delete functionality 2018-12-11 00:28:37 +00:00
Rahul Padigela b5e17918b2 feature: ZENKO-1359 add ingestion capability
This flag allows Orbit to identify Zenko instances which have Ingestion
feature capabilities.
2018-12-10 15:12:02 -08:00
Taylor McKinnon c6c08508c6 Make CI env var force path style buckets 2018-12-03 13:21:13 -08:00
bert-e 476acd822e Merge branch 'w/8.0/bugfix/S3C-1843-fix-npm-dependency-vuln' into tmp/octopus/w/8.1/bugfix/S3C-1843-fix-npm-dependency-vuln 2018-11-28 01:46:33 +00:00
JianqinWang 5839daf56c Merge remote-tracking branch 'origin/bugfix/S3C-1843-fix-npm-dependency-vuln' into w/8.0/bugfix/S3C-1843-fix-npm-dependency-vuln 2018-11-27 17:25:22 -08:00
bert-e 8fa9ef3f19 Merge branch 'feature/ZENKO-717' into q/8.1 2018-11-16 00:23:25 +00:00
jeremyds f2e28db1e4 ft: ZENKO-717: replicationBackends constant moved to Arsenal
With the management code moved from cloud server to its own repository,
this constant should be shared in Arsenal constants.
2018-11-15 15:51:06 -08:00
bert-e 324a0013d1 Merge branch 'improvement/update-python-dependencies' into tmp/octopus/w/8.1/improvement/update-python-dependencies 2018-11-15 23:00:32 +00:00
Rahul Padigela 3a1b34a7b0 improvement: ignore .tox dir 2018-11-15 14:57:46 -08:00
Rahul Padigela d100db1950 improvement: update python dependencies for docs
fixes CVE-2018-18074 alert
2018-11-15 14:55:59 -08:00
JianqinWang f2d59bc4f9 ZENKO-833: include ingestion option for bucket creation 2018-11-15 12:11:15 -08:00
Rahul Padigela 360928b60b Merge remote-tracking branch 'origin/w/8.0/feature/S3C-1807-MD-admin-routes' into w/8.1/feature/S3C-1807-MD-admin-routes 2018-11-15 11:42:51 -08:00
Rahul Padigela 8ffb5d2e71 Merge remote-tracking branch 'origin/feature/S3C-1807-MD-admin-routes' into w/8.0/feature/S3C-1807-MD-admin-routes 2018-11-15 11:11:17 -08:00
Giacomo Guiulfo d587e3d63a feat(ZENKO-1270): pfs external backend 2018-11-14 16:57:51 -08:00
Giacomo Guiulfo 144ade3d25 feat(ZENKO-1270): pfs daemon 2018-11-14 16:48:49 -08:00
bert-e c1d72f47ab Merge branch 'w/8.1/bugfix/ZENKO-1311-ci-fixes' into tmp/octopus/q/8.1 2018-11-14 22:03:28 +00:00
Salim 75510a2512 improvement: push to latest tag 2018-11-14 13:38:55 -08:00
Salim c2d44466b7 Merge remote-tracking branch 'origin/w/8.0/bugfix/ZENKO-1311-ci-fixes' into w/8.1/bugfix/ZENKO-1311-ci-fixes 2018-11-14 13:38:32 -08:00
Salim fca1bee0bd Merge remote-tracking branch 'origin/bugfix/ZENKO-1311-ci-fixes' into w/8.0/bugfix/ZENKO-1311-ci-fixes 2018-11-14 13:36:32 -08:00
bert-e de3bd1f4d5 Merge branch 'feature/ZENKO-1044-Ceph-backend-tests' into q/8.1 2018-11-14 21:35:44 +00:00
Taylor McKinnon 7ccfc141bf feat(ZENKO-1044): Add ceph backend tests 2018-11-14 13:14:29 -08:00
vrancurel f65a384ced improvement: use Arsenal BucketClientInterface 2018-11-13 12:09:25 -08:00
vrancurel d810b27eb4 cleanup: Remove duplicated code
Bucketclient and bucketfile seem to be identical to Arsenal.
  We need to keep a dependency on the bucket client lib to avoid a
  circular dependency.
2018-11-13 11:35:51 -08:00
bert-e 2512b29ff8 Merge branches 'w/8.1/bugfix/S3C-1752-legacy-mpu' and 'q/1618/8.0/bugfix/S3C-1752-legacy-mpu' into tmp/octopus/q/8.1 2018-11-10 01:28:23 +00:00
bert-e 8497eac258 Merge branches 'w/8.0/bugfix/S3C-1752-legacy-mpu' and 'q/1618/7.4/bugfix/S3C-1752-legacy-mpu' into tmp/octopus/q/8.0 2018-11-10 01:28:22 +00:00
bert-e 6d3abd1b06 Merge branch 'w/8.0/bugfix/S3C-1752-legacy-mpu' into tmp/octopus/w/8.1/bugfix/S3C-1752-legacy-mpu 2018-11-10 01:05:04 +00:00
Dora Korpar 697d3e0ab3 Merge remote-tracking branch 'origin/bugfix/S3C-1752-legacy-mpu' into w/8.0/bugfix/S3C-1752-legacy-mpu 2018-11-09 16:55:22 -08:00
bert-e 129239e095 Merge branch 'w/8.0/improvement/simplify-ci-unit-test' into tmp/octopus/w/8.1/improvement/simplify-ci-unit-test 2018-11-09 18:19:41 +00:00
bert-e 1618a840a4 Merge branch 'improvement/simplify-ci-unit-test' into tmp/octopus/w/8.0/improvement/simplify-ci-unit-test 2018-11-09 18:19:40 +00:00
Rahul Padigela 5b39afff13 Merge remote-tracking branch 'origin/improvement/ZENKO-1291-extBackends-keepAlive' into w/8.1/improvement/ZENKO-1291-extBackends-keepAlive 2018-11-02 14:22:35 -07:00
Rahul Padigela 7a32b82b2d improvement: ZENKO-1291 disable keepAlive for AWS
This commit disables keep-alive on connections to AWS as connection
reuse showed intermittent socket hang ups.
2018-11-02 14:16:06 -07:00
Rahul Padigela 9db17e5578 improvement: ZENKO-1291 configurable httpAgent options
This commit adds the ability to configure httpAgent options
for external backends. Currently only AWS and GCP are
supported. Azure is not supported as there is no straightforward
way to set a custom httpAgent in the Azure SDK.
The defaults are chosen to be sensible and explicit.
The default maxSockets is infinity, which is expressed as
`null`; other values are inspired by the node.js defaults.
This configuration is applied globally for all locations of
the same type of external backend.
2018-11-02 14:16:01 -07:00
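A hedged sketch of what such a configuration could look like, assuming a per-backend httpAgent section (the key names are illustrative, not taken from the actual config schema; only the semantics described above are kept, e.g. maxSockets: null meaning no limit):

    // Illustrative per-backend HTTP agent options for external backends.
    const externalBackendsConfig = {
        aws_s3: {
            httpAgent: {
                keepAlive: false,   // keep-alive disabled for AWS (see ZENKO-1291)
                keepAliveMsecs: 1000,
                maxFreeSockets: 256,
                maxSockets: null,   // null => infinity (no socket limit)
            },
        },
        gcp: {
            httpAgent: {
                keepAlive: true,
                keepAliveMsecs: 1000,
                maxFreeSockets: 256,
                maxSockets: null,
            },
        },
    };

    module.exports = externalBackendsConfig;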
bert-e 04c987d097 Merge branch 'improvement/ZENKO-1281-logGCPRequestIds' into tmp/octopus/w/8.1/improvement/ZENKO-1281-logGCPRequestIds 2018-10-30 18:40:08 +00:00
Rahul Padigela 654d6105aa improvement: parse and include gcp request id on errors 2018-10-30 11:39:27 -07:00
bert-e bb0fbcd0cb Merge branch 'feature/ua-tagging' into tmp/octopus/w/8.1/feature/ua-tagging 2018-10-30 18:00:45 +00:00
Dora Korpar 75646b55ed ft: ZENKO 1042 azure user-agent tagging 2018-10-30 10:58:52 -07:00
Dora Korpar 73aa224e30 ft: ZENKO 390 aws user-agent tagging 2018-10-30 10:58:04 -07:00
bert-e a994d54e6e Merge branches 'w/8.1/improvement/ZENKO-1281-logRequestIds' and 'q/1599/8.0/improvement/ZENKO-1281-logRequestIds' into tmp/octopus/q/8.1 2018-10-29 20:18:29 +00:00
bert-e e370bd6aa4 Merge branch 'improvement/ZENKO-1281-logRequestIds' into q/8.0 2018-10-29 20:18:29 +00:00
bert-e aabc2e0ba2 Merge branch 'improvement/ZENKO-1281-logRequestIds' into tmp/octopus/w/8.1/improvement/ZENKO-1281-logRequestIds 2018-10-29 04:35:18 +00:00
Rahul Padigela 763c58f36c improvement: log external request id 2018-10-28 21:32:44 -07:00
bert-e 7974726ebf Merge branch 'bugfix/ZENKO-1274-fixLogProperty' into q/8.1 2018-10-26 17:25:08 +00:00
bert-e 82f1d088f7 Merge branch 'w/8.0/improvement/S3C-1745-bucket-deletion' into tmp/octopus/w/8.1/improvement/S3C-1745-bucket-deletion 2018-10-25 01:56:50 +00:00
Rahul Padigela 1b8c4cc15f Merge remote-tracking branch 'origin/improvement/S3C-1745-bucket-deletion' into w/8.0/improvement/S3C-1745-bucket-deletion 2018-10-24 18:54:25 -07:00
bert-e 2a5fc70daa Merge branch 'bugfix/wait-for-local-port-fix' into tmp/octopus/w/8.1/bugfix/wait-for-local-port-fix 2018-10-24 22:22:49 +00:00
jeremyds dd500ed601 Revert "bugfix: reduce flakiness of server startup wait"
This reverts commit a65eb80873.
2018-10-24 11:07:38 -07:00
Dora Korpar 9f2580a397 ft: ZENKO 390 aws user-agent tagging 2018-10-24 11:01:15 -07:00
Rahul Padigela 16cb2934ec improvement: add aws request ids to logs 2018-10-24 10:45:43 -07:00
Dora Korpar b40706bd18 ft: ZENKO 1042 azure user-agent tagging 2018-10-23 14:22:48 -07:00
philipyoo 01c2ef617e bf: ZENKO-1274 fix log property name 2018-10-19 16:49:08 -07:00
Rahul Padigela 417d16988f bugfix: ZENKO-1269 fix undefined callback 2018-10-18 16:09:41 -07:00
bert-e 3b04f89e3e Merge branch 'w/8.0/feature/S3C-1499-apiv2' into tmp/octopus/w/8.1/feature/S3C-1499-apiv2 2018-10-17 19:11:16 +00:00
Dora Korpar 2404d52a78 Merge remote-tracking branch 'origin/feature/S3C-1499-apiv2' into w/8.0/feature/S3C-1499-apiv2 2018-10-17 11:50:14 -07:00
Bennett Buchanan a92f503d1d bugfix: ZENKO-697 Add abort MPU in route backbeat 2018-10-08 17:27:30 -07:00
jeremyds 62df2c09c9 ft: ZENKO-714: management agent client in S3.
Now that there is a new process handling the management feature, we should add
a websocket client in S3 to receive the overlay. To safely move from the
management code in S3 to the full use of the new management process,
this code is run only if the env variable MANAGEGEMENT_USE_AGENT is
defined and equal to one.
2018-09-27 09:25:34 -07:00
jeremyds a6f900627b ft: ZENKO-714: export applyAndSaveOverlay
This function will be needed by the management agent client in S3.
Export it and take the opportunity to move it to configuration.js.
2018-09-25 15:10:57 -07:00
jeremyds f708b87c7b ft: ZENKO-713: save and patch role
Make the management process save the overlay into the metadata backend
before sending it to its client.
2018-09-25 15:09:26 -07:00
jeremyds 4f30e62279 ft: ZENKO-713: fix msg format 2018-09-24 17:38:34 -07:00
jeremyds 7932127cf4 ft: ZENKO-713: fix management process shutdown 2018-09-24 17:38:34 -07:00
bert-e e035216faa Merge branch 'feature/ZENKO-1186/stalledSanityCheck' into tmp/octopus/w/8.1/feature/ZENKO-1186/stalledSanityCheck 2018-09-21 00:24:27 +00:00
Alexander Chan 674b2626a4 ft: ZENKO-1186 stalled sanity check 2018-09-20 15:43:18 -07:00
bert-e dd83b0fee0 Merge branch 'improvement/arsenalUpdate' into tmp/octopus/w/8.1/improvement/arsenalUpdate 2018-09-16 03:09:15 +00:00
Alexander Chan a518e48d09 improvement: update arsenal 2018-09-15 19:37:45 -07:00
bert-e 9e020e8791 Merge branch 'bugfix/ZENKO-1146-localReadUntilReplicatedToPreferredRead' into tmp/octopus/w/8.1/bugfix/ZENKO-1146-localReadUntilReplicatedToPreferredRead 2018-09-14 23:10:05 +00:00
Jonathan Gramain 83356f9510 bf: ZENKO-1146 read from source until replicated to preferred read
When a preferred read location is defined for an object, and when
status of replication to this location is PENDING or FAILED, read
object from the source location instead of returning an error.

Note that reads are not guaranteed to succeed if the source location
is a transient source, because of a race condition with the garbage
collector. This will be addressed in a future change.
2018-09-14 16:00:26 -07:00
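A small sketch of the read-location decision described above (the field and status names follow the usual Zenko replication metadata shape but are assumptions here):

    // Fall back to the source location while replication to the preferred
    // read location is still PENDING or FAILED.
    function pickReadLocation(objectMD, preferredRead) {
        const backends = (objectMD.replicationInfo && objectMD.replicationInfo.backends) || [];
        const target = backends.find(b => b.site === preferredRead);
        if (!target || target.status !== 'COMPLETED') {
            return objectMD.dataStoreName; // read from the source location
        }
        return preferredRead;
    }

    const md = {
        dataStoreName: 'local-transient',
        replicationInfo: { backends: [{ site: 'aws-eu', status: 'PENDING' }] },
    };
    console.log(pickReadLocation(md, 'aws-eu')); // -> 'local-transient'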
bert-e 7fc8479a5b Merge branch 'bugfix/ZENKO-1124-bump-arsenal-version' into tmp/octopus/w/8.1/bugfix/ZENKO-1124-bump-arsenal-version 2018-09-11 01:13:46 +00:00
jeremyds d8010b26ca Merge remote-tracking branch 'origin/development/8.0' into HEAD 2018-09-10 18:12:58 -07:00
jeremyds d8b4e241fc ZENKO-1124: mongo listing fix, bump arsenal version. 2018-09-10 18:07:09 -07:00
Nicolas Humbert 314ad123b8 Merge remote-tracking branch 'origin/bugfix/ZENKO-1112/log' into w/8.1/bugfix/ZENKO-1112/log 2018-09-10 14:16:38 -07:00
Nicolas Humbert b640764a54 fx: ZENKO-1112 Management client error logging 2018-09-10 14:08:18 -07:00
bert-e 88fc1d3611 Merge branch 'bugfix/ZENKO-1113-gcp-del-204' into tmp/octopus/w/8.1/bugfix/ZENKO-1113-gcp-del-204 2018-09-07 17:57:39 +00:00
Rahul Padigela 7084975d69 bugfix: filter NoSuchKey errors when deleting on AWS/GCP
This accounts for 404s when trying to delete something that doesn't exist
or hasn't been replicated yet. It returns no error to the client as the delete
is a no-op, which is in line with S3 API compatibility.
2018-09-07 13:56:36 -04:00
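A sketch of the filtering logic described above, using AWS SDK v2 style error fields (the helper name is made up; the actual backend wiring in CloudServer is not reproduced here):

    // Treat a missing key as a successful delete: the object either was never
    // replicated or is already gone, so the delete is a no-op for the client.
    function filterMissingKeyError(err, callback) {
        if (err && (err.code === 'NoSuchKey' || err.statusCode === 404)) {
            return callback(null);
        }
        return callback(err || null);
    }

    module.exports = filterMissingKeyError;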
Nicolas Humbert 96f8784003 Merge remote-tracking branch 'origin/bugfix/ZENKO-1123/report' into w/8.1/bugfix/ZENKO-1123/report 2018-09-06 17:11:37 -07:00
Nicolas Humbert 34cf34551c fx: ZENKO-1108 fail to get metrics report 2018-09-06 16:54:34 -07:00
bert-e 127375f536 Merge branch 'feature/ZENKO-1108/set-region-for-AWS-locations' into tmp/octopus/w/8.1/feature/ZENKO-1108/set-region-for-AWS-locations 2018-09-05 18:38:43 +00:00
Bennett Buchanan b3ad82b0e4 feature: ZENKO-1108 Set region for AWS locations 2018-09-05 11:35:08 -07:00
bert-e de44582b8f Merge branch 'bugfix/ZENKO-1091-redirect-payload' into tmp/octopus/w/8.1/bugfix/ZENKO-1091-redirect-payload 2018-09-05 05:05:58 +00:00
Rahul Padigela d982f8f603 bugfix: set region endpoint during location setup
This commit removes the current redirect code and adds a setup method
that makes a call to get the region of the destination bucket on AWS S3
and updates the endpoint to use the correct region-specific endpoint.
This should avoid redirects from AWS, which in turn mess up the payload,
causing errors during PUTs. The setup call will be cleaned up to address
the technical debt of waiting for the callback.
2018-09-05 01:04:47 -04:00
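A sketch of the setup step described above using the AWS SDK v2 getBucketLocation call; when it runs and how it is wired into CloudServer are assumptions:

    // s3 is an AWS SDK v2 S3 client instance. Look up the destination bucket's
    // region once and point the client at the matching regional endpoint, so
    // AWS no longer redirects (and corrupts) PUT payloads.
    function setupRegionalEndpoint(s3, bucket, callback) {
        s3.getBucketLocation({ Bucket: bucket }, (err, data) => {
            if (err) {
                return callback(err);
            }
            const region = data.LocationConstraint || 'us-east-1';
            s3.config.update({ region, endpoint: `s3.${region}.amazonaws.com` });
            return callback(null, region);
        });
    }

    module.exports = setupRegionalEndpoint;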
Rahul Padigela 9394d4111b improvement: revert ZENKO-819 regional endpoints 2018-09-04 16:00:18 -04:00
bert-e 6ae4d89232 Merge branch 'improvement/ZENKO-1100-invalid-loc-metric' into tmp/octopus/w/8.1/improvement/ZENKO-1100-invalid-loc-metric 2018-08-31 23:33:56 +00:00
Rahul Padigela b17904e7f5 improvement: use updated UtapiClient for location metric 2018-08-31 16:32:43 -07:00
jeremyds 007f742b92 ft: ZENKO-713: management agent websocket server
This commit creates a websocket server which adds a '/watch' route. The
management agent registers a listener for the new configuration event and
then sends it to its client.
2018-08-31 12:25:02 -07:00
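A hedged sketch of such a watch route using the ws module (the port, path handling and event name below are assumptions, not the actual management agent values):

    const { EventEmitter } = require('events');
    const WebSocket = require('ws');

    const configEvents = new EventEmitter();            // emits new overlays
    const wss = new WebSocket.Server({ port: 8010, path: '/watch' });

    wss.on('connection', socket => {
        const onOverlay = overlay => socket.send(JSON.stringify(overlay));
        configEvents.on('overlay', onOverlay);          // assumed event name
        socket.on('close', () => configEvents.removeListener('overlay', onOverlay));
    });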
jeremyds e7a23276ce ft: ZENKO-713: metadata.setup in initManagement.
The future management process needs the metadata setup. The call is added at
the beginning of the initManagement function, conditioned on the new management
agent being used. Setting up metadata in this function takes advantage
of its auto-retry mechanism.
2018-08-31 12:25:02 -07:00
jeremyds a779ce91cf ft: ZENKO-713: overlay event listener
The current management code running in S3 patches and saves the received
overlay. The new management process, which will handle the whole
management feature, won't patch and save the new overlay; that is an
S3-specific task.

This commit adds a new overlay event listener to the management
push code (where the new overlay is received). It will allow the
management process to send the new overlay to its client.

A new env variable is added in this commit. It is temporarily used to
safely remove the management feature from the S3 process while keeping
the previous behavior at first.
2018-08-31 12:25:02 -07:00
jeremyds 2228d4a59a ft: ZENKO-713: management agent message type definition.
Define a new message type for the future communication between the
management agent and its client.
2018-08-31 12:25:02 -07:00
jeremyds 052f6845f5 ft: ZENKO-713: fix management::SaveconfigurationVersion error handling 2018-08-31 12:25:02 -07:00
jeremyds f961e8e7c5 ft: ZENKO-713: add callback to initManagement
The future management agent process will init the management before
launching a websocket server. This commit adds an optional callback to the
initManagement function, called once the init is done, with the loaded
overlay as a parameter.
2018-08-31 12:25:02 -07:00
jeremyds 506fce2343 ft: ZENKO-713: log msg improvement 2018-08-31 12:25:02 -07:00
jeremyds 4b28638e07 ft: ZENKO-712: linter errors. 2018-08-31 12:25:02 -07:00
bert-e 17712a8181 Merge branch 'bugfix/ZENKO-1024/addPendingFieldReportHandler' into tmp/octopus/w/8.1/bugfix/ZENKO-1024/addPendingFieldReportHandler 2018-08-29 20:04:06 +00:00
Alexander Chan 7bfe526313 bf: ZENKO-1024 add pending field to crr metric reports 2018-08-29 13:03:19 -07:00
bert-e e7f7bb060b Merge branches 'w/8.1/bugfix/ZENKO-1051_Add_sentinel_support_to_redis_config' and 'q/1500/8.0/bugfix/ZENKO-1051_Add_sentinel_support_to_redis_config' into tmp/octopus/q/8.1 2018-08-24 02:39:22 +00:00
bert-e 523998070d Merge branch 'bugfix/ZENKO-1051_Add_sentinel_support_to_redis_config' into q/8.0 2018-08-24 02:39:22 +00:00
bert-e d71b92085c Merge branch 'bugfix/ZENKO-1051_Add_sentinel_support_to_redis_config' into tmp/octopus/w/8.1/bugfix/ZENKO-1051_Add_sentinel_support_to_redis_config 2018-08-24 02:20:12 +00:00
Taylor McKinnon e06584a20e ZENKO-1051 Add redis sentinel support 2018-08-23 17:57:38 -07:00
bert-e 0651fb2aff Merge branch 'bugfix/ZENKO-1013/useBackbeatApiMetrics' into q/8.0 2018-08-24 00:14:47 +00:00
bert-e d04d75f02f Merge branches 'w/8.1/bugfix/ZENKO-1013/useBackbeatApiMetrics' and 'q/1490/8.0/bugfix/ZENKO-1013/useBackbeatApiMetrics' into tmp/octopus/q/8.1 2018-08-24 00:14:47 +00:00
bert-e 5fe345bba6 Merge branch 'bugfix/ZENKO-1013/useBackbeatApiMetrics' into tmp/octopus/w/8.1/bugfix/ZENKO-1013/useBackbeatApiMetrics 2018-08-23 23:46:33 +00:00
Alexander Chan 1fcbcab749 bf: ZENKO-1013 use backbeat api metrics routes for stats 2018-08-23 16:02:32 -07:00
bert-e c0d2cabe68 Merge branch 'bugfix/ZENKO-876-flaky-mpu-test' into tmp/octopus/w/8.1/bugfix/ZENKO-876-flaky-mpu-test 2018-08-23 21:15:14 +00:00
Dora Korpar 746c8a7542 bf: ZENKO 876 increase flaky test timeout 2018-08-23 13:10:16 -07:00
bert-e f0482cf965 Merge branch 'w/8.0/bugfix/ZENKO-945-arsenal-fix-in-s3' into tmp/octopus/w/8.1/bugfix/ZENKO-945-arsenal-fix-in-s3 2018-08-23 18:06:00 +00:00
jeremyds 846f15ad77 Merge remote-tracking branch 'origin/bugfix/ZENKO-945-arsenal-fix-in-s3' into w/8.0/bugfix/ZENKO-945-arsenal-fix-in-s3 2018-08-23 10:41:22 -07:00
bert-e 734511f4fb Merge branch 'w/8.0/bugfix/ZENKO-1018/use-disabled-status' into tmp/octopus/w/8.1/bugfix/ZENKO-1018/use-disabled-status 2018-08-22 19:45:00 +00:00
Bennett Buchanan 8c94b5e390 Merge remote-tracking branch 'origin/bugfix/ZENKO-1018/use-disabled-status' into w/8.0/bugfix/ZENKO-1018/use-disabled-status 2018-08-22 12:24:53 -07:00
bert-e 0125ad278f Merge branches 'w/8.1/feature/ZENKO-925-increaseMetricsExpiry' and 'q/1475/8.0/feature/ZENKO-925-increaseMetricsExpiry' into tmp/octopus/q/8.1 2018-08-09 01:19:32 +00:00
bert-e 24171052fd Merge branch 'feature/ZENKO-925-increaseMetricsExpiry' into q/8.0 2018-08-09 01:19:32 +00:00
bert-e e9ea6ad9c1 Merge branch 'feature/ZENKO-925-increaseMetricsExpiry' into tmp/octopus/w/8.1/feature/ZENKO-925-increaseMetricsExpiry 2018-08-09 01:00:44 +00:00
philipyoo 7a47236b60 ft: ZENKO-925 increase metrics expiry to 24hrs 2018-08-08 17:55:17 -07:00
bert-e e0a3dd1475 Merge branch 'feature/ZNC-22-developer-docs' into tmp/octopus/w/8.1/feature/ZNC-22-developer-docs 2018-08-09 00:07:41 +00:00
LaureVergeron d9694a92ba ZNC-22: DOC: Add developer bootstrap guide 2018-08-08 17:03:21 -07:00
bert-e 49055d540a Merge branch 'bugfix/ZENKO-903/getBucketListOnReport' into q/8.0 2018-08-08 20:28:52 +00:00
bert-e 2152b6cd28 Merge branches 'w/8.1/bugfix/ZENKO-903/getBucketListOnReport' and 'q/1473/8.0/bugfix/ZENKO-903/getBucketListOnReport' into tmp/octopus/q/8.1 2018-08-08 20:28:52 +00:00
bert-e a8664d8dcc Merge branch 'bugfix/ZENKO-903/getBucketListOnReport' into tmp/octopus/w/8.1/bugfix/ZENKO-903/getBucketListOnReport 2018-08-08 20:09:36 +00:00
bert-e 467ad71cdb Merge branch 'bugfix/ZENKO-916-doNotReplicateACLChange' into q/8.0 2018-08-08 19:34:59 +00:00
bert-e a5f94abe3c Merge branches 'w/8.1/bugfix/ZENKO-916-doNotReplicateACLChange' and 'q/1465/8.0/bugfix/ZENKO-916-doNotReplicateACLChange' into tmp/octopus/q/8.1 2018-08-08 19:34:59 +00:00
bert-e f926ee0281 Merge branch 'bugfix/ZENKO-922/addRedisDisconnect' into tmp/octopus/w/8.1/bugfix/ZENKO-922/addRedisDisconnect 2018-08-08 19:03:59 +00:00
Alexander Chan cadb594b31 bf: ZENKO-903 add tests for report itemCount 2018-08-08 11:44:20 -07:00
Alexander Chan 7cef25cdbd improvement: update arsenal 2018-08-08 10:06:10 -07:00
Alexander Chan c05e41a567 bf: ZENKO-922 add redis disconnect tests 2018-08-07 20:38:08 -07:00
bert-e b6c708539d Merge branch 'bugfix/ZENKO-916-doNotReplicateACLChange' into tmp/octopus/w/8.1/bugfix/ZENKO-916-doNotReplicateACLChange 2018-08-06 23:56:28 +00:00
Jonathan Gramain 544d464c6e bf: ZENKO-916 do not replicate ACL change
We should not reset the replication status to PENDING when changing
ACLs of an object. This is because we choose not to replicate ACLs to
replication targets. Later on we may treat this as a feature, but not
now.

As per the description in ZENKO-916, this fixes the issue because putting
ACLs is the only known problematic case that breaks with a transient source
location. E.g. object tags get replicated correctly because we replicate
tags specifically in such a case.
2018-08-06 16:41:30 -07:00
bert-e d6dc136b8b Merge branch 'w/8.0/bugfix/S3C-1586/listing_while_upgrade' into tmp/octopus/w/8.1/bugfix/S3C-1586/listing_while_upgrade 2018-08-02 12:50:11 +00:00
Mathieu Cassagne b4ba5fa7e2 Merge remote-tracking branch 'origin/bugfix/S3C-1586/listing_while_upgrade' into w/8.0/bugfix/S3C-1586/listing_while_upgrade 2018-08-02 14:49:35 +02:00
bert-e 73e3ed2118 Merge branch 'feature/ZENKO-785-metadata-healthcheck' into tmp/octopus/w/8.1/feature/ZENKO-785-metadata-healthcheck 2018-07-31 20:12:15 +00:00
Giacomo Guiulfo 2e40464a38 chore: update arsenal commit hash 2018-07-31 13:04:14 -07:00
Giacomo Guiulfo 6ed42823a1 feat: check metadata in non-deep healthchecks 2018-07-31 13:03:17 -07:00
bert-e bb82fc141d Merge branches 'w/8.1/bugfix/ZENKO-791-quota-update-on-putfail' and 'q/1448/8.0/bugfix/ZENKO-791-quota-update-on-putfail' into tmp/octopus/q/8.1 2018-07-31 00:42:47 +00:00
bert-e c28c2abd6c Merge branch 'bugfix/ZENKO-791-quota-update-on-putfail' into q/8.0 2018-07-31 00:42:47 +00:00
bert-e 32ddcab681 Merge branch 'bugfix/ZENKO-791-quota-update-on-putfail' into tmp/octopus/w/8.1/bugfix/ZENKO-791-quota-update-on-putfail 2018-07-31 00:22:47 +00:00
bert-e 69543302a6 Merge branches 'w/8.1/bugfix/ZENKO-884_Honor_PUSH_STATS_in_push_mode' and 'q/1452/8.0/bugfix/ZENKO-884_Honor_PUSH_STATS_in_push_mode' into tmp/octopus/q/8.1 2018-07-31 00:14:26 +00:00
bert-e 69530a75f7 Merge branch 'bugfix/ZENKO-884_Honor_PUSH_STATS_in_push_mode' into q/8.0 2018-07-31 00:14:26 +00:00
bert-e e3d7f8b57e Merge branch 'bugfix/ZENKO-751-maxObjectKeyLimitVersionBump' into tmp/octopus/w/8.1/bugfix/ZENKO-751-maxObjectKeyLimitVersionBump 2018-07-30 23:29:21 +00:00
philipyoo 155327ea30 bf: ZENKO-751 update arsenal package 2018-07-30 16:27:49 -07:00
bert-e 69324370f0 Merge branch 'bugfix/ZENKO-884_Honor_PUSH_STATS_in_push_mode' into tmp/octopus/w/8.1/bugfix/ZENKO-884_Honor_PUSH_STATS_in_push_mode 2018-07-30 23:02:57 +00:00
Dora Korpar 4964fa4b56 bf: ZENKO 791 quota update on put fail 2018-07-30 15:58:16 -07:00
Taylor McKinnon 33b2d5bec0 bf(ZENKO-884): Honor PUSH_STATS in push management mode 2018-07-30 15:55:01 -07:00
bert-e e8e144aefe Merge branch 'bugfix/ZENKO-835-lifecycleShouldNotReplicateDeleteMarkers' into tmp/octopus/w/8.1/bugfix/ZENKO-835-lifecycleShouldNotReplicateDeleteMarkers 2018-07-28 00:37:25 +00:00
Jonathan Gramain 5e17ec8343 bf: ZENKO-835 do not replicate lifecycle actions
All actions coming from lifecycle (or potentially any service account)
are no longer replicated. This now applies to delete markers created
by lifecycle expiration rules.
2018-07-27 17:35:38 -07:00
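A sketch of the guard this commit describes (the function and its inputs are illustrative): writes made by a service account such as lifecycle are not marked PENDING for replication, so expiration delete markers stay local:

    function replicationStatusForWrite(isServiceAccountRequest, bucketHasReplication) {
        if (!bucketHasReplication || isServiceAccountRequest) {
            return null;        // leave replicationInfo untouched, nothing to replicate
        }
        return 'PENDING';       // normal user write: schedule replication
    }

    console.log(replicationStatusForWrite(true, true));  // -> null
    console.log(replicationStatusForWrite(false, true)); // -> 'PENDING'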
bert-e 0d01427051 Merge branch 'w/8.0/feature/S3C-1602-logEndMessageInBackbeatRoutes' into tmp/octopus/w/8.1/feature/S3C-1602-logEndMessageInBackbeatRoutes 2018-07-26 23:23:41 +00:00
bert-e 6e7f5c446a Merge branch 'feature/S3C-1602-logEndMessageInBackbeatRoutes' into tmp/octopus/w/8.0/feature/S3C-1602-logEndMessageInBackbeatRoutes 2018-07-26 23:23:41 +00:00
bert-e 7f463bc437 Merge branch 'improvement/ZENKO-762-redis-ha-metrics' into tmp/octopus/w/8.1/improvement/ZENKO-762-redis-ha-metrics 2018-07-26 01:42:45 +00:00
Rahul Padigela cc0ef104bc improvement: ZENKO-762 read crr stats from Redis HA
This changes the behavior of the report handler to read the metrics
from Redis HA instead of from localCache
2018-07-25 17:20:33 -07:00
bert-e 09ffe946bc Merge branch 'improvement/ZENKO-819-regional-endpoints' into tmp/octopus/w/8.1/improvement/ZENKO-819-regional-endpoints 2018-07-25 23:12:38 +00:00
Rahul Padigela e71dae7675 improvement: ZENKO-819 use regional endpoints
This commit adds a retry for requests to AWS where the api call fails as the common endpoint
s3.amazonaws.com is not redirected to the regional endpoints anymore. If the original call
receives AuthorizationHeaderMalformed, a GET Bucket location call is made and the endpoint is
updated to use the correct region in its hostname.
2018-07-25 15:29:55 -07:00
bert-e 261dd48f7f Merge branch 'feature/ZENKO-829-update-utapi-version' into tmp/octopus/w/8.1/feature/ZENKO-829-update-utapi-version 2018-07-24 21:46:58 +00:00
Dora Korpar 8db2d9dd45 feature: ZENKO 829 bump utapi for rc4 2018-07-24 14:44:16 -07:00
bert-e 27c813eaea Merge branch 'q/1393/8.0/feature/ZENKO-777/addLocReplicationReports' into tmp/octopus/q/8.1 2018-07-24 21:21:35 +00:00
bert-e 2263347229 Merge branch 'feature/ZENKO-777/addLocReplicationReports' into q/8.0 2018-07-24 21:21:35 +00:00
bert-e 27364bc4dd Merge branch 'feature/ZENKO-712-management-agent' into q/8.1 2018-07-24 18:21:08 +00:00
jeremyds e12642d9a5 ft: ZENKO-712: remove unused configuration item. 2018-07-24 09:47:51 -07:00
jeremyds 710a01dea6 ft: ZENKO-712: new process to handle management. 2018-07-24 09:47:51 -07:00
jeremyds 20ecdd7a62 ft: ZENKO-712: management push stat, add an error handling
The error was triggered with the new process when shutting down the S3
server and led to an exception.
2018-07-24 09:47:51 -07:00
jeremyds f920c4e4d6 ft: ZENKO-712: handle net error event for management.
Log an error instead of crashing when the management push code fails to
connect to S3.
2018-07-24 09:47:51 -07:00
bert-e 5255c93e02 Merge branch 'improvement/ZENKO-760-no-cluster' into q/8.0 2018-07-23 20:01:57 +00:00
bert-e b55c197c58 Merge branches 'w/8.1/improvement/ZENKO-760-no-cluster' and 'q/1404/8.0/improvement/ZENKO-760-no-cluster' into tmp/octopus/q/8.1 2018-07-23 20:01:57 +00:00
bert-e a61b8fe02b Merge branch 'improvement/ZENKO-760-no-cluster' into tmp/octopus/w/8.1/improvement/ZENKO-760-no-cluster 2018-07-23 19:41:47 +00:00
bert-e 0c3763d418 Merge branch 'bugfix/ZENKO-745-uniqueLocationMetricsRedisKeys' into q/8.0 2018-07-23 19:32:44 +00:00
bert-e 4b8505d3fb Merge branches 'w/8.1/bugfix/ZENKO-745-uniqueLocationMetricsRedisKeys' and 'q/1413/8.0/bugfix/ZENKO-745-uniqueLocationMetricsRedisKeys' into tmp/octopus/q/8.1 2018-07-23 19:32:44 +00:00
bert-e 01d20f4e34 Merge branch 'bugfix/ZENKO-745-uniqueLocationMetricsRedisKeys' into tmp/octopus/w/8.1/bugfix/ZENKO-745-uniqueLocationMetricsRedisKeys 2018-07-23 19:14:40 +00:00
Rahul Padigela 4c138ef967 improvement: ZENKO-760 prepare setup to work without cluster
To be in line with the Kubernetes sentiment of running one process per pod, the cluster
module usage is removed when workers are configured to be 1. Another change is to move
the metadata setup to make sure connection to MongoDB is in place before accepting any
requests.
The code related to Orbit's management has been adapted to work in non-cluster mode.
2018-07-23 11:25:23 -07:00
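A sketch of the single-worker startup path described above (the config field and function names are assumptions):

    const cluster = require('cluster');

    // Skip the cluster module entirely when only one worker is configured,
    // so exactly one process runs per Kubernetes pod.
    function start(config, runServer) {
        if (config.workers <= 1) {
            return runServer();
        }
        if (cluster.isMaster) {
            for (let i = 0; i < config.workers; i++) {
                cluster.fork();
            }
            return undefined;
        }
        return runServer();
    }

    module.exports = start;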
bert-e de9fb50734 Merge branches 'w/8.1/improvement/ZENKO-780-configure-push-stats' and 'q/1407/8.0/improvement/ZENKO-780-configure-push-stats' into tmp/octopus/q/8.1 2018-07-23 17:29:18 +00:00
bert-e 486abd42ea Merge branch 'improvement/ZENKO-780-configure-push-stats' into q/8.0 2018-07-23 17:29:18 +00:00
bert-e 14a0356b56 Merge branch 'improvement/ZENKO-780-configure-push-stats' into tmp/octopus/w/8.1/improvement/ZENKO-780-configure-push-stats 2018-07-23 17:09:18 +00:00
bert-e cb49c41559 Merge branch 'q/1374/8.0/improvement/debug-s3-logs' into tmp/octopus/q/8.1 2018-07-23 16:13:29 +00:00
bert-e 94898f2b30 Merge branch 'w/8.0/improvement/debug-s3-logs' into tmp/octopus/q/8.0 2018-07-23 16:13:28 +00:00
Thomas Carmet 37a52620ee bumping cloudserver version to 8.1 2018-07-23 10:20:36 +02:00
Jonathan Gramain 23c016dd3d bf: ZENKO-745 unique location metrics redis keys
Use the new 'objectId' field set by Orbit on each location for the redis
key, instead of the location name. This will avoid conflicts if
locations are re-created with the same name.

Take the opportunity to enforce an objectId to be set on each
location, even if the config is from a static file, to make its use
durable.
2018-07-20 18:22:30 -07:00
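A sketch of the key scheme change described above (the key prefix and metric name are made up; only the swap from location name to objectId is the point):

    // Keying location metrics by the immutable objectId instead of the location
    // name avoids collisions when a location is deleted and re-created with the
    // same name.
    function locationMetricKey(location) {
        // before: `s3:location:${location.name}:bytes`
        return `s3:location:${location.objectId}:bytes`;
    }

    console.log(locationMetricKey({ name: 'aws-eu', objectId: '0a1b2c3d4e5f' }));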
Giacomo Guiulfo afbd2a771e rf: configure push stats in poll mode 2018-07-20 15:51:21 -07:00
Dora Korpar fada99222f bf: ZENKO 790 redis limit return 2018-07-20 13:09:44 -07:00
bert-e 868435bde8 Merge branch 'improvement/ZENKO-749-removeS3toS3ObjectTestFrom-aws-node-sdk' into q/8.0 2018-07-19 21:58:12 +00:00
bert-e 429ca8b1c6 Merge branch 'development/8.0' into tmp/octopus/w/8.0/improvement/debug-s3-logs 2018-07-19 21:53:42 +00:00
Rahul Padigela 01ada9701c Merge remote-tracking branch 'origin/improvement/debug-s3-logs' into w/8.0/improvement/debug-s3-logs 2018-07-19 13:59:53 -07:00
anurag4dsb e15b021112 improvement: ZENKO-749 removed crr s3 to s3 object test from aws-node-sdk 2018-07-19 13:47:16 -07:00
Jonathan Gramain bed3422f15 bf: ZENKO-660 api proxy fixes
- reject unauthenticated requests on /_/backbeat/api routes:

  - don't allow access to `/_/backbeat/api` passthrough routes for
    public users. It's a temporary option until we have support for
    admin accounts or accounts with admin privileges.

- fix error handling by catching errors raised by http-proxy module
2018-07-19 13:43:58 -07:00
Alexander Chan aac8163c34 ft: ZENKO-777 add pause/resume status reports 2018-07-19 13:39:52 -07:00
Dora Korpar 5ee0a6a310 bf: ZENKO 773 failing lifecycle unit tests 2018-07-19 11:10:15 -07:00
philipyoo 2119e1d195 bf: bump arsenal version changes 2018-07-17 13:35:07 -07:00
Taylor McKinnon da4bfafcfc bf(zenko-473): Handle a unspecified storage quota correctly 2018-07-16 15:02:59 -07:00
Dora Korpar a0ef4ae3d6 bf: ZENKO 726 storage limit 2018-07-13 14:15:16 -07:00
Dora Korpar 2f79846df2 bf: ZENKO 726 storage limit config level 2018-07-13 11:58:48 -07:00
Giacomo Guiulfo c7bc911b91 bugfix: gcp behind an https proxy 2018-07-12 12:48:00 -07:00
Bennett Buchanan aa2b2074b7 bugfix: ZENKO-315 Update create NFS bucket script
* Add node shebang to NFS util
* Add location constraint parameter
2018-07-09 15:27:36 -07:00
Bennett Buchanan 5175639465 bugfix: ZENKO-665 Only create version for replica 2018-07-09 14:21:15 -07:00
Dora Korpar 760fc9e6ea bf: ZENKO 646 allow mdsearch by replication status 2018-07-09 13:34:06 -07:00
Giacomo Guiulfo 96f5dc72a0 ft: add "no_proxy" environment variables 2018-07-09 11:42:56 -07:00
bert-e 5033ecffb2 Merge branch 'feature/ZENKO-679-utapi-config' into q/8.0 2018-07-06 23:42:37 +00:00
bert-e 5cfa98e26a Merge branch 'bugfix/ZENKO-632-readable-destroy' into q/8.0 2018-07-06 23:29:26 +00:00
Salim 64b67cc6ef ft: Add storage quota config 2018-07-06 16:10:30 -07:00
Rahul Padigela 7b2b23b80e bugfix: ZENKO-632 avoid process crash
This brings in the Arsenal check that prevents CloudServer from crashing when a
Readable stream's destroy method is accessed without checking if it is available.
2018-07-06 16:03:33 -07:00
bert-e 1545e7d89f Merge branch 'bugfix/ZENKO-635-deleteMarkerReplicatedToRoot' into q/8.0 2018-07-06 23:00:38 +00:00
Jonathan Gramain 0ae315ee6d bf: ZENKO-635 delete marker not replicated correctly
Delete markers were lacking the bucket prefix on the backend keys for
cloud targets, which means they were ineffective in making the actual
objects appear deleted. Fix this by adding a new "toObjectGetInfo"
method in multiple backends gateway, because right now the
responsibility of generating the key is per backend (IMO should be
reworked to make it global to all backends, but not the time to
refactor this).
2018-07-06 15:03:08 -07:00
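A sketch of the key-building idea this commit describes (the real toObjectGetInfo signature is not reproduced; only the bucket-prefixing behavior is shown):

    // Cloud backends store objects under "<bucket>/<key>", so a delete marker
    // must target the same prefixed key to actually hide the object.
    function toObjectGetInfo(objectKey, bucketName) {
        return { key: `${bucketName}/${objectKey}` };
    }

    console.log(toObjectGetInfo('photo.jpg', 'my-bucket')); // { key: 'my-bucket/photo.jpg' }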
bert-e 499e2d3be2 Merge branch 'feature/ZENKO-562' into q/8.0 2018-07-06 21:20:35 +00:00
Dora Korpar 33a9c2becf bf: ZENKO 676 - only location metric 2018-07-06 12:29:01 -07:00
Giacomo Guiulfo ffbe5fc6af feat: set replication endpoints config file path 2018-07-06 10:46:25 -07:00
bert-e 3bd3cf06d3 Merge branch 'bugfix/ZENKO-659-getReplicationInfoWithPreferredRead' into q/8.0 2018-07-05 23:22:25 +00:00
bert-e b51a959286 Merge branch 'improvement/ZENKO-641-websocket-exception' into q/8.0 2018-07-05 22:54:39 +00:00
Rahul Padigela bb6dbb94a1 improvement: log errors to avoid ws throwing exceptions
The websocket npm module throws errors in methods where callbacks are
optional but not passed as a param. To avoid this, a logging helper
callback is passed so that if an error is raised, it gets logged.
2018-07-05 15:35:07 -07:00
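A sketch of the helper described above (names are illustrative): always hand the websocket methods a callback so errors are logged instead of thrown:

    function wsLogCallback(logger, action) {
        return err => {
            if (err) {
                logger.error(`websocket ${action} failed`, { error: err.message });
            }
        };
    }

    // usage sketch: socket.send(payload, wsLogCallback(log, 'send'));
    module.exports = wsLogCallback;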
Jonathan Gramain ae17a5bf5b bf: ZENKO-659 fix "storageType" in object replicationInfo
When a preferred read location is defined, the storageType attribute
did not contain that location.

Also fix the lookup of the location type in "x-scal-storage-type" in
backbeat routes: create the array of location types first instead of
looking for a substring directly.
2018-07-05 15:02:05 -07:00
bert-e 8c93fed2b1 Merge branch 'w/8.0/improvement/using-vault-on-all-secrets' into tmp/octopus/q/8.0 2018-07-05 19:57:14 +00:00
bert-e c5a07b99c7 Merge branch 'improvement/using-vault-on-all-secrets' into tmp/octopus/w/8.0/improvement/using-vault-on-all-secrets 2018-07-05 09:30:56 +00:00
Dora Korpar a6d8522950 ft: ZENKO 637 add proxy cert env var 2018-07-03 17:42:06 -07:00
Taylor McKinnon 57ddcd1ef7 Fix: Add Accept-Ranges header to response 2018-07-03 14:38:33 -07:00
Alexander Chan 5f74f7f695 update arsenal dependency 2018-07-01 14:44:24 -07:00
Alexander Chan 5978c8c82d ft: ZENKO-597 account for transient source in TDM 2018-06-30 16:06:52 -07:00
bert-e 1477c2aec9 Merge branch 'feature/ZENKO-584-crrFailureMetrics' into q/8.0 2018-06-30 21:16:50 +00:00
philipyoo 0d740b6624 ft: add crr metrics failures 2018-06-30 13:56:17 -07:00
bert-e 801cd26e67 Merge branch 'w/8.0/feature/ZENKO-267-MD-Ingestion-Routes' into tmp/octopus/q/8.0 2018-06-30 20:39:55 +00:00
Rahul Padigela 88a51cea19 Merge remote-tracking branch 'origin/feature/ZENKO-267-MD-Ingestion-Routes' into w/8.0/feature/ZENKO-267-MD-Ingestion-Routes 2018-06-30 13:13:23 -07:00
Alexander Chan 2807d6c535 bf: ZENKO 616 use localCache redis 2018-06-30 12:52:57 -07:00
bert-e 5b15a2df3f Merge branch 'feature/ZENKO-610-nfs-replication' into q/8.0 2018-06-30 18:51:31 +00:00
Rahul Padigela 78f49f3597 feature: ZENKO-382 skip version for nfs exported bucket 2018-06-30 11:29:06 -07:00
bert-e 19569c7213 Merge branch 'w/8.0/bugfix/port-dev/6.4-to-dev/7.4-and-fix-dependencies' into tmp/octopus/q/8.0 2018-06-30 09:46:45 +00:00
bert-e 2dda9fb55f Merge branch 'improvement/updateArsenalDependency' into q/8.0 2018-06-30 06:15:11 +00:00
Alexander Chan 2338f4334f improvement: update arsenal dependency 2018-06-29 22:03:58 -07:00
bert-e 2a2193356a Merge branch 'feature/ZENKO-270/checkUploadSize' into q/8.0 2018-06-30 04:53:17 +00:00
bert-e 19b35bf1ed Merge branch 'feature/ZENKO-315/CRRWithoutVersioning' into q/8.0 2018-06-30 04:38:51 +00:00
bert-e 141225bc01 Merge branch 'feature/ZENKO-582-preferred-read' into q/8.0 2018-06-30 04:31:16 +00:00
bert-e c1040cf16e Merge branch 'bugfix/ZENKO-579-skip-scan-func-test' into q/8.0 2018-06-30 04:19:59 +00:00
Bennett Buchanan b8a2c8ede5 feature: ZENKO-315 Allow CRR without versioning 2018-06-29 21:18:24 -07:00
Dora Korpar 9840d96230 ft: ZENKO-582 preferred read location
Change preferred read location implementation to use the
":preferred_read" attribute set in the bucket replication
configuration
2018-06-29 21:13:10 -07:00
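A sketch of how the ':preferred_read' attribute mentioned above could be parsed out of a comma-separated storage-class string (the exact syntax handled by CloudServer is assumed here):

    function parseStorageClass(storageClass) {
        const sites = [];
        let preferredRead = null;
        storageClass.split(',').forEach(entry => {
            const [site, flag] = entry.split(':');
            sites.push(site);
            if (flag === 'preferred_read') {
                preferredRead = site;
            }
        });
        return { sites, preferredRead };
    }

    console.log(parseStorageClass('aws-location:preferred_read,gcp-location'));
    // -> { sites: [ 'aws-location', 'gcp-location' ], preferredRead: 'aws-location' }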
Alexander Chan f81865c61f ft: ZENKO-270 add data upload limit 2018-06-29 20:50:56 -07:00
vrancurel 40d20ce2ca bugfix: add a func test for skip scan
Currently this test fails on Mongo because
the current code is unable to fetch the 5
common prefixes in one listing call.
This will be fixed by the next Arsenal PR.
2018-06-29 20:50:37 -07:00
vrancurel 29bb06b2af bugfix: versioningGeneral2 were not passing
The bug has been fixed in Arsenal and was related
to some objects being created before the versioning
was enabled in the bucket and therefore did not
have the versionId property
2018-06-29 20:42:57 -07:00
Rahul Padigela c92176b7d9 feature: ZENKO-600 implement reverse proxy for backbeat api 2018-06-29 20:02:41 -07:00
Alexander Chan deb7a8fa5e bf: ZENKO-624 fix report handler test timer 2018-06-29 19:36:24 -07:00
bert-e 47b57e1a00 Merge branch 'feature/ZENKO-572/orbitSiteMetrics' into q/8.0 2018-06-29 19:11:53 +00:00
Alexander Chan a73b4938ed ft: ZENKO-572 extend CRR stats include to per-site metrics 2018-06-29 11:42:47 -07:00
David Pineau a85a56bd14 Merge remote-tracking branch 'origin/bugfix/port-dev/6.4-to-dev/7.4-and-fix-dependencies' into w/8.0/bugfix/port-dev/6.4-to-dev/7.4-and-fix-dependencies 2018-06-29 16:35:45 +02:00
Taylor McKinnon 84059b13da fix: Disable docker build cache in CI 2018-06-28 16:45:24 -07:00
bert-e 6a91372c21 Merge branch 'improvement/use-commithash' into q/8.0 2018-06-28 22:24:59 +00:00
Rahul Padigela d5d247bec1 improvement: lock scality dependencies using commit hash 2018-06-28 15:01:14 -07:00
bert-e 77443dbb93 Merge branch 'feature/ZENKO-413/gcpVirtualHostSupport' into q/8.0 2018-06-28 18:01:10 +00:00
bert-e 76b98aac72 Merge branch 'feature/ZENKO-315/AddCreateBucketScript' into q/8.0 2018-06-28 17:30:44 +00:00
David Pineau 50ddf5e7a7 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-28 14:28:37 +02:00
Dora Korpar 5f1beff287 ft: ZENKO 142 location storage quota 2018-06-27 16:46:48 -07:00
Alexander Chan d6e1c6a4fb ft: add hostnames 2018-06-27 13:38:40 -07:00
Alexander Chan 286fba8bdb ft: ZENKO-413 add GCP support virtual hosts url 2018-06-27 13:38:40 -07:00
anurag4DSB 1691eae108 ft: ZENKO-20 add prom-monitoring for cloudserver
Signed-off-by: anurag4dsb <anurag.mittal@scality.com>
2018-06-27 10:28:37 -07:00
bert-e 41d1ff1144 Merge branches 'w/8.0/feature/ZENKO-586-file-backend-eve' and 'q/1316/7.4/feature/ZENKO-586-file-backend-eve' into tmp/octopus/q/8.0 2018-06-27 00:50:48 +00:00
bert-e b7cec3aa26 Merge branch 'feature/ZENKO-586-file-backend-eve' into tmp/octopus/w/8.0/feature/ZENKO-586-file-backend-eve 2018-06-27 00:30:17 +00:00
jeremyds 7d922f0c33 bf: ZENKO-308 remove versioningGeneral1 test skiping with mongoDB
Now the listing bug has been fixed in Arsenal, it is time to make the
versioningGeneral1 test run with mongoDB back-end.
2018-06-26 17:05:38 -07:00
bert-e adf1e638c8 Merge branch 'feature/orbit-capabilities' into q/8.0 2018-06-26 23:23:14 +00:00
Rached Ben Mustapha e6781cf9e6 ft: Add capabilities in Orbit reporting 2018-06-26 16:06:18 -07:00
bert-e 19ba37caa3 Merge branch 'feature/ZENKO-143-transientSource' into q/8.0 2018-06-26 23:02:14 +00:00
bert-e a7fc5eb43e Merge branch 'feature/ZENKO-596-add-redis-ha' into q/8.0 2018-06-26 21:22:55 +00:00
Jonathan Gramain ab5d69d825 ft: ZENKO-143 support 'isTransient' location property 2018-06-26 13:30:29 -07:00
bert-e 62ad3b41f2 Merge branch 'bugfix/ZENKO-576/fixLocationSupportsVersioning' into q/8.0 2018-06-26 19:52:53 +00:00
Salim 455a0800d2 feature: add redis HA options 2018-06-26 11:55:51 -07:00
Bennett Buchanan 9b5dcea4e0 feature: ZENKO-315 Script create bucket with NFS 2018-06-26 11:07:28 -07:00
Thomas Carmet 1784e6fe9f Merge remote-tracking branch 'origin/feature/improve-docker-build-tags' into w/8.0/feature/improve-docker-build-tags 2018-06-26 16:57:00 +02:00
Alexander Chan b2ec8ba994 bf: ZENKO-576 set missing supportsVersioning in loc details 2018-06-25 20:43:47 -07:00
Rahul Padigela a65eb80873 bugfix: reduce flakiness of server startup wait 2018-06-25 17:26:07 -07:00
Rahul Padigela 8937152a78 bugfix: add isNFS property to the expected result 2018-06-25 16:58:43 -07:00
Jonathan Gramain 8874f97045 ft: ZENKO-143 batchDelete backbeat route
- Implement 'POST /_/backbeat/batchdelete' backbeat route to get rid
  of an array of data locations. The route will be used by the garbage
  collector service.

  * This includes some reorganization of backbeat routes sanity checks

- Handle gracefully 404 errors from datastore backend:

  * no need to retry 404 errors as they are permanent

  * batch delete can also gracefully handle missing data locations and
    still delete other existing locations (may happen if a retry
    occurs, when replaying a kafka message among other cases).

- Support service-gc service account with account owner access rights.
2018-06-22 16:41:21 -07:00
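A sketch of a request to the batch-delete route named above (host, port, payload shape and auth handling are assumptions; in practice the route is meant to be called by the garbage collector service with proper authentication):

    const http = require('http');

    const body = JSON.stringify({
        Locations: [
            { key: '0a1b2c3d4e5f', dataStoreName: 'us-east-1', size: 1024 },
        ],
    });

    const req = http.request({
        host: 'localhost',
        port: 8000,
        method: 'POST',
        path: '/_/backbeat/batchdelete',
        headers: {
            'content-type': 'application/json',
            'content-length': Buffer.byteLength(body),
        },
    }, res => console.log('status:', res.statusCode));
    req.on('error', err => console.error(err.message));
    req.end(body);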
Rached Ben Mustapha 5543c13fd5 ft: Make websocket use proxies 2018-06-22 11:11:07 -07:00
Rached Ben Mustapha 546d6f157b ft: Add optimized dependencies for ws 2018-06-22 11:11:07 -07:00
Jonathan Gramain 9d86dfad53 ft: ZENKO-143 main location in loc constraint header
Allow setting the main object location name in the
x-amz-location-constraint header, not only replication targets.
2018-06-21 13:45:28 -07:00
bert-e 3aeba783e3 Merge branch 'bugfix/ZENKO-576-fixVersioningBackendSupportCheck' into q/8.0 2018-06-21 19:34:12 +00:00
bert-e 27b425cd61 Merge branch 'bugfix/ZENKO-567-locationHeaderCheckException' into q/8.0 2018-06-21 19:21:48 +00:00
Jonathan Gramain 839ac21161 bf: ZENKO-576 add missing 'supportsVersioning' flag
Add 'supportsVersioning' flag to file and mem locations.
2018-06-21 10:21:05 -07:00
bert-e 52281eb7c6 Merge branch 'bugfix/ZENKO-575-package-regression' into q/8.0 2018-06-21 01:39:56 +00:00
Salim b89142c9a2 bf: Add back start_mongo
ZENKO-575 fixes a regression that caused the legacy use of start_mongo to stop working
2018-06-20 17:47:03 -07:00
bert-e 2d0413910e Merge branch 'w/8.0/feature/ZENKO-573-s3-eve-artifacts' into tmp/octopus/q/8.0 2018-06-21 00:33:27 +00:00
bert-e abc29538d9 Merge branch 'bugfix/ZENKO-498/fixSproxydLocationConfig' into q/8.0 2018-06-21 00:32:47 +00:00
bert-e 44f3c4e329 Merge branch 'feature/ZENKO-573-s3-eve-artifacts' into tmp/octopus/w/8.0/feature/ZENKO-573-s3-eve-artifacts 2018-06-21 00:05:07 +00:00
Stefano Maffulli eb2a6009ed Added info to submit package to NPM
Following instructions on npmjs.org, renamed the package to
@zenko/cloudserver because 'cloudserver' alone could not be used, as it
clashed with pre-existing projects.

The 8.0.0-beta version was picked at random and should be changed.
2018-06-20 16:01:56 -07:00
bert-e 8b70872b1a Merge branch 'feature/ZENKO-551-mutliple-backend-tests' into tmp/octopus/w/8.0/feature/ZENKO-551-mutliple-backend-tests 2018-06-20 21:32:46 +00:00
Thomas Carmet 6cedb7f4aa Merge branch 'development/7.4' into feature/last-forward-port 2018-06-20 14:15:24 -07:00
Jonathan Gramain d43518075b bf: ZENKO-567 enforce "details" in location constraint
Make sure when Orbit configuration is applied, a "details" attribute
is always present in the locations config.
2018-06-20 13:55:18 -07:00
Jonathan Gramain c7633a772a bf: ZENKO-567 config validation may crash
Config validation may crash if locationConstraint[x].details is not
set because of a reversed check.

NOTE: unit tests in locConstraintAssert.js are broken (regexp
line-wrap does not work); leave them alone, it is not worth the time since
they will hopefully be replaced by more robust config parsing in the
near future.
2018-06-20 13:55:18 -07:00
Alexander Chan 00c42ffcfa bf: ZENKO-498 - fix sproxy location config 2018-06-20 13:52:58 -07:00
bert-e c05e6c39db Merge branch 'improvement/ZENKO-547-removeCirleCI' into q/8.0 2018-06-20 20:52:32 +00:00
bert-e 16765466d1 Merge branch 'feature/ZENKO-560/addOrbitAdvOptSupport' into q/8.0 2018-06-20 00:58:20 +00:00
Rahul Padigela 009a012b47 feature: S3C-1212 support expire metrics config
(cherry picked from commit f264078e8c)
2018-06-19 17:38:28 -07:00
Alexander Chan f702c62c3f ft: add cloudserver support for advanced options 2018-06-19 16:15:26 -07:00
Thomas Carmet 2dba0c0c71 ft: adding secrets for 7.4 2018-06-19 15:12:04 -07:00
VR 22e9bf1eb7
Merge pull request #1290 from scality/improvement/packageNameChange
improvement: name change for npmjs
2018-06-19 13:50:47 -07:00
vrancurel 5c6924f35c improvement: name change for npmjs 2018-06-19 13:49:27 -07:00
Salim 94a1e98e8e bugfix: Fix data server regression
ZENKO-464 Fixes a regression that prevented S3 from starting properly
and renames the start_mongo script to cloudserver for clarity.
The regression was introduced in commit 1fd51c8fe1
2018-06-18 15:05:49 -07:00
Rahul Padigela 0f021af79d improvement: remove enterprise circle ci 2018-06-18 00:18:16 -07:00
Salim 25441f7a60 feature: push images to private registry
ZENKO-476: after post-merge images are built, they will be pushed
to the private registry for E2E testing.
2018-06-15 17:33:31 -07:00
Rahul Padigela 70049b7336
Merge pull request #1272 from scality/bugfix/ZENKO-436/pickReadLocationFix
bugfix/ZENKO-436 retrieve correct read location constraint
2018-06-15 11:45:19 -07:00
Alexander Chan 79b78997f0 bf: ZENKO-436 retrieve correct read location constraint
(cherry picked from commit b7fb379703)
+ some typo fixes
2018-06-14 16:32:30 -07:00
Rahul Padigela a8943775c9
Merge pull request #1278 from scality/feature/ZENKO-485-eve-on-s3
Feature/zenko 485 eve on s3
2018-06-13 14:14:47 -07:00
Salim 387cac920e feature: Speed up Docker rebuild using cache
Allows faster CI and dev builds of the Docker image through the use
of cached layers.
2018-06-13 13:33:14 -07:00
Salim 640701166e feature: ZENKO-482 Eve CI
This integrates the mongo ft_tests suite into the Eve CI using
kube workers, which can serve as the template for future workers and for
porting the remaining tests over.
2018-06-13 13:33:14 -07:00
Rached Ben Mustapha 99d6f6f2a4
Merge pull request #1242 from scality/ft/ZENKO-319-sproxydLocationType
ft: ZENKO-319 adds sproxyd location type management
2018-06-07 13:38:51 -07:00
Alexander Chan 323a69cc17 ft: ZENKO-319 adds sproxyd location type management 2018-06-07 10:10:28 -07:00
Rached Ben Mustapha 09b8d17a61
Merge pull request #1271 from scality/fix/ZENKO-471-no-subbyte-report
fix: Ensure CRR reports are full bytes
2018-06-07 09:19:48 -07:00
Rached Ben Mustapha cf175847fb fix: Ensure CRR reports are full bytes 2018-06-06 16:28:57 -07:00
Rahul Padigela 69b37a376d
Merge pull request #1254 from scality/ft/ZENKO-404-lifecycleServiceAccount
ft: ZENKO-404 lifecycle service account
2018-06-01 15:52:36 -07:00
Jonathan Gramain b627c3219f ft: ZENKO-404 lifecycle service account
Add support for the service account 'service-lifecycle' in addition to
the existing 'service-replication'. ACL checks are shunted in both cases;
we may implement more fine-grained access control later.
2018-06-01 12:37:42 -07:00
Rahul Padigela a7905b8df9 chore: update version and dependencies 2018-05-31 11:48:28 -07:00
Rahul Padigela d396acf8fe
Merge pull request #1259 from scality/fwdport/z/1.0-master
Fwdport/z/1.0 master
2018-05-30 10:46:16 -07:00
Rahul Padigela 518e8c45c7 chore: use master dependency branches 2018-05-29 16:35:24 -07:00
Rahul Padigela 0025bdd3ec Merge remote-tracking branch 'origin/z/1.0' into fwdport/z/1.0-master 2018-05-29 16:34:17 -07:00
Rahul Padigela 7ac9ac6ac3
Merge pull request #1256 from scality/bf/lockMocksModule
bf: lock node-http-mocks module
2018-05-25 14:24:21 -07:00
Rahul Padigela 9af917ecf6 bf: lock node-http-mocks module
node-http-mocks version 1.7.0 has some breaking changes which fail the tests.
Fixing this version for now; we will revisit this when we do npm dependency updates.
2018-05-25 12:33:29 -07:00
Rahul Padigela d758eddf71
Merge pull request #1250 from scality/bf/ZENKO-355-byteToMBConversion
bf: ZENKO-355 crr stats byte conversion
2018-05-22 19:16:31 -07:00
philipyoo 84e97d1350 bf: ignore gem install digest
Error installing digest gem because it is part of the
standard library and the gem is no longer available for
install
2018-05-22 19:10:16 -07:00
philipyoo acb5d72f32 bf: receive/send crr metrics sizes as bytes 2018-05-22 15:02:52 -07:00
Rahul Padigela 0712aa4246
Merge pull request #1241 from scality/bf/ZENKO-314-maxKeysZero
Bf/zenko 314 max keys zero
2018-05-17 11:56:04 -07:00
Alexander Chan 67eb20c1e4 bf: ZENKO-314 add check for max keys zero object listing 2018-05-16 15:40:29 -07:00
Alexander Chan 0b85e45ba8 ft: ZENKO-347 skip MongoDB version listing 2018-05-16 13:39:42 -07:00
Rahul Padigela 024544b726
Merge pull request #1233 from scality/test/orbit-management
Unit testing Orbit Management
2018-05-15 16:46:38 -07:00
Nicolas Humbert d88f04271f test: Orbit management 2018-05-15 15:14:05 -07:00
Rahul Padigela d77ff383bf
Merge pull request #1248 from scality/ft/ZENKO-347-skipMongoDbListingTests
Ft/zenko 347 skip mongo db listing tests
2018-05-13 14:21:33 -07:00
Rahul Padigela 08d9e6f45f
Merge pull request #1247 from scality/bf/ZENKO-350-addMDSearchValidation
Bf/zenko 350 add md search validation
2018-05-13 14:20:40 -07:00
Alexander Chan 4aaca47100 ft: ZENKO-347 skip MongoDB version listing 2018-05-11 19:35:36 -07:00
Alexander Chan f5132b911f bf: ZENKO-350 add MD search validation 2018-05-11 19:35:11 -07:00
Alexander Chan f2f2a5ea04 bf: ZENKO-349 add mongodb metadata env
Adds mongodb metadata environment variable to run mongodb only tests
2018-05-11 18:53:59 -07:00
Rached Ben Mustapha 56e60ab38e
Merge pull request #1244 from scality/fix/ZENKO-345-unduplicate-redis-settings
fix: use existing localCache redis configuration
2018-05-11 17:12:40 -07:00
Rached Ben Mustapha 3f7add2ca7 fix: use existing localCache redis configuration 2018-05-11 10:13:58 -07:00
Rahul Padigela ea9645c494
Merge pull request #1243 from scality/bf/ZENKO-332-azure-proxy
bf: ZENKO-332 fix Azure proxy localhost req
2018-05-11 10:09:30 -07:00
Dora Korpar 2e4234ea05 bf: ZENKO-332 fix Azure proxy localhost req 2018-05-10 20:33:53 -07:00
Nicolas HUMBERT a011400c69
Merge pull request #1225 from scality/rf/S3C-1399/refactor-backbeat-metrics-into-arsenal
rf: S3C-1399 Use CRR metrics from Arsenal
2018-04-30 10:06:35 -07:00
Rahul Padigela a79b5de1c3
Merge pull request #1232 from scality/bf/ZENKO-250-regexEval
bf: ZENKO-250 correctly evaluate regex pattern
2018-04-30 09:59:12 -07:00
Bennett Buchanan 6722336be7 rf: S3C-1399 Use CRR metrics from Arsenal 2018-04-27 17:05:22 -07:00
Alexander Chan ba820e5661 bf: ZENKO-250 correctly evaluate regex pattern
The original code evaluates a regex in `/pattern/` syntax incorrectly.
Adds a parser so that MD search can recognize whether a regex is in `/pattern/`
syntax or a simple string
2018-04-27 16:51:53 -07:00
Rahul Padigela 5d71417216
Merge pull request #1220 from scality/doc/ZENKO-259-mdsearch
doc: ZENKO-259 MD Search
2018-04-27 16:34:20 -07:00
Rahul Padigela e7a1ab2c0d
Merge pull request #1223 from scality/rf/S3C-1399/updateRedisConfig
rf: S3C-1399 Update Redis configuration
2018-04-27 14:52:39 -07:00
Alexander Chan 0ae117564a doc: ZENKO-259 mdsearch 2018-04-27 11:08:31 -07:00
Bennett Buchanan 399ecf5b6c rf: S3C-1399 Update Redis configuration 2018-04-27 10:38:18 -07:00
Rahul Padigela b603463f1e
Merge pull request #1227 from scality/ft/ZENKO-262-preferredReadWiteLocations
Ft/zenko 262 preferred read write locations
2018-04-27 10:30:07 -07:00
Nicolas HUMBERT 8a291caf9f
Merge pull request #1229 from scality/bf/ZENKO-275/mpuPropertiesGCP
bf: ZENKO-275 CRR to GCP MPU properties
2018-04-27 10:04:26 -07:00
Bennett Buchanan 0b9b203762 bf: ZENKO-275 CRR to GCP MPU properties 2018-04-26 19:00:07 -07:00
Alexander Chan 35f457d087 ft: ZENKO-262 preferred read/write locations 2018-04-26 16:51:34 -07:00
Nicolas HUMBERT d7595938c8
Merge pull request #1175 from scality/fix/do-not-include-untrimmed-headers-for-MPU-to-aws
FIX: Do not include untrimmed headers
2018-04-25 13:36:07 -07:00
Nicolas HUMBERT d52762c805
Merge pull request #1204 from scality/ft/ZENKO-229-get-using-location
ft/ZENKO-229 get using location
2018-04-25 11:36:37 -07:00
Bennett Buchanan d02dbebbf5 FIX: Do not include untrimmed headers 2018-04-25 10:22:34 -07:00
Rahul Padigela 6ef23a0659
Merge pull request #1228 from scality/ft/ZENKO-141-scality-dataserver
MongoDB as metadata and Scality as a data ba…
2018-04-24 18:59:07 -07:00
Rahul Padigela 9e1e00c05c
Merge pull request #1218 from scality/rf/ZENKO-250-portmdsearchtest
Rf/zenko 250 portmdsearchtest
2018-04-24 17:44:27 -07:00
Alexander Chan 6d67e98d96 rf: ZENKO-250 port md search test 2018-04-24 16:11:15 -07:00
Salim 1fd51c8fe1 ft: ZENKO-141 allows MongoDB/metadata Scality/data
Allows for the case where both the MongoDB and Scality backends are enabled,
so the dataserver does not need to be started
2018-04-24 12:10:17 -07:00
Nicolas HUMBERT 35d12ea43a
Merge pull request #1199 from scality/ft/S3C-1391-update-bucketinfo-uid
ft: edit ModelVersion usage of property uid on buckets
2018-04-24 11:46:10 -07:00
Dora Korpar d6e8201283 ft: ZENKO-229 get obj based on location 2018-04-24 10:38:19 -07:00
philipyoo a6265ab76d ft: edit usage of uid on buckets
Add a uid to all buckets for future use; this should be
backwards compatible.
2018-04-24 10:03:34 -07:00
Rahul Padigela 109ca1f98e chore: update scality dependencies 2018-04-23 12:45:21 -07:00
Rahul Padigela 92e8b1e96a chore: update version, description and author 2018-04-23 12:33:23 -07:00
Rahul Padigela 3c922e0f4a
Merge pull request #1224 from scality/fwdport/7.4-beta-master2
Fwdport/7.4 beta master2
2018-04-23 09:45:23 -07:00
Rahul Padigela 73f32773a1 chore: add package-lock.json 2018-04-23 00:09:05 -07:00
Rahul Padigela bb372c7991 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master2 2018-04-20 11:13:49 -07:00
Rahul Padigela 4ab977cd0f
Merge pull request #1219 from scality/fwdport/7.4-7.4-beta1
Fwdport/7.4 7.4 beta1
2018-04-20 09:59:24 -07:00
Rahul Padigela c2cf9cb82f
Merge pull request #1222 from scality/fx/management
fx: Orbit management - config userName + redundancy
2018-04-20 09:58:49 -07:00
Nicolas Humbert 1fecdcc19f fx: Using account userName from management configuration 2018-04-19 16:13:13 -07:00
Nicolas Humbert d7b4bdf143 fx: removing redundant getUUID() in Orbit management 2018-04-19 15:38:13 -07:00
Rahul Padigela 3e0ff40d7b Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-7.4-beta1 2018-04-19 10:55:50 -07:00
Rahul Padigela eb9729619e
Merge pull request #1207 from scality/ft/more-s3-locations-support
Ft/more s3 locations support
2018-04-18 15:40:28 -07:00
Alexander Chan 460dd0d83b add supportsVersioning unit tests 2018-04-17 16:46:03 -07:00
Alexander Chan dad7ee2be6 healthcheck: versioning optional 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha aada239a85 Handle http s3 endpoints 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha 822a2ce693 Rely on location for PutBucketVersioning 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha fd8905aae7 Make versioning optional for aws_s3 locations 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha 6907df7a8e Configure versioning and path style for s3 locations 2018-04-17 16:46:03 -07:00
Rahul Padigela f8b41c9033
Merge pull request #1216 from scality/ft/use-uuid
rf: use uuid instead of date
2018-04-17 16:27:45 -07:00
Alexander Chan 216eca2609 rf: use uuid instead of date 2018-04-17 11:14:56 -07:00
Rahul Padigela c91e4ba7d5
Merge pull request #1213 from scality/fix/dockerentrypoint
fix: docker entrypoint for whitelisting healthcheck
2018-04-16 17:53:19 -07:00
Rahul Padigela fa0be87398 fix: docker entrypoint for whitelisting healthcheck 2018-04-16 17:52:04 -07:00
Rahul Padigela d899aae079
Merge pull request #1211 from scality/fix/Zenko-deployment
ft: add configurable healthcheck whitelisting
2018-04-16 17:17:29 -07:00
Rahul Padigela 4dfa7b0486 ft: add configurable healthcheck whitelisting 2018-04-16 16:59:24 -07:00
Rahul Padigela dbf1cf43d9
Merge pull request #1210 from scality/backport/master-rel/7.4-beta
backport master to rel/7.4-beta
2018-04-16 13:36:21 -07:00
Rahul Padigela 8a6763da83
Merge pull request #1203 from scality/ft/secure-channel
Ft/secure channel
2018-04-15 23:47:00 -07:00
Rached Ben Mustapha 4b9216c9a4 Plug push-based management 2018-04-13 22:59:40 -07:00
Rached Ben Mustapha 6a286f32a9 Add push-based management module 2018-04-13 22:59:40 -07:00
Rached Ben Mustapha 75e9012a11 Add ws dependency 2018-04-13 22:59:40 -07:00
Rached Ben Mustapha a8a2807e0d Split management module 2018-04-13 22:59:40 -07:00
Dora Korpar 1b008b4e9f ft: add date modified headers as condition for object delete
(cherry picked from commit a02d226b4c)
2018-04-13 17:22:26 -07:00
Rahul Padigela 76e2a2d03f
Merge pull request #1161 from scality/RF/updateIssueTemplateToMentionDiscourse
RF: ZNC-26: Issue Template to highlight Discourse
2018-04-13 17:01:11 -07:00
LaureVergeron 3db21278c5 RF: ZNC-26: Issue Template to highlight Discourse 2018-04-13 14:20:13 +02:00
Rahul Padigela 0fd0493654
Merge pull request #1188 from scality/fix/mongo-tests
Fix mongo tests
2018-04-11 09:53:20 -07:00
Salim a79e9080cd Fix mongo tests 2018-04-10 21:29:00 -07:00
Rahul Padigela abc4fd2c75
Merge pull request #1145 from scality/ft/objdel-add-modified-header-check
Ft/objdel add modified header check
2018-04-10 20:20:29 -07:00
Dora Korpar a02d226b4c ft: add date modified headers as condition for object delete 2018-04-10 17:05:38 -07:00
Rahul Padigela 11641b9fbd
Merge pull request #1202 from scality/fx/ft_management
FX: Running ft_management test with REMOTE_MANAGEMENT_DISABLE=1
2018-04-10 16:42:47 -07:00
Nicolas Humbert 72e646dc78 FX: Running ft_management test with REMOTE_MANAGEMENT_DISABLE=1 2018-04-10 15:10:19 -07:00
Rahul Padigela 227902877a
Merge pull request #1200 from scality/bf/ZENKO-144-https-location
Bf/zenko 144 https location
2018-04-10 14:06:41 -07:00
Rahul Padigela 3ef0caa0ba bf: pick http agent for non-ssl location backends 2018-04-10 09:07:39 -07:00
Rahul Padigela 881c909ef8 test: assert options for aws_s3 location constraints 2018-04-10 09:07:35 -07:00
Rahul Padigela 7c6df7a783 chore: proxyCompareUrl - address naming 2018-04-09 18:13:33 -07:00
Bennett Buchanan cf90391e86
Merge pull request #1154 from scality/ft/ZENKO-158/add-gcp-support-for-one-to-many
FT: Add changes for replication with GCP
2018-04-09 18:06:46 -07:00
Bennett Buchanan 85dee35b43 FT: Add changes for replication with GCP 2018-04-09 13:00:55 -07:00
Rahul Padigela c1a95fa1a9
Merge pull request #1198 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-09 12:46:33 -07:00
Alexander Chan 2426c9cc5a Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-09 10:26:40 -07:00
Rahul Padigela ef1dcee6f3
Merge pull request #1183 from scality/forward/orbit
Forward/orbit
2018-04-05 15:59:03 -07:00
Rahul Padigela 16217d852b
Merge pull request #1195 from scality/ft/ZENKO-225-o2m-s3conn
ft: provide pathStyle option to support S3C
2018-04-05 15:43:26 -07:00
Rahul Padigela 2fbd5ce8cc ft: provide pathStyle option to support S3C
This feature allows setting the pathStyle request option on location constraints so that
it can be relaxed for non-AWS S3 backends (for example, S3 Connector)
2018-04-05 11:54:26 -07:00
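A minimal sketch of a location constraint entry using that option, written as a heredoc in the style of the CI scripts (all field names other than pathStyle are assumed from typical test location configs, not taken from this commit):

```sh
# Hypothetical aws_s3 location entry with path-style requests enabled,
# written to a throwaway test config file.
cat > /tmp/locationConfigPathStyle.json <<'EOF'
{
    "s3c-location": {
        "type": "aws_s3",
        "legacyAwsBehavior": false,
        "details": {
            "awsEndpoint": "s3c.internal.example",
            "bucketName": "backend-bucket",
            "bucketMatch": true,
            "credentialsProfile": "default",
            "pathStyle": true
        }
    }
}
EOF
```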
Nicolas Humbert ed3ee6deae S3C-1354 Grant replication user permission to read/write buckets/objects 2018-04-05 10:59:15 -07:00
Nicolas HUMBERT b505656e86 FX: functional tests (#1190)
* FX: functional tests

* [SQUASH ME] FX TESTS: cleanup(config) changed
2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 61bb309af6 Make mongodb database configurable 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 9d1cd39d15 Disable remote management in tests 2018-04-05 10:59:15 -07:00
anurag4DSB af3ea03bcf changed PR as advised in the review
Prom-client working for S3

corrected content type
2018-04-05 10:59:15 -07:00
anurag4DSB a612735448 finished S3 prom client integration
Signed-off-by: anurag4DSB <anurag.mittal@scality.com>
2018-04-05 10:59:15 -07:00
Anurag Mittal 8bf35f7b1d Tested and completed prom-client for S3
Signed-off-by: anurag4DSB <anurag.mittal@scality.com>
2018-04-05 10:59:15 -07:00
Nicolas Humbert bf811ecd87 S3C-1348 FT: Integrating 1-many locations replication into Orbit 2018-04-05 10:59:15 -07:00
Salim 8fe22c0b55 fix mongodb hosts entrypoint 2018-04-05 10:59:15 -07:00
Salim e54318d1c6 Multiple endpoints 2018-04-05 10:59:15 -07:00
Bennett Buchanan c760229757 FT: Add CRR statistics for Orbit (#1162) 2018-04-05 10:59:15 -07:00
Alexander Chan 06d2dbc8e7 FT: Adds support for GCP data backend 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha ebf131696b Fix: limit exported config items
Only send `overlayVersion` for now.
2018-04-05 10:59:15 -07:00
LaureVergeron b443b925a5 FT: ZENKO-118: Add support for Wasabi data backend 2018-04-05 10:59:15 -07:00
vrancurel 343b658d5f use hosts instead of host and port 2018-04-05 10:59:15 -07:00
Lauren Spiegel 337b049298 FT: Switch out mongo search for spark 2018-04-05 10:59:15 -07:00
Lauren Spiegel 798c42d6e3 TEMP: Disable remote mgmt for mem 2018-04-05 10:59:15 -07:00
Lauren Spiegel a95018a87b TEMP: Special treatment for clueso
Once orbit handles identities, all service accounts
should use the proper service account canonical id format,
and canonical ids should be individually generated.
2018-04-05 10:59:15 -07:00
Lauren Spiegel 7828c0d56b FT: Add changes for clueso 2018-04-05 10:59:15 -07:00
Lauren Spiegel 2870477c61 chore: additional lint fixes 2018-04-05 10:59:15 -07:00
JianqinWang 1d191f4c5d bf: fix linter errors 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha fe129c7848 Ignore data/metadata dirs 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 126406738e Initial management implementation 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 36ff274b24 Default to S3DATA=multiple 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha e976c4d46e Update default configuration for Zenko 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha f474e7ee40 Pre-provision instance ID 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha e8e92871d5 Support target host header 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha 79c6c57760 FT: Less verbose report handler 2018-04-05 10:57:59 -07:00
Rahul Padigela 9bc2b7379c
Merge pull request #1193 from scality/bf/proxy-config
bf: proxy config
2018-04-04 10:22:05 -07:00
Dora Korpar 33bac9802b bf: proxy config 2018-04-03 17:02:51 -07:00
Rahul Padigela 01f23805ad
Merge pull request #1189 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-02 12:31:03 -07:00
Alexander Chan 7d5b22b330 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-02 11:00:58 -07:00
Rahul Padigela 74ba89ec34
Merge pull request #1179 from scality/ft/ZENKO-144-https-proxy
Ft/zenko 144 https proxy
2018-04-02 10:33:34 -07:00
Salim d0ef347a31 ZENKO-144 Squid proxy docker file 2018-03-30 17:10:29 -07:00
Dora Korpar d923af04ba ZENKO-144 ft: https proxy 2018-03-30 17:10:29 -07:00
Rahul Padigela 063086c7bb
Merge pull request #1185 from scality/bf/content-length-type
bf: parse content length to int
2018-03-30 13:59:19 -07:00
Alexander Chan 6f9ee224f2 bf: parse content length to int 2018-03-30 11:33:43 -07:00
Rahul Padigela 42abc5ae02
Merge pull request #1182 from scality/bf/merge-error
BF: fix merge conflict error
2018-03-28 19:11:54 -07:00
Alexander Chan c632635cfc bf: removed duplicate function 2018-03-28 14:44:49 -07:00
Rahul Padigela 7902709abc
Merge pull request #1171 from scality/ft/S3C-1327-update-ModelVersion-md
ft: update ModelVersion.md
2018-03-27 16:33:45 -07:00
Rahul Padigela 5dff2506a4
Merge pull request #1169 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-03-27 15:31:19 -07:00
philipyoo 7b1ca7fec6 ft: update ModelVersion.md 2018-03-27 13:26:18 -07:00
Alexander Chan 502c1a4c11 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-23 11:29:00 -07:00
Rahul Padigela c1bda2b9ea
Merge pull request #1178 from scality/fx/missing-abort-prop
BF: GcpManagedUpload
2018-03-23 11:26:26 -07:00
Alexander Chan 8d688738f7 fx: missing error object in logHelper call 2018-03-22 14:24:16 -07:00
Rahul Padigela 48eeb9c501
Merge pull request #1118 from scality/ft/S3C-1115/GCP-S3-Func
FT S3C-1115: GCP S3 Functional Tests
2018-03-22 10:12:18 -07:00
Alexander Chan 1a44c52f30 S3C-1115 FT: Adds GCP Functional Tests
Adds S3 functional tests for GCP backend: GET/PUT/DEL
2018-03-21 21:04:17 -07:00
Rahul Padigela acda681fc1
Merge pull request #1176 from scality/fwd/7.4-orig-beta
Foward port rel/7.4 to rel/7.4-beta
2018-03-21 12:20:54 -07:00
Alexander Chan 66b6dc19ab FX: missing abort call object properties 2018-03-21 12:12:14 -07:00
Alexander Chan 60cce4a65f Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-orig-beta 2018-03-20 16:04:05 -07:00
Alexander Chan df1d26abad Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-19 15:24:47 -07:00
Rahul Padigela 801052b99e
Merge pull request #1164 from scality/ft/gcp-to-7.4-beta
Merge ft/GCP to rel/7.4-beta
2018-03-16 15:23:55 -07:00
Alexander Chan 6dfec0655a Merge remote-tracking branch 'origin/ft/GCP' into rel/7.4-beta 2018-03-16 11:13:24 -07:00
Bennett Buchanan 70379f2a42
Merge pull request #1163 from scality/fx/gcp-add-err
fx: add err
2018-03-16 10:41:26 -07:00
Alexander Chan cd5526188f fx: add err 2018-03-15 18:33:03 -07:00
Rahul Padigela a4cbbe0fcc ft: update package.json dependencies 2018-03-14 13:06:24 -07:00
Alexander Chan e5f966d0fa
Merge pull request #1123 from scality/ft/S3C-1179/GCP-S3-MPU
Ft/s3 c 1179/gcp s3 mpu
2018-03-10 19:59:21 -08:00
Alexander Chan 9c3a8f1573 Switch from npm to yarn 2018-03-09 18:18:49 -08:00
Rahul Padigela 208c024789
Merge pull request #1148 from scality/fwdport/7.4-master
Fwdport/7.4 master
2018-03-09 16:15:47 -08:00
Alexander Chan 8b629252e4 S3C-1179 FT: Adds GCP MPU Func
Adds MPU functionalities to GCP Backend
2018-03-09 15:06:16 -08:00
Rahul Padigela db6e8f35d5 Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-master 2018-03-09 13:41:15 -08:00
Rahul Padigela 0fc6e720e3
Merge pull request #1141 from scality/rf/back-to-xml
S3C-1115: rf: removes JSON API usage in GCP
2018-03-07 20:59:34 -08:00
Alexander Chan ecf9fe75bd rf: remove JSON API 2018-03-07 18:49:41 -08:00
Alexander Chan 2a70d20732 fx: GCP CopyObject Result Parse
Parse response values to correct type for copyObject method.
2018-03-07 18:47:56 -08:00
Bennett Buchanan d280b22d14 FIX: Remove GCP versioning healthcheck on buckets 2018-03-07 18:47:56 -08:00
Alexander Chan cffb8a1914 FIX: Add GCP to externalVersioningErrorMessage 2018-03-07 18:47:56 -08:00
Alexander Chan 118a091ae5 S3C-1115 FT: Adds GCP functionality to S3
Adds common files/changes for adding GCP as a backend in S3.
2018-03-07 18:47:56 -08:00
Alexander Chan c3fc35dcf3 S3C-1115 FT: GCP Object Tagging
Implements GCP Object Tagging PUT/GET/DELETE APIs
2018-03-07 18:47:56 -08:00
Alexander Chan 6c03f6de30 S3C-1179 FT: GCP Upload Method
Adds a GcpManagedUpload for the upload method to handle stream uploads.
The GcpManagedUpload will handle switching between putObject and
multipart upload depending on the given content size.
2018-03-07 18:47:56 -08:00
Alexander Chan 775be47f59 S3C-1179 FT: GCP MPU APIs
Implements GCP MPU APIs: listParts, uploadPart, uploadPartCopy,
abortMPU, createMPU, and completeMPU
Implements MPU Helper Class
2018-03-07 18:47:56 -08:00
Alexander Chan 7fd3f05d27 FX: GCP Credentials
Fixes a bug where environment variables containing '\n' (newline)
were not retrieved correctly.
2018-03-07 18:47:56 -08:00
Alexander Chan 87466286b6 S3C-1115 FT: GCP COPY
Implements GCP Object COPY API
2018-03-07 18:47:56 -08:00
Alexander Chan a89a26aaa1 FX: GCP PUT API Test
Fixes the use of an undefined variable that leads to creation of buckets
that aren't deleted on GCP.
2018-03-07 18:47:56 -08:00
Alexander Chan 54037fc1bd S3C-1115 FT: GCP Object APIs
Implements GCP Object PUT/GET/DELETE APIs
2018-03-07 18:47:56 -08:00
Alexander Chan 3f42030c86 [FIX] GCP Healthcheck
Removes one layer of nesting that leads to errors not being detected.
2018-03-07 18:47:56 -08:00
Alexander Chan ecec50845c S3C-1115 FT: Adds GCP Backend healthcheck
Implements the APIs to enable backend healthcheck of GCP storage buckets
2018-03-07 18:47:56 -08:00
Rahul Padigela a77a21f957
Merge pull request #1129 from scality/ft/pensieve-credentials
rf: moving backends to Arsenal
2018-03-07 15:07:57 -08:00
JianqinWang ec4427c2c0 rf: rename mongodb replica hosts 2018-03-06 16:46:11 -08:00
JianqinWang fb09cb9a39 ZENKO-140 - rf: move metadata backends to Arsenal 2018-03-06 16:46:11 -08:00
ironman-machine 0f8b957901 merge #1119 2018-02-16 03:42:59 +00:00
Lauren Spiegel fb52735a23 CHORE: Update mongo version 2018-02-15 15:51:21 -08:00
Bennett Buchanan b5aa64ec89
Merge pull request #1128 from scality/FIX/typoInPublicCloudDoc
FIX: Typo in Public Clouds Doc
2018-02-15 12:03:39 -08:00
LaureVergeron c6099ee186 FIX: Typo in Public Clouds Doc 2018-02-15 16:03:59 +01:00
ThibaultRiviere 6ec0562afa
Merge pull request #1105 from scality/fwdport_7.4_master
Fwdport 7.4 master
2018-02-07 15:58:15 +01:00
Thibault Riviere 9d58964a60 Merge branch 'rel/7.4' into fwdport_7.4_master 2018-02-07 13:39:34 +01:00
ironman-machine c7137d1500 merge #1107 2018-02-07 01:05:52 +00:00
Alexander Chan 4dc537e5e0 FX: GCP PUT API Test
Fixes the use of an undefined variable that leads to creation of buckets
that aren't deleted on GCP.
2018-02-06 15:25:22 -08:00
Bennett Buchanan 1dcbacf594
Merge pull request #1095 from scality/ft/S3C-1115/GCP-APIs-Object
Ft/s3 c 1115/gcp apis object
2018-02-04 11:16:49 -08:00
Alexander Chan 60c1281ee8 S3C-1115 FT: GCP Object APIs
Implements GCP Object PUT/GET/DELETE APIs
2018-02-02 17:40:37 -08:00
Lauren Spiegel effb39cad8
Merge pull request #1084 from scality/goodbye/mongo
Goodbye/mongo
2018-02-02 17:00:25 -08:00
Lauren Spiegel bc0e0ad057 Move mongo client to arsenal 2018-02-02 15:12:11 -08:00
Bennett Buchanan fcb5c0ea07
Merge pull request #1103 from scality/fix/update-readme-for-configuration-section
FIX: Update link to Configuration doc in README.md
2018-02-02 13:47:21 -08:00
Bennett Buchanan 0b002d8f15 FIX: Update link to Configuration doc in README.md 2018-02-02 10:35:14 -08:00
ironman-machine fdc2e580b0 merge #1093 2018-02-02 01:57:23 +00:00
Alexander Chan 61d6e4bfc7 S3C-1115 FT: Adds GCP Backend healthcheck
Implements the APIs to enable backend healthcheck of GCP storage buckets
2018-02-01 15:40:15 -08:00
Lauren Spiegel cd252ff793
Merge pull request #1097 from scality/ft/kub-mongo
Improved logging and Replica set compatibility
2018-02-01 15:37:14 -08:00
Salim d195267f5f Improved Mongo Logging 2018-01-31 17:46:02 -08:00
Salim 24ae3989aa Replica Set Compatibility 2018-01-31 15:46:40 -08:00
396 changed files with 33531 additions and 10543 deletions

View File

@ -1,3 +1,9 @@
.git
.github
node_modules
localData/*
localMetadata/*
# Keep the .git/HEAD file in order to properly report version
.git/objects
.github
.tox
coverage
.DS_Store

View File

@ -1,10 +1,54 @@
{
"extends": "scality",
"plugins": [
"mocha"
],
"rules": {
"import/extensions": "off",
"lines-around-directive": "off",
"no-underscore-dangle": "off",
"indent": "off",
"object-curly-newline": "off",
"operator-linebreak": "off",
"function-paren-newline": "off",
"import/newline-after-import": "off",
"prefer-destructuring": "off",
"implicit-arrow-linebreak": "off",
"no-bitwise": "off",
"dot-location": "off",
"comma-dangle": "off",
"no-undef-init": "off",
"global-require": "off",
"import/no-dynamic-require": "off",
"class-methods-use-this": "off",
"no-plusplus": "off",
"no-else-return": "off",
"object-property-newline": "off",
"import/order": "off",
"no-continue": "off",
"no-tabs": "off",
"lines-between-class-members": "off",
"prefer-spread": "off",
"no-lonely-if": "off",
"no-useless-escape": "off",
"no-restricted-globals": "off",
"no-buffer-constructor": "off",
"import/no-extraneous-dependencies": "off",
"space-unary-ops": "off",
"no-useless-return": "off",
"no-unexpected-multiline": "off",
"no-mixed-operators": "off",
"newline-per-chained-call": "off",
"operator-assignment": "off",
"spaced-comment": "off",
"comma-style": "off",
"no-restricted-properties": "off",
"new-parens": "off",
"no-multi-spaces": "off",
"quote-props": "off",
"mocha/no-exclusive-tests": "error",
},
"parserOptions": {
"ecmaVersion": 2018,
"sourceType": "module",
"ecmaFeatures": {
"jsx": true
}
"ecmaVersion": 2020
}
}

View File

@ -1,19 +1,32 @@
# Issue template
# General support information
If you are reporting a new issue, make sure that we do not have any
duplicates already open. You can ensure this by searching the issue list for
this repository. If there is a duplicate, please close your issue and add a
comment to the existing issue instead.
GitHub Issues are **reserved** for actionable bug reports (including
documentation inaccuracies), and feature requests.
**All questions** (regarding configuration, use cases, performance, community,
events, setup and usage recommendations, among other things) should be asked on
the **[Zenko Forum](http://forum.zenko.io/)**.
## General support information
> Questions opened as GitHub issues will systematically be closed, and moved to
> the [Zenko Forum](http://forum.zenko.io/).
GitHub Issues are reserved for actionable bug reports and feature requests.
General questions should be sent to the
[S3 scality server Forum](http://forum.scality.com/).
--------------------------------------------------------------------------------
## Avoiding duplicates
When reporting a new issue/requesting a feature, make sure that we do not have
any duplicates already open:
- search the issue list for this repository (use the search bar, select
"Issues" on the left pane after searching);
- if there is a duplicate, please do not open your issue, and add a comment
to the existing issue instead.
--------------------------------------------------------------------------------
## Bug report information
(delete this section if not applicable)
(delete this section (everything between the lines) if you're not reporting a bug
but requesting a feature)
### Description
@ -29,13 +42,22 @@ Describe the results you received
### Expected result
Describe the results you expecteds
Describe the results you expected
### Additional information: (Node.js version, Docker version, etc)
### Additional information
- Node.js version,
- Docker version,
- yarn version,
- distribution/OS,
- optional: anything else you deem helpful to us.
--------------------------------------------------------------------------------
## Feature Request
(delete this section if not applicable)
(delete this section (everything between the lines) if you're not requesting
a feature but reporting a bug)
### Proposal
@ -52,3 +74,14 @@ What you would like to happen
### Use case
Please provide use cases for changing the current behavior
### Additional information
- Is this request for your company? Y/N
- If Y: Company name:
- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
- Are you willing to contribute this feature yourself?
- Position/Title:
- How did you hear about us?
--------------------------------------------------------------------------------

View File

@ -15,7 +15,7 @@ runs:
shell: bash
run: |-
set -exu;
mkdir -p /tmp/artifacts/${{ github.job }}/;
mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v4
with:
node-version: '16'
@ -23,3 +23,21 @@ runs:
- name: install dependencies
shell: bash
run: yarn install --ignore-engines --frozen-lockfile --network-concurrency 1
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
- uses: actions/setup-python@v4
with:
python-version: 3.9
- name: Setup python2 test environment
shell: bash
run: |
sudo apt-get install -y libdigest-hmac-perl
pip install 's3cmd==2.3.0'
- name: fix sproxyd.conf permissions
shell: bash
run: sudo chown root:root .github/docker/sproxyd/conf/sproxyd0.conf
- name: ensure fuse kernel module is loaded (for sproxyd)
shell: bash
run: sudo modprobe fuse

25
.github/ceph/Dockerfile vendored Normal file
View File

@ -0,0 +1,25 @@
FROM ceph/daemon:v3.2.1-stable-3.2-mimic-centos-7
ENV CEPH_DAEMON demo
ENV CEPH_DEMO_DAEMONS mon,mgr,osd,rgw
ENV CEPH_DEMO_UID zenko
ENV CEPH_DEMO_ACCESS_KEY accessKey1
ENV CEPH_DEMO_SECRET_KEY verySecretKey1
ENV CEPH_DEMO_BUCKET zenkobucket
ENV CEPH_PUBLIC_NETWORK 0.0.0.0/0
ENV MON_IP 0.0.0.0
ENV NETWORK_AUTO_DETECT 4
ENV RGW_CIVETWEB_PORT 8001
RUN rm /etc/yum.repos.d/tcmu-runner.repo
ADD ./entrypoint-wrapper.sh /
RUN chmod +x /entrypoint-wrapper.sh && \
yum install -y python-pip && \
yum clean all && \
pip install awscli && \
rm -rf /root/.cache/pip
ENTRYPOINT [ "/entrypoint-wrapper.sh" ]

37
.github/ceph/entrypoint-wrapper.sh vendored Normal file
View File

@ -0,0 +1,37 @@
#!/bin/sh
touch /artifacts/ceph.log
mkfifo /tmp/entrypoint_output
# We run this in the background so that we can tail the RGW log after init,
# because entrypoint.sh never returns
# The next line will be needed when ceph builds 3.2.2 so I'll leave it here
# bash /opt/ceph-container/bin/entrypoint.sh > /tmp/entrypoint_output &
bash /entrypoint.sh > /tmp/entrypoint_output &
entrypoint_pid="$!"
while read -r line; do
echo $line
# When we find this line server has started
if [ -n "$(echo $line | grep 'Creating bucket')" ]; then
break
fi
done < /tmp/entrypoint_output
# Make our buckets - CEPH_DEMO_BUCKET is set to force the "Creating bucket" message, but unused
s3cmd mb s3://cephbucket s3://cephbucket2
mkdir /root/.aws
cat > /root/.aws/credentials <<EOF
[default]
aws_access_key_id = accessKey1
aws_secret_access_key = verySecretKey1
EOF
# Enable versioning on them
for bucket in cephbucket cephbucket2; do
echo "Enabling versiong for $bucket"
aws --endpoint http://127.0.0.1:8001 s3api put-bucket-versioning --bucket $bucket --versioning Status=Enabled
done
tail -f /var/log/ceph/client.rgw.*.log | tee -a /artifacts/ceph.log
wait $entrypoint_pid

11
.github/ceph/wait_for_ceph.sh vendored Normal file
View File

@ -0,0 +1,11 @@
#!/bin/sh
# This script is needed because RADOS Gateway
# will open the port before beginning to serve traffic
# causing wait_for_local_port.bash to exit immediately
echo 'Waiting for ceph'
while [ -z "$(curl 127.0.0.1:8001 2>/dev/null)" ]; do
sleep 1
echo -n "."
done

View File

@ -34,4 +34,3 @@ gcpbackendmismatch_GCP_SERVICE_KEY
gcpbackend_GCP_SERVICE_KEYFILE
gcpbackendmismatch_GCP_SERVICE_KEYFILE
gcpbackendnoproxy_GCP_SERVICE_KEYFILE
gcpbackendproxy_GCP_SERVICE_KEYFILE

View File

@ -20,6 +20,7 @@ services:
- METADATA_HOST=0.0.0.0
- S3BACKEND
- S3DATA
- S3METADATA
- MPU_TESTING
- S3VAULT
- S3_LOCATION_FILE
@ -34,7 +35,16 @@ services:
- S3KMIP_KEY
- S3KMIP_CERT
- S3KMIP_CA
- MONGODB_HOSTS=0.0.0.0:27018
- MONGODB_RS=rs0
- DEFAULT_BUCKET_KEY_FORMAT
- METADATA_MAX_CACHED_BUCKETS
- ENABLE_NULL_VERSION_COMPAT_MODE
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file:
- creds.env
depends_on:
@ -65,3 +75,18 @@ services:
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts
mongo:
network_mode: "host"
profiles: ['mongo', 'ceph']
image: ${MONGODB_IMAGE}
ceph:
network_mode: "host"
profiles: ['ceph']
image: ghcr.io/scality/cloudserver/ci-ceph
sproxyd:
network_mode: "host"
profiles: ['sproxyd']
image: sproxyd-standalone
build: ./sproxyd
user: 0:0
privileged: yes
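
The profiles added above are how the CI jobs further down select which services to start; for a local run the same selection looks roughly like this (a sketch; the image variables are placeholders, not values from this diff):

```sh
# Start CloudServer together with the MongoDB replica-set container defined
# in the compose file above, then wait for the S3 port as the CI jobs do.
export CLOUDSERVER_IMAGE=ghcr.io/scality/cloudserver:local   # placeholder
export MONGODB_IMAGE=ghcr.io/scality/cloudserver/ci-mongodb:local   # placeholder
(cd .github/docker && docker compose --profile mongo up -d)
bash wait_for_local_port.bash 8000 40
```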

28
.github/docker/mongodb/Dockerfile vendored Normal file
View File

@ -0,0 +1,28 @@
FROM mongo:5.0.21
ENV USER=scality \
HOME_DIR=/home/scality \
CONF_DIR=/conf \
DATA_DIR=/data
# Set up directories and permissions
RUN mkdir -p /data/db /data/configdb && chown -R mongodb:mongodb /data/db /data/configdb; \
mkdir /logs; \
adduser --uid 1000 --disabled-password --gecos --quiet --shell /bin/bash scality
# Set up environment variables and directories for scality user
RUN mkdir ${CONF_DIR} && \
chown -R ${USER} ${CONF_DIR} && \
chown -R ${USER} ${DATA_DIR}
# copy the mongo config file
COPY /conf/mongod.conf /conf/mongod.conf
COPY /conf/mongo-run.sh /conf/mongo-run.sh
COPY /conf/initReplicaSet /conf/initReplicaSet.js
EXPOSE 27017/tcp
EXPOSE 27018
# Set up CMD
ENTRYPOINT ["bash", "/conf/mongo-run.sh"]
CMD ["bash", "/conf/mongo-run.sh"]

View File

@ -0,0 +1,4 @@
rs.initiate({
_id: "rs0",
members: [{ _id: 0, host: "127.0.0.1:27018" }]
});

View File

@ -0,0 +1,10 @@
#!/bin/bash
set -exo pipefail
init_RS() {
sleep 5
mongo --port 27018 /conf/initReplicaSet.js
}
init_RS &
mongod --bind_ip_all --config=/conf/mongod.conf

15
.github/docker/mongodb/conf/mongod.conf vendored Normal file
View File

@ -0,0 +1,15 @@
storage:
journal:
enabled: true
engine: wiredTiger
dbPath: "/data/db"
processManagement:
fork: false
net:
port: 27018
bindIp: 0.0.0.0
replication:
replSetName: "rs0"
enableMajorityReadConcern: true
security:
authorization: disabled

3
.github/docker/sproxyd/Dockerfile vendored Normal file
View File

@ -0,0 +1,3 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@ -0,0 +1,26 @@
fastcgi_param QUERY_STRING $query_string;
fastcgi_param REQUEST_METHOD $request_method;
fastcgi_param CONTENT_TYPE $content_type;
fastcgi_param CONTENT_LENGTH $content_length;
#fastcgi_param SCRIPT_NAME $fastcgi_script_name;
fastcgi_param SCRIPT_NAME /var/www;
fastcgi_param PATH_INFO $document_uri;
fastcgi_param REQUEST_URI $request_uri;
fastcgi_param DOCUMENT_URI $document_uri;
fastcgi_param DOCUMENT_ROOT $document_root;
fastcgi_param SERVER_PROTOCOL $server_protocol;
fastcgi_param HTTPS $https if_not_empty;
fastcgi_param GATEWAY_INTERFACE CGI/1.1;
fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
fastcgi_param REMOTE_ADDR $remote_addr;
fastcgi_param REMOTE_PORT $remote_port;
fastcgi_param SERVER_ADDR $server_addr;
fastcgi_param SERVER_PORT $server_port;
fastcgi_param SERVER_NAME $server_name;
# PHP only, required if PHP was built with --enable-force-cgi-redirect
fastcgi_param REDIRECT_STATUS 200;

88
.github/docker/sproxyd/conf/nginx.conf vendored Normal file
View File

@ -0,0 +1,88 @@
worker_processes 1;
error_log /logs/error.log;
user root root;
events {
worker_connections 1000;
reuse_port on;
multi_accept on;
}
worker_rlimit_nofile 20000;
http {
root /var/www/;
upstream sproxyds {
least_conn;
keepalive 40;
server 127.0.0.1:20000;
}
server {
client_max_body_size 0;
client_body_timeout 150;
client_header_timeout 150;
postpone_output 0;
client_body_postpone_size 0;
keepalive_requests 1100;
keepalive_timeout 300s;
server_tokens off;
default_type application/octet-stream;
gzip off;
tcp_nodelay on;
tcp_nopush on;
sendfile on;
listen 81;
server_name localhost;
rewrite ^/arc/(.*)$ /dc1/$1 permanent;
location ~* ^/proxy/(.*)$ {
rewrite ^/proxy/(.*)$ /$1 last;
}
allow 127.0.0.1;
deny all;
set $usermd '-';
set $sentusermd '-';
set $elapsed_ms '-';
set $now '-';
log_by_lua '
if not(ngx.var.http_x_scal_usermd == nil) and string.len(ngx.var.http_x_scal_usermd) > 2 then
ngx.var.usermd = string.sub(ngx.decode_base64(ngx.var.http_x_scal_usermd),1,-3)
end
if not(ngx.var.sent_http_x_scal_usermd == nil) and string.len(ngx.var.sent_http_x_scal_usermd) > 2 then
ngx.var.sentusermd = string.sub(ngx.decode_base64(ngx.var.sent_http_x_scal_usermd),1,-3)
end
local elapsed_ms = tonumber(ngx.var.request_time)
if not ( elapsed_ms == nil) then
elapsed_ms = elapsed_ms * 1000
ngx.var.elapsed_ms = tostring(elapsed_ms)
end
local time = tonumber(ngx.var.msec) * 1000
ngx.var.now = time
';
log_format irm '{ "time":"$now","connection":"$connection","request":"$connection_requests","hrtime":"$msec",'
'"httpMethod":"$request_method","httpURL":"$uri","elapsed_ms":$elapsed_ms,'
'"httpCode":$status,"requestLength":$request_length,"bytesSent":$bytes_sent,'
'"contentLength":"$content_length","sentContentLength":"$sent_http_content_length",'
'"contentType":"$content_type","s3Address":"$remote_addr",'
'"requestUserMd":"$usermd","responseUserMd":"$sentusermd",'
'"ringKeyVersion":"$sent_http_x_scal_version","ringStatus":"$sent_http_x_scal_ring_status",'
'"s3Port":"$remote_port","sproxydStatus":"$upstream_status","req_id":"$http_x_scal_request_uids",'
'"ifMatch":"$http_if_match","ifNoneMatch":"$http_if_none_match",'
'"range":"$http_range","contentRange":"$sent_http_content_range","nginxPID":$PID,'
'"sproxydAddress":"$upstream_addr","sproxydResponseTime_s":"$upstream_response_time" }';
access_log /dev/stdout irm;
error_log /dev/stdout error;
location / {
proxy_request_buffering off;
fastcgi_request_buffering off;
fastcgi_no_cache 1;
fastcgi_cache_bypass 1;
fastcgi_buffering off;
fastcgi_ignore_client_abort on;
fastcgi_keep_conn on;
include fastcgi_params;
fastcgi_pass sproxyds;
fastcgi_next_upstream error timeout;
fastcgi_send_timeout 285s;
fastcgi_read_timeout 285s;
}
}
}

View File

@ -0,0 +1,12 @@
{
"general": {
"ring": "DATA",
"port": 20000,
"syslog_facility": "local0"
},
"ring_driver:0": {
"alias": "dc1",
"type": "local",
"queue_path": "/tmp/ring-objs"
},
}

View File

@ -0,0 +1,43 @@
[supervisord]
nodaemon = true
loglevel = info
logfile = %(ENV_LOG_DIR)s/supervisord.log
pidfile = %(ENV_SUP_RUN_DIR)s/supervisord.pid
logfile_maxbytes = 20MB
logfile_backups = 2
[unix_http_server]
file = %(ENV_SUP_RUN_DIR)s/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl = unix://%(ENV_SUP_RUN_DIR)s/supervisor.sock
[program:nginx]
directory=%(ENV_SUP_RUN_DIR)s
command=bash -c "/usr/sbin/nginx -c %(ENV_CONF_DIR)s/nginx.conf -g 'daemon off;'"
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stderr_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s-stderr.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=7
autorestart=true
autostart=true
user=root
[program:sproxyd]
directory=%(ENV_SUP_RUN_DIR)s
process_name=%(program_name)s-%(process_num)s
numprocs=1
numprocs_start=0
command=/usr/bin/sproxyd -dlw -V127 -c %(ENV_CONF_DIR)s/sproxyd%(process_num)s.conf -P /run%(process_num)s
stdout_logfile = %(ENV_LOG_DIR)s/%(program_name)s-%(process_num)s.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=7
redirect_stderr=true
autorestart=true
autostart=true
user=root

35
.github/workflows/alerts.yaml vendored Normal file
View File

@ -0,0 +1,35 @@
name: Test alerts
on:
push:
branches-ignore:
- 'development/**'
- 'q/*/**'
jobs:
run-alert-tests:
runs-on: ubuntu-latest
strategy:
matrix:
tests:
- name: 1 minute interval tests
file: monitoring/alerts.test.yaml
- name: 10 seconds interval tests
file: monitoring/alerts.10s.test.yaml
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.3
with:
alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }}
alert_inputs: |
namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -9,6 +9,9 @@ on:
description: 'Tag to be released'
required: true
env:
PROJECT_NAME: ${{ github.event.repository.name }}
jobs:
build-federation-image:
runs-on: ubuntu-20.04
@ -34,32 +37,38 @@ jobs:
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
build-image:
runs-on: ubuntu-20.04
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
- name: Set up Docker Buildk
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
- name: Login to Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image
- name: Push dashboards into the production namespace
run: |
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
dashboard.json:application/grafana-dashboard+json \
alerts.yaml:application/prometheus-alerts+yaml
working-directory: monitoring
- name: Build and push
uses: docker/build-push-action@v5
with:
push: true
context: .
tags: |
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
push: true
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
cache-from: type=gha
cache-to: type=gha,mode=max
github-release:
runs-on: ubuntu-latest
steps:
- name: Create Release
uses: softprops/action-gh-release@v2
env:

View File

@ -2,6 +2,8 @@
name: tests
on:
workflow_dispatch:
push:
branches-ignore:
- 'development/**'
@ -126,6 +128,9 @@ jobs:
build:
runs-on: ubuntu-20.04
permissions:
contents: read
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
@ -162,39 +167,21 @@ jobs:
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
build-federation-image:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
- name: Build and push MongoDB
uses: docker/build-push-action@v5
with:
push: true
context: .
file: images/svc-base/Dockerfile
tags: |
ghcr.io/${{ github.repository }}:${{ github.sha }}-svc-base
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
multiple-backend:
runs-on: ubuntu-latest
needs: build
env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
S3DATA: multiple
@ -202,18 +189,22 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-python@v5
- name: Login to Registry
uses: docker/login-action@v3
with:
python-version: 3.9
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose up -d
run: docker compose --profile sproxyd up -d
working-directory: .github/docker
- name: Run multiple backend test
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
bash wait_for_local_port.bash 81 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
@ -227,14 +218,89 @@ jobs:
source: /tmp/artifacts
if: always()
mongo-v0-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
mongo-v1-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
MPU_TESTING: "yes"
S3METADATA: mongodb
S3KMS: file
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run functional tests
run: |-
set -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ github.job }}/tests.log
yarn run ft_mixed_bucket_format_version | tee /tmp/artifacts/${{ github.job }}/mixed-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
file-ft-tests:
strategy:
matrix:
include:
- enable-null-compat: ''
job-name: file-ft-tests
- enable-null-compat: 'true'
job-name: file-ft-tests-null-compat
- job-name: file-ft-tests
name: ${{ matrix.job-name }}
runs-on: ubuntu-latest
needs: build
@ -242,15 +308,12 @@ jobs:
S3BACKEND: file
S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes"
ENABLE_NULL_VERSION_COMPAT_MODE: "${{ matrix.enable-null-compat }}"
JOB_NAME: ${{ matrix.job-name }}
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
@ -258,10 +321,6 @@ jobs:
run: |
set -exu
mkdir -p /tmp/artifacts/${{ matrix.job-name }}/
- name: Setup python test environment
run: |
sudo apt-get install -y libdigest-hmac-perl
pip install 's3cmd==2.3.0'
- name: Setup CI services
run: docker compose up -d
working-directory: .github/docker
@ -288,13 +347,11 @@ jobs:
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -315,22 +372,64 @@ jobs:
source: /tmp/artifacts
if: always()
quota-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
working-directory: .github/docker
- name: Run quota tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
kmip-ft-tests:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: file
S3VAULT: mem
MPU_TESTING: true
MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: 3.9
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy KMIP certs
@ -354,3 +453,81 @@ jobs:
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
ceph-backend-test:
runs-on: ubuntu-latest
needs: build
env:
S3BACKEND: mem
S3DATA: multiple
S3KMS: file
CI_CEPH: 'true'
MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1
with:
ruby-version: '2.5.9'
- name: Install Ruby dependencies
run: |
gem install nokogiri:1.12.5 excon:0.109.0 fog-aws:1.3.0 json mime-types:3.1 rspec:3.5
- name: Install Java dependencies
run: |
sudo apt-get update && sudo apt-get install -y --fix-missing default-jdk maven
- name: Setup CI services
run: docker compose --profile ceph up -d
working-directory: .github/docker
env:
S3METADATA: mongodb
- name: Run Ceph multiple backend tests
run: |-
set -ex -o pipefail;
bash .github/ceph/wait_for_ceph.sh
bash wait_for_local_port.bash 27018 40
bash wait_for_local_port.bash 8000 40
yarn run multiple_backend_test | tee /tmp/artifacts/${{ github.job }}/multibackend-tests.log
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
S3METADATA: mem
- name: Run Java tests
run: |-
set -ex -o pipefail;
mvn test | tee /tmp/artifacts/${{ github.job }}/java-tests.log
working-directory: tests/functional/jaws
- name: Run Ruby tests
run: |-
set -ex -o pipefail;
rspec -fd --backtrace tests.rb | tee /tmp/artifacts/${{ github.job }}/ruby-tests.log
working-directory: tests/functional/fog
- name: Run Javascript AWS SDK tests
run: |-
set -ex -o pipefail;
yarn run ft_awssdk | tee /tmp/artifacts/${{ github.job }}/js-awssdk-tests.log;
yarn run ft_s3cmd | tee /tmp/artifacts/${{ github.job }}/js-s3cmd-tests.log;
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigCeph.json
S3BACKEND: file
S3VAULT: mem
S3METADATA: mongodb
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
with:
method: upload
url: https://artifacts.scality.net
user: ${{ secrets.ARTIFACTS_USER }}
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()

5
.gitignore vendored
View File

@ -22,9 +22,14 @@ coverage
# Compiled binary addons (http://nodejs.org/api/addons.html)
build/Release
# Sphinx build dir
_build
# Dependency directory
# https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git
node_modules
yarn.lock
.tox
# Junit directory
junit

View File

@ -1,22 +1,19 @@
FROM node:16.13.2-slim
MAINTAINER Giorgio Regni <gr@scality.com>
ARG NODE_VERSION=16.20-bullseye-slim
FROM node:${NODE_VERSION} as builder
WORKDIR /usr/src/app
# Keep the .git directory in order to properly report version
COPY ./package.json yarn.lock ./
ENV PYTHON=python3.9
ENV PY_VERSION=3.9.7
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
jq \
python \
git \
build-essential \
ssh \
ca-certificates \
curl \
git \
gnupg2 \
jq \
python3 \
ssh \
wget \
libffi-dev \
zlib1g-dev \
@ -24,28 +21,40 @@ RUN apt-get update \
&& mkdir -p /root/ssh \
&& ssh-keyscan -H github.com > /root/ssh/known_hosts
RUN cd /tmp \
&& wget https://www.python.org/ftp/python/$PY_VERSION/Python-$PY_VERSION.tgz \
&& tar -C /usr/local/bin -xzvf Python-$PY_VERSION.tgz \
&& cd /usr/local/bin/Python-$PY_VERSION \
&& ./configure --enable-optimizations \
&& make \
&& make altinstall \
&& rm -rf /tmp/Python-$PY_VERSION.tgz
ENV PYTHON=python3
COPY package.json yarn.lock /usr/src/app/
RUN npm install typescript -g
RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1
RUN yarn cache clean \
&& yarn install --production --ignore-optional --ignore-engines --network-concurrency 1 \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
&& rm -rf ~/.node-gyp \
&& rm -rf /tmp/yarn-*
################################################################################
FROM node:${NODE_VERSION}
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
&& rm -rf /var/lib/apt/lists/*
ENV NO_PROXY localhost,127.0.0.1
ENV no_proxy localhost,127.0.0.1
EXPOSE 8000
EXPOSE 8002
RUN apt-get update && \
apt-get install -y --no-install-recommends \
jq \
tini \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /usr/src/app
# Keep the .git directory in order to properly report version
COPY . /usr/src/app
COPY --from=builder /usr/src/app/node_modules ./node_modules/
COPY ./ ./
VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
CMD [ "yarn", "start" ]
ENTRYPOINT ["tini", "--", "/usr/src/app/docker-entrypoint.sh"]
EXPOSE 8000 8002
CMD [ "yarn", "start" ]

View File

@ -1,6 +1,7 @@
# S3 Healthcheck
Scality S3 exposes a healthcheck route `/_/healthcheck` which returns a
Scality S3 exposes a healthcheck route `/live` on the port used
for the metrics (defaults to port 8002) which returns a
response with HTTP code
- 200 OK
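For example, against a local instance with the default metrics port:
```shell
# Expect "HTTP/1.1 200 OK" while the server is healthy
curl -i http://localhost:8002/live
```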

165
README.md
View File

@ -1,12 +1,7 @@
# Zenko CloudServer
# Zenko CloudServer with Vitastor Backend
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![CircleCI][badgepub]](https://circleci.com/gh/scality/S3)
[![Scality CI][badgepriv]](http://ci.ironmann.io/gh/scality/S3)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/scality/s3server/)
[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -16,125 +11,71 @@ Scality's Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud.
CloudServer is useful for developers, either to run as part of a
continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage enabled
applications on the go.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
backend support.
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
## Quick Start with Vitastor
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
The Vitastor backend is still experimental, but you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs);
it works too 😊.
## Docker
Installation instructions:
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/scality/s3server/)
### Install Vitastor
## Contributing
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
### Install Zenko with Vitastor Backend
## Installation
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
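Taken together, the steps above might look roughly like this (directory names follow the repository names; adjust paths to your layout):
```shell
git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor
cd zenko-cloudserver-vitastor && npm install --omit dev && cd ..
# Vitastor node.js binding: requires node-gyp and vitastor-client-dev
git clone https://git.yourcmc.ru/vitalif/vitastor
cd vitastor/node-binding && npm install && cd ../..
# Make the binding visible to Zenko as the "vitastor" module
ln -s "$(pwd)/vitastor/node-binding" zenko-cloudserver-vitastor/node_modules/vitastor
```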
### Dependencies
### Install and Configure MongoDB
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
### Clone source code
### Setup Zenko
```shell
git clone https://github.com/scality/S3.git
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
Put the correct values for `pool_id` (the pool ID from the second step) and `metadata_image` (from the third step)
into this file (a short sketch follows below).
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc.)
instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer.
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```
### Install js dependencies
Go to the ./S3 folder,
```shell
yarn install --frozen-lockfile
```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```
If you get an error regarding installation of the diskUsage module,
please install g++.
# Author & License
If you get an error regarding level-down bindings, try clearing your yarn cache:
```shell
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section below to learn how to set
location constraints.
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)

View File

@ -1,2 +1,2 @@
---
theme: jekyll-theme-minimal
theme: jekyll-theme-modernist

56
authdata.json.example Normal file
View File

@ -0,0 +1,56 @@
{
"accounts": [{
"name": "Bart",
"email": "sampleaccount1@sampling.com",
"arn": "arn:aws:iam::123456789012:root",
"canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
"shortid": "123456789012",
"keys": [{
"access": "accessKey1",
"secret": "verySecretKey1"
}]
}, {
"name": "Lisa",
"email": "sampleaccount2@sampling.com",
"arn": "arn:aws:iam::123456789013:root",
"canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf",
"shortid": "123456789013",
"keys": [{
"access": "accessKey2",
"secret": "verySecretKey2"
}]
},
{
"name": "Clueso",
"email": "inspector@clueso.info",
"arn": "arn:aws:iam::123456789014:root",
"canonicalID": "http://acs.zenko.io/accounts/service/clueso",
"shortid": "123456789014",
"keys": [{
"access": "cluesoKey1",
"secret": "cluesoSecretKey1"
}]
},
{
"name": "Replication",
"email": "inspector@replication.info",
"arn": "arn:aws:iam::123456789015:root",
"canonicalID": "http://acs.zenko.io/accounts/service/replication",
"shortid": "123456789015",
"keys": [{
"access": "replicationKey1",
"secret": "replicationSecretKey1"
}]
},
{
"name": "Lifecycle",
"email": "inspector@lifecycle.info",
"arn": "arn:aws:iam::123456789016:root",
"canonicalID": "http://acs.zenko.io/accounts/service/lifecycle",
"shortid": "123456789016",
"keys": [{
"access": "lifecycleKey1",
"secret": "lifecycleSecretKey1"
}]
}]
}

View File

@ -0,0 +1,4 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
require('../lib/nfs/utilities.js').createBucketWithNFSEnabled();

108
bin/search_bucket.js Executable file
View File

@ -0,0 +1,108 @@
#!/bin/sh
// 2>/dev/null ; exec "$(which nodejs 2>/dev/null || which node)" "$0" "$@"
'use strict'; // eslint-disable-line strict
const { auth } = require('arsenal');
const commander = require('commander');
const http = require('http');
const https = require('https');
const logger = require('../lib/utilities/logger');
function _performSearch(host,
port,
bucketName,
query,
listVersions,
accessKey,
secretKey,
sessionToken,
verbose, ssl) {
const escapedSearch = encodeURIComponent(query);
const options = {
host,
port,
method: 'GET',
path: `/${bucketName}/?search=${escapedSearch}${listVersions ? '&&versions' : ''}`,
headers: {
'Content-Length': 0,
},
rejectUnauthorized: false,
versions: '',
};
if (sessionToken) {
options.headers['x-amz-security-token'] = sessionToken;
}
const transport = ssl ? https : http;
const request = transport.request(options, response => {
if (verbose) {
logger.info('response status code', {
statusCode: response.statusCode,
});
logger.info('response headers', { headers: response.headers });
}
const body = [];
response.setEncoding('utf8');
response.on('data', chunk => body.push(chunk));
response.on('end', () => {
if (response.statusCode >= 200 && response.statusCode < 300) {
logger.info('Success');
process.stdout.write(body.join(''));
process.exit(0);
} else {
logger.error('request failed with HTTP Status ', {
statusCode: response.statusCode,
body: body.join(''),
});
process.exit(1);
}
});
});
// generateV4Headers expects a request object with a path that does not
// include the query
request.path = `/${bucketName}`;
const requestData = listVersions ? { search: query, versions: '' } : { search: query };
auth.client.generateV4Headers(request, requestData, accessKey, secretKey, 's3');
request.path = `/${bucketName}?search=${escapedSearch}${listVersions ? '&&versions' : ''}`;
if (verbose) {
logger.info('request headers', { headers: request._headers });
}
request.end();
}
/**
* This function is used as a binary to send a request to S3 to perform a
* search on the objects in a bucket
*
* @return {undefined}
*/
function searchBucket() {
// TODO: Include other bucket listing possible query params?
commander
.version('0.0.1')
.option('-a, --access-key <accessKey>', 'Access key id')
.option('-k, --secret-key <secretKey>', 'Secret access key')
.option('-t, --session-token <sessionToken>', 'Session token')
.option('-b, --bucket <bucket>', 'Name of the bucket')
.option('-q, --query <query>', 'Search query')
.option('-h, --host <host>', 'Host of the server')
.option('-p, --port <port>', 'Port of the server')
.option('-s', '--ssl', 'Enable ssl')
.option('-l, --list-versions', 'List all versions of the objects that meet the search query, ' +
'otherwise only list the latest version')
.option('-v, --verbose')
.parse(process.argv);
const { host, port, accessKey, secretKey, sessionToken, bucket, query, listVersions, verbose, ssl } =
commander;
if (!host || !port || !accessKey || !secretKey || !bucket || !query) {
logger.error('missing parameter');
commander.outputHelp();
process.exit(1);
}
_performSearch(host, port, bucket, query, listVersions, accessKey, secretKey, sessionToken, verbose,
ssl);
}
searchBucket();
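As an illustration, the script might be invoked against a local CloudServer like this (keys are the defaults from `authdata.json.example`; the query string is a hypothetical metadata search, so check the search documentation for the exact syntax):
```shell
node bin/search_bucket.js \
  -a accessKey1 -k verySecretKey1 \
  -h localhost -p 8000 \
  -b testbucket \
  -q 'x-amz-meta-color="blue"' \
  -v
```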

View File

@ -1,45 +0,0 @@
---
general:
branches:
ignore:
- /^ultron\/.*/ # Ignore ultron/* branches
artifacts:
- coverage/
machine:
node:
version: 6.13.1
services:
- redis
- docker
ruby:
version: "2.4.1"
environment:
CXX: g++-4.9
ENABLE_LOCAL_CACHE: true
REPORT_TOKEN: report-token-1
hosts:
bucketwebsitetester.s3-website-us-east-1.amazonaws.com: 127.0.0.1
dependencies:
override:
- rm -rf node_modules
- yarn install --frozen-lockfile
post:
- sudo pip install flake8 yamllint
- sudo pip install s3cmd==1.6.1
# fog and ruby testing dependencies
- gem install fog-aws -v 1.3.0
- gem install mime-types -v 3.1
- gem install rspec -v 3.5
- gem install json
# java sdk dependencies
- sudo apt-get install -y -q default-jdk
test:
override:
- docker run --name squid-proxy -d --net=host
--publish 3128:3128 sameersbn/squid:3.3.8-23
- bash tests.bash:
parallel: true

View File

@ -1,23 +0,0 @@
{
"accounts": [{
"name": "Bart",
"email": "sampleaccount1@sampling.com",
"arn": "arn:aws:iam::123456789012:root",
"canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
"shortid": "123456789012",
"keys": [{
"access": "accessKey1",
"secret": "verySecretKey1"
}]
}, {
"name": "Lisa",
"email": "sampleaccount2@sampling.com",
"arn": "arn:aws:iam::123456789013:root",
"canonicalID": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2bf",
"shortid": "123456789013",
"keys": [{
"access": "accessKey2",
"secret": "verySecretKey2"
}]
}]
}

View File

@ -4,13 +4,16 @@
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
"cloudserver-front": "us-east-1",
"s3.docker.test": "us-east-1",
"127.0.0.2": "us-east-1",
"s3.amazonaws.com": "us-east-1"
"s3.amazonaws.com": "us-east-1",
"zenko-cloudserver-replicator": "us-east-1",
"lb": "us-east-1"
},
"websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
"s3-website.us-east-2.amazonaws.com",
@ -25,7 +28,8 @@
"s3-website-eu-west-1.amazonaws.com",
"s3-website-sa-east-1.amazonaws.com",
"s3-website.localhost",
"s3-website.scality.test"],
"s3-website.scality.test",
"zenkoazuretest.blob.core.windows.net"],
"replicationEndpoints": [{
"site": "zenko",
"servers": ["127.0.0.1:8000"],
@ -34,6 +38,14 @@
"site": "us-east-2",
"type": "aws_s3"
}],
"backbeat": {
"host": "localhost",
"port": 8900
},
"workflowEngineOperator": {
"host": "localhost",
"port": 3001
},
"cdmi": {
"host": "localhost",
"port": 81,
@ -47,7 +59,7 @@
"host": "localhost",
"port": 8500
},
"clusters": 10,
"clusters": 1,
"log": {
"logLevel": "info",
"dumpLevel": "error"
@ -63,6 +75,10 @@
"host": "localhost",
"port": 9991
},
"pfsClient": {
"host": "localhost",
"port": 9992
},
"metadataDaemon": {
"bindAddress": "localhost",
"port": 9990
@ -71,10 +87,47 @@
"bindAddress": "localhost",
"port": 9991
},
"pfsDaemon": {
"bindAddress": "localhost",
"port": 9992
},
"recordLog": {
"enabled": false,
"enabled": true,
"recordLogName": "s3-recordlog"
},
"mongodb": {
"replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "metadata"
},
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],

71
config.json.vitastor Normal file
View File

@ -0,0 +1,71 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@ -39,6 +39,8 @@ const constants = {
// once the multipart upload is complete.
mpuBucketPrefix: 'mpuShadowBucket',
blacklistedPrefixes: { bucket: [], object: [] },
// GCP Object Tagging Prefix
gcpTaggingPrefix: 'aws-tag-',
// PublicId is used as the canonicalID for a request that contains
// no authentication information. Requestor can access
// only public resources
@ -64,14 +66,20 @@ const constants = {
// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
minimumAllowedPartSize: 5242880,
// AWS sets a maximum total parts limit
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
maximumAllowedPartCount: 10000,
gcpMaximumAllowedPartCount: 1024,
// Max size on put part or copy part is 5GB. For functional
// testing use 110 MB as max
maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 110100480 :
5368709120,
// AWS sets a maximum total parts limit
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
maximumAllowedPartCount: 10000,
// Max size allowed in a single put object request is 5GB
// https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
maximumAllowedUploadSize: 5368709120,
// AWS states max size for user-defined metadata (x-amz-meta- headers) is
// 2 KB: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
@ -96,7 +104,6 @@ const constants = {
'policyStatus',
'publicAccessBlock',
'requestPayment',
'restore',
'torrent',
],
@ -109,11 +116,21 @@ const constants = {
],
// user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
objectLocationConstraintHeader: 'x-amz-storage-class',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties
// (if any, otherwise an empty object)
serviceAccountProperties: {
replication: {},
lifecycle: {},
gc: {},
'md-ingestion': {
canReplicate: true,
},
},
/* eslint-disable camelcase */
externalBackends: { aws_s3: true, azure: true, gcp: true },
externalBackends: { aws_s3: true, azure: true, gcp: true, pfs: true, dmf: true, azure_archive: true },
// some of the available data backends (if called directly rather
// than through the multiple backend gateway) need a key provided
// as a string as first parameter of the get/delete methods.
@ -122,13 +139,19 @@ const constants = {
// for external backends, don't call unless at least 1 minute
// (60,000 milliseconds) since last call
externalBackendHealthCheckInterval: 60000,
versioningNotImplBackends: { azure: true },
mpuMDStoredExternallyBackend: { aws_s3: true },
versioningNotImplBackends: { azure: true, gcp: true },
mpuMDStoredExternallyBackend: { aws_s3: true, gcp: true },
skipBatchDeleteBackends: { azure: true, gcp: true },
s3HandledBackends: { azure: true, gcp: true },
hasCopyPartBackends: { aws_s3: true, gcp: true },
/* eslint-enable camelcase */
mpuMDStoredOnS3Backend: { azure: true },
azureAccountNameRegex: /^[a-z0-9]{3,24}$/,
base64Regex: new RegExp('^(?:[A-Za-z0-9+/]{4})*' +
'(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$'),
productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
// location constraint delimiter
zenkoSeparator: ':',
// user metadata applied on zenko objects
zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
bucketOwnerActions: [
@ -181,16 +204,13 @@ const constants = {
'objectPutRetention',
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
multiObjectDeleteConcurrency: 50,
allowedRestoreObjectRequestTierValues: ['Standard'],
lifecycleListing: {
CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',
ORPHAN_DM_TYPE: 'orphan',
},
multiObjectDeleteConcurrency: 50,
maxScannedLifecycleListingEntries: 10000,
overheadField: [
'content-length',
@ -211,11 +231,18 @@ const constants = {
]),
ipv4Regex: /^(\d{1,3}\.){3}\d{1,3}(\/(3[0-2]|[12]?\d))?$/,
ipv6Regex: /^([\da-f]{1,4}:){7}[\da-f]{1,4}$/i,
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
actionsToConsiderAsObjectPut: [
'initiateMultipartUpload',
'objectPutPart',
'completeMultipartUpload',
],
// if requester is not bucket owner, bucket policy actions should be denied with
// MethodNotAllowed error
onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
};
module.exports = constants;
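Because `objectLocationConstraintHeader` is now `x-amz-storage-class`, a client can pick the backend per object with an ordinary storage-class option instead of the old `x-amz-meta-scal-location-constraint` header. A rough sketch with the AWS CLI (assuming a storage class named STANDARD is defined in `locationConfig.json` and the default test credentials are configured):
```shell
aws --endpoint-url http://localhost:8000 \
    s3 cp ./file.bin s3://testbucket/file.bin \
    --storage-class STANDARD
```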

View File

@ -17,13 +17,17 @@ process.on('uncaughtException', err => {
if (config.backends.data === 'file' ||
(config.backends.data === 'multiple' &&
config.backends.metadata !== 'scality')) {
const dataServer = new arsenal.network.rest.RESTServer(
{ bindAddress: config.dataDaemon.bindAddress,
const dataServer = new arsenal.network.rest.RESTServer({
bindAddress: config.dataDaemon.bindAddress,
port: config.dataDaemon.port,
dataStore: new arsenal.storage.data.file.DataFileStore(
{ dataPath: config.dataDaemon.dataPath,
log: config.log }),
log: config.log });
dataStore: new arsenal.storage.data.file.DataFileStore({
dataPath: config.dataDaemon.dataPath,
log: config.log,
noSync: config.dataDaemon.noSync,
noCache: config.dataDaemon.noCache,
}),
log: config.log,
});
dataServer.setup(err => {
if (err) {
logger.error('Error initializing REST data server',

View File

@ -6,14 +6,15 @@ set -e
# modifying config.json
JQ_FILTERS_CONFIG="."
# ENDPOINT var can accept comma separated values
# for multiple endpoint locations
if [[ "$ENDPOINT" ]]; then
HOST_NAME="$ENDPOINT"
fi
if [[ "$HOST_NAME" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$HOST_NAME\"]=\"us-east-1\""
echo "Host name has been modified to $HOST_NAME"
echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with $HOST_NAME"
IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
for host in "${HOST_NAMES[@]}"; do
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
done
echo "Host name has been modified to ${HOST_NAMES[@]}"
echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with ${HOST_NAMES[@]}"
fi
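# Illustrative example (not part of the original script): starting the container
# with ENDPOINT="s3.example.com,alt.example.com" adds both hostnames to
# restEndpoints, each mapped to us-east-1, via the loop above.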
if [[ "$LOG_LEVEL" ]]; then
@ -25,7 +26,7 @@ if [[ "$LOG_LEVEL" ]]; then
fi
fi
if [[ "$SSL" && "$HOST_NAME" ]]; then
if [[ "$SSL" && "$HOST_NAMES" ]]; then
# This condition makes sure that the certificates are not generated twice. (for docker restart)
if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
# Compute config for utapi tests
@ -36,15 +37,15 @@ prompt = no
req_extensions = s3_req
[req_distinguished_name]
CN = ${HOST_NAME}
CN = ${HOST_NAMES[0]}
[s3_req]
subjectAltName = @alt_names
extendedKeyUsage = serverAuth, clientAuth
[alt_names]
DNS.1 = *.${HOST_NAME}
DNS.2 = ${HOST_NAME}
DNS.1 = *.${HOST_NAMES[0]}
DNS.2 = ${HOST_NAMES[0]}
EOF
@ -70,9 +71,14 @@ fi
if [[ "$LISTEN_ADDR" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsDaemon.bindAddress=\"$LISTEN_ADDR\""
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .listenOn=[\"$LISTEN_ADDR:8000\"]"
fi
if [[ "$REPLICATION_GROUP_ID" ]] ; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .replicationGroupId=\"$REPLICATION_GROUP_ID\""
fi
if [[ "$DATA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .dataClient.host=\"$DATA_HOST\""
fi
@ -81,6 +87,22 @@ if [[ "$METADATA_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
fi
if [[ "$PFSD_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .pfsClient.host=\"$PFSD_HOST\""
fi
if [[ "$MONGODB_HOSTS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
fi
if [[ "$MONGODB_RS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSet=\"$MONGODB_RS\""
fi
if [[ "$MONGODB_DATABASE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.database=\"$MONGODB_DATABASE\""
fi
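# Illustrative example (not part of the original script): point the container
# at an external replica set at startup:
#   docker run -e MONGODB_HOSTS=mongo1:27017,mongo2:27017,mongo3:27017 \
#     -e MONGODB_RS=rs0 -e MONGODB_DATABASE=metadata ... zenko/cloudserver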
if [ -z "$REDIS_HA_NAME" ]; then
REDIS_HA_NAME='mymaster'
fi
@ -113,12 +135,68 @@ if [[ "$RECORDLOG_ENABLED" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .recordLog.enabled=true"
fi
if [[ -n "$BUCKET_DENY_FILTER" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
if [[ "$STORAGE_LIMIT_ENABLED" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.metrics[.utapi.metrics | length]=\"location\""
fi
if [[ "$BUCKETD_BOOTSTRAP" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .bucketd.bootstrap=[\"$BUCKETD_BOOTSTRAP\""]
if [[ "$CRR_METRICS_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.host=\"$CRR_METRICS_HOST\""
fi
if [[ "$CRR_METRICS_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
fi
if [[ "$WE_OPERATOR_HOST" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.host=\"$WE_OPERATOR_HOST\""
fi
if [[ "$WE_OPERATOR_PORT" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .workflowEngineOperator.port=$WE_OPERATOR_PORT"
fi
if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
fi
# external backends http(s) agent config
# AWS
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAlive=$AWS_S3_HTTPAGENT_KEEPALIVE"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAliveMsecs=$AWS_S3_HTTPAGENT_KEEPALIVE_MS"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi
if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxFreeSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi
#GCP
if [[ "$GCP_HTTPAGENT_KEEPALIVE" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAlive=$GCP_HTTPAGENT_KEEPALIVE"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAliveMsecs=$GCP_HTTPAGENT_KEEPALIVE_MS"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
fi
if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
fi
if [[ -n "$BUCKET_DENY_FILTER" ]]; then
JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.filter.deny.bucket=[\"$BUCKET_DENY_FILTER\"]"
fi
if [[ "$TESTING_MODE" ]]; then
@ -130,6 +208,10 @@ if [[ $JQ_FILTERS_CONFIG != "." ]]; then
mv config.json.tmp config.json
fi
if test -v INITIAL_INSTANCE_ID && test -v S3METADATAPATH && ! test -f ${S3METADATAPATH}/uuid ; then
echo -n ${INITIAL_INSTANCE_ID} > ${S3METADATAPATH}/uuid
fi
# s3 secret credentials for Zenko
if [ -r /run/secrets/s3-credentials ] ; then
. /run/secrets/s3-credentials

View File

@ -27,7 +27,7 @@ including null versions and delete markers, described in the above
links.
Implementation of Bucket Versioning in Zenko CloudServer
-----------------------------------------
--------------------------------------------------------
Overview of Metadata and API Component Roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -66,7 +66,7 @@ The second section, `"Implementation of Bucket Versioning in
API" <#implementation-of-bucket-versioning-in-api>`__, describes the way
the metadata options are used in the API within S3 actions to create new
versions, update their metadata, and delete them. The management of null
versions and creation of delete markers are also described in this
versions and creation of delete markers is also described in this
section.
Implementation of Bucket Versioning in Metadata
@ -179,12 +179,13 @@ PUT
the master version with this version.
- ``versionId: <versionId>`` create or update a specific version (for updating
version's ACL or tags, or remote updates in geo-replication)
- if the version identified by ``versionId`` happens to be the latest
* if the version identified by ``versionId`` happens to be the latest
version, the master version will be updated as well
- if the master version is not as recent as the version identified by
* if the master version is not as recent as the version identified by
``versionId``, as may happen with cross-region replication, the master
will be updated as well
- note that with ``versionId`` set to an empty string ``''``, it will
* note that with ``versionId`` set to an empty string ``''``, it will
overwrite the master version only (same as no options, but the master
version will have a ``versionId`` property set in its metadata like
any other version). The ``versionId`` will never be exposed to an
@ -208,10 +209,13 @@ A deletion targeting the latest version of an object has to:
- delete the specified version identified by ``versionId``
- replace the master version with a version that is a placeholder for
deletion
- this version contains a special keyword, 'isPHD', to indicate the
master version was deleted and needs to be updated
- initiate a repair operation to update the value of the master
version:
- involves listing the versions of the object and getting the latest
version to replace the placeholder delete version
- if no more versions exist, metadata deletes the master version,
@ -764,7 +768,7 @@ This will open two ports:
Then, one or more instances of Zenko CloudServer without the dmd can be started
elsewhere with:
::
.. code:: sh
yarn run start_s3server

View File

@ -178,7 +178,7 @@ Ruby
~~~~
`AWS SDK for Ruby - Version 2 <http://docs.aws.amazon.com/sdkforruby/api/>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: ruby
@ -239,6 +239,7 @@ Python
Client integration
.. code:: python
import boto3
client = boto3.client(
@ -253,6 +254,7 @@ Client integration
Full integration (with object mapping)
.. code:: python
import os
from botocore.utils import fix_s3_host
@ -293,3 +295,51 @@ Should force path-style requests even though v3 advertises it does by default.
$client->createBucket(array(
'Bucket' => 'bucketphp',
));
Go
~~
`AWS Go SDK <https://github.com/aws/aws-sdk-go>`__
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. code:: go
package main
import (
"context"
"fmt"
"log"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
func main() {
os.Setenv("AWS_ACCESS_KEY_ID", "accessKey1")
os.Setenv("AWS_SECRET_ACCESS_KEY", "verySecretKey1")
endpoint := "http://localhost:8000"
timeout := time.Duration(10) * time.Second
sess := session.Must(session.NewSession())
// Create a context with a timeout that will abort the upload if it takes
// more than the passed in timeout.
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
svc := s3.New(sess, &aws.Config{
Region: aws.String(endpoints.UsEast1RegionID),
Endpoint: &endpoint,
})
out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
if err != nil {
log.Fatal(err)
} else {
fmt.Println(out)
}
}

View File

@ -14,7 +14,7 @@ Got an idea? Get started!
In order to contribute, please follow the `Contributing
Guidelines <https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md>`__.
If anything is unclear to you, reach out to us on
`slack <https://zenko-io.slack.com/>`__ or via a GitHub issue.
`forum <https://forum.zenko.io/>`__ or via a GitHub issue.
Don't write code? There are other ways to help!
-----------------------------------------------

View File

@ -1,11 +1,7 @@
Docker
======
- `Environment Variables <#environment-variables>`__
- `Tunables and setup tips <#tunables-and-setup-tips>`__
- `Examples for continuous integration with
Docker <#continuous-integration-with-docker-hosted CloudServer>`__
- `Examples for going in production with Docker <#in-production-with-docker-hosted CloudServer>`__
.. _environment-variables:
Environment Variables
---------------------
@ -15,25 +11,27 @@ S3DATA
S3DATA=multiple
^^^^^^^^^^^^^^^
Allows you to run Scality Zenko CloudServer with multiple data backends, defined
This variable enables running CloudServer with multiple data backends, defined
as regions.
When using multiple data backends, a custom ``locationConfig.json`` file is
mandatory. It will allow you to set custom regions. You will then need to
provide associated rest_endpoints for each custom region in your
``config.json`` file.
`Learn more about multiple backends configuration <../GETTING_STARTED/#location-configuration>`__
If you are using Scality RING endpoints, please refer to your customer
documentation.
For multiple data backends, a custom locationConfig.json file is required.
This file enables you to set custom regions. You must provide associated
rest_endpoints for each custom region in config.json.
Running it with an AWS S3 hosted backend
""""""""""""""""""""""""""""""""""""""""
To run CloudServer with an S3 AWS backend, you will have to add a new section
to your ``locationConfig.json`` file with the ``aws_s3`` location type:
`Learn more about multiple-backend configurations <GETTING_STARTED.html#location-configuration>`__
If you are using Scality RING endpoints, refer to your customer documentation.
Running CloudServer with an AWS S3-Hosted Backend
"""""""""""""""""""""""""""""""""""""""""""""""""
To run CloudServer with an S3 AWS backend, add a new section to the
``locationConfig.json`` file with the ``aws_s3`` location type:
.. code:: json
(...)
(...)
"awsbackend": {
"type": "aws_s3",
"details": {
@ -43,121 +41,134 @@ to your ``locationConfig.json`` file with the ``aws_s3`` location type:
"credentialsProfile": "aws_hosted_profile"
}
}
(...)
(...)
You will also have to edit your AWS credentials file to be able to use your
command line tool of choice. This file should mention credentials for all the
backends you're using. You can use several profiles when using multiple
profiles.
Edit your AWS credentials file to enable your preferred command-line tool.
This file must mention credentials for all backends in use. You can define
several profiles when using multiple backends.
.. code:: json
[default]
aws_access_key_id=accessKey1
aws_secret_access_key=verySecretKey1
[aws_hosted_profile]
aws_access_key_id={{YOUR_ACCESS_KEY}}
aws_secret_access_key={{YOUR_SECRET_KEY}}
[default]
aws_access_key_id=accessKey1
aws_secret_access_key=verySecretKey1
[aws_hosted_profile]
aws_access_key_id={{YOUR_ACCESS_KEY}}
aws_secret_access_key={{YOUR_SECRET_KEY}}
Just as you need to mount your locationConfig.json, you will need to mount your
AWS credentials file at run time:
``-v ~/.aws/credentials:/root/.aws/credentials`` on Linux, OS X, or Unix or
As with locationConfig.json, the AWS credentials file must be mounted at
run time: ``-v ~/.aws/credentials:/root/.aws/credentials`` on Unix-like
systems (Linux, OS X, etc.), or
``-v C:\Users\USERNAME\.aws\credential:/root/.aws/credentials`` on Windows
NOTE: One account can't copy to another account with a source and
destination on real AWS unless the account associated with the
access Key/secret Key pairs used for the destination bucket has rights
to get in the source bucket. ACL's would have to be updated
on AWS directly to enable this.
.. note:: One account cannot copy to another account with a source and
destination on real AWS unless the account associated with the
accessKey/secretKey pairs used for the destination bucket has source
bucket access privileges. To enable this, update ACLs directly on AWS.
S3BACKEND
~~~~~~
~~~~~~~~~
S3BACKEND=file
^^^^^^^^^^^
When storing file data, for it to be persistent you must mount docker volumes
for both data and metadata. See `this section <#using-docker-volumes-in-production>`__
^^^^^^^^^^^^^^
For stored file data to persist, you must mount Docker volumes
for both data and metadata. See :ref:`In Production with a Docker-Hosted CloudServer <in-production-w-a-Docker-hosted-cloudserver>`
S3BACKEND=mem
^^^^^^^^^^
This is ideal for testing - no data will remain after container is shutdown.
^^^^^^^^^^^^^
This is ideal for testing: no data remains after the container is shut down.
ENDPOINT
~~~~~~~~
This variable specifies your endpoint. If you have a domain such as
new.host.com, by specifying that here, you and your users can direct s3
server requests to new.host.com.
This variable specifies the endpoint. To direct CloudServer requests to
new.host.com, for example, specify the endpoint with:
.. code:: shell
.. code-block:: shell
docker run -d --name s3server -p 8000:8000 -e ENDPOINT=new.host.com scality/s3server
$ docker run -d --name cloudserver -p 8000:8000 -e ENDPOINT=new.host.com zenko/cloudserver
Note: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
permissions, make sure to associate 127.0.0.1 with ``new.host.com``
.. note:: On Unix-like systems (Linux, OS X, etc.) edit /etc/hosts
to associate 127.0.0.1 with new.host.com.
REMOTE_MANAGEMENT_DISABLE
~~~~~~~~~~~~~~~~~~~~~~~~~
CloudServer is a part of `Zenko <https://www.zenko.io/>`__. When you run CloudServer standalone, it still tries to connect to Orbit (the browser-based graphical user interface for Zenko) by default.
Setting this variable to true (or 1) makes CloudServer default to accessKey1 and verySecretKey1 for credentials and disables the automatic Orbit management:
.. code-block:: shell
$ docker run -d --name cloudserver -p 8000:8000 -e REMOTE_MANAGEMENT_DISABLE=1 zenko/cloudserver
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These variables specify authentication credentials for an account named
"CustomAccount".
“CustomAccount”.
You can set credentials for many accounts by editing
``conf/authdata.json`` (see below for further info), but if you just
want to specify one set of your own, you can use these environment
variables.
Set account credentials for multiple accounts by editing conf/authdata.json
(see below for further details). To specify one set for personal use, set these
environment variables:
.. code:: shell
.. code-block:: shell
docker run -d --name s3server -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey scality/s3server
$ docker run -d --name cloudserver -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey \
-e SCALITY_SECRET_ACCESS_KEY=newSecretKey zenko/cloudserver
Note: Anything in the ``authdata.json`` file will be ignored. Note: The
old ``ACCESS_KEY`` and ``SECRET_KEY`` environment variables are now
deprecated
.. note:: This takes precedence over the contents of the authdata.json
file. The authdata.json file is ignored.
.. note:: The ACCESS_KEY and SECRET_KEY environment variables are
deprecated.
LOG\_LEVEL
~~~~~~~~~~
This variable allows you to change the log level: info, debug or trace.
The default is info. Debug will give you more detailed logs and trace
will give you the most detailed.
This variable changes the log level. There are three levels: info, debug,
and trace. The default is info. Debug provides more detailed logs, and trace
provides the most detailed logs.
.. code:: shell
.. code-block:: shell
docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
$ docker run -d --name cloudserver -p 8000:8000 -e LOG_LEVEL=trace zenko/cloudserver
SSL
~~~
This variable set to true allows you to run S3 with SSL:
When set to true, this variable runs CloudServer with SSL.
**Note1**: You also need to specify the ENDPOINT environment variable.
**Note2**: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
permissions, make sure to associate 127.0.0.1 with ``<YOUR_ENDPOINT>``
If SSL is set to true:
**Warning**: These certs, being self-signed (and the CA being generated
inside the container) will be untrusted by any clients, and could
disappear on a container upgrade. That's ok as long as it's for quick
testing. Also, best security practice for non-testing would be to use an
extra container to do SSL/TLS termination such as haproxy/nginx/stunnel
to limit what an exploit on either component could expose, as well as
certificates in a mounted volume
* The ENDPOINT environment variable must also be specified.
* On Unix-like systems (Linux, OS X, etc.), 127.0.0.1 must be associated with
<YOUR_ENDPOINT> in /etc/hosts.
.. Warning:: Self-signed certs with a CA generated within the container are
suitable for testing purposes only. Clients cannot trust them, and they may
disappear altogether on a container upgrade. The best security practice for
production environments is to use an extra container, such as
haproxy/nginx/stunnel, for SSL/TLS termination and to pull certificates
from a mounted volume, limiting what an exploit on either component
can expose.
.. code:: shell
docker run -d --name s3server -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT>
scality/s3server
$ docker run -d --name cloudserver -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT> \
zenko/cloudserver
More information about how to use S3 server with SSL
`here <https://s3.scality.com/v1.0/page/scality-with-ssl>`__
For more information about using CloudServer with SSL, see `Using SSL <GETTING_STARTED.html#Using SSL>`__
LISTEN\_ADDR
~~~~~~~~~~~~
This variable instructs the Zenko CloudServer, and its data and metadata
components to listen on the specified address. This allows starting the data
or metadata servers as standalone services, for example.
This variable causes CloudServer and its data and metadata components to
listen on the specified address. This allows starting the data or metadata
servers as standalone services, for example.
.. code:: shell
@ -174,8 +185,8 @@ Zenko CloudServer.
.. code:: shell
docker run -d --name s3server -e DATA_HOST=s3server-data
-e METADATA_HOST=s3server-metadata scality/s3server npm run start_s3server
$ docker run -d --name cloudserver -e DATA_HOST=cloudserver-data \
-e METADATA_HOST=cloudserver-metadata zenko/cloudserver yarn run start_s3server
REDIS\_HOST
~~~~~~~~~~~
@ -185,19 +196,21 @@ localhost.
.. code:: shell
docker run -d --name s3server -p 8000:8000
-e REDIS_HOST=my-redis-server.example.com scality/s3server
$ docker run -d --name cloudserver -p 8000:8000 \
-e REDIS_HOST=my-redis-server.example.com zenko/cloudserver
REDIS\_PORT
~~~~~~~~~~~
Use this variable to connect to the redis cache server on another port than
the default 6379.
Use this variable to connect to the Redis cache server on a port other
than the default 6379.
.. code:: shell
docker run -d --name s3server -p 8000:8000
-e REDIS_PORT=6379 scality/s3server
$ docker run -d --name cloudserver -p 8000:8000 \
-e REDIS_PORT=6379 zenko/cloudserver
.. _tunables-and-setup-tips:
Tunables and Setup Tips
-----------------------
@ -205,60 +218,57 @@ Tunables and Setup Tips
Using Docker Volumes
~~~~~~~~~~~~~~~~~~~~
Zenko CloudServer runs with a file backend by default.
CloudServer runs with a file backend by default, meaning that data is
stored inside the CloudServer's Docker container.
So, by default, the data is stored inside your Zenko CloudServer Docker
container.
For data and metadata to persist, they must be hosted in Docker
volumes outside the CloudServer's Docker container. Otherwise, the data
and metadata are destroyed when the container is erased.
However, if you want your data and metadata to persist, you **MUST** use
Docker volumes to host your data and metadata outside your Zenko CloudServer
Docker container. Otherwise, the data and metadata will be destroyed
when you erase the container.
.. code-block:: shell
.. code:: shell
$ docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
-p 8000:8000 -d zenko/cloudserver
docker run -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
-p 8000:8000 -d scality/s3server
This command mounts the ./data host directory to the container
at /usr/src/app/localData and the ./metadata host directory to
the container at /usr/src/app/localMetaData.
This command mounts the host directory, ``./data``, into the container
at ``/usr/src/app/localData`` and the host directory, ``./metadata``, into
the container at ``/usr/src/app/localMetaData``. It can also be any host
mount point, like ``/mnt/data`` and ``/mnt/metadata``.
.. tip:: These host directories can be mounted to any accessible mount
point, such as /mnt/data and /mnt/metadata, for example.
Adding modifying or deleting accounts or users credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adding, Modifying, or Deleting Accounts or Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1. Create locally a customized ``authdata.json`` based on our ``/conf/authdata.json``.
1. Create a customized authdata.json file locally based on /conf/authdata.json.
2. Use `Docker volumes <https://docs.docker.com/storage/volumes/>`__
to override the default ``authdata.json`` through a Docker file mapping.
2. Use `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__
to override the default ``authdata.json`` through a docker file mapping.
For example:
.. code:: shell
.. code-block:: shell
docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
scality/s3server
$ docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d \
zenko/cloudserver
Specifying your own host name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Specifying a Host Name
~~~~~~~~~~~~~~~~~~~~~~
To specify a host name (e.g. s3.domain.name), you can provide your own
`config.json <https://github.com/scality/S3/blob/master/config.json>`__
using `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__.
To specify a host name (for example, s3.domain.name), provide your own
`config.json <https://github.com/scality/cloudserver/blob/master/config.json>`__
file using `Docker volumes <https://docs.docker.com/storage/volumes/>`__.
First add a new key-value pair in the restEndpoints section of your
config.json. The key in the key-value pair should be the host name you
would like to add and the value is the default location\_constraint for
this endpoint.
First, add a new key-value pair to the restEndpoints section of your
config.json. Make the key the host name you want, and the value the default
location\_constraint for this endpoint.
For example, ``s3.example.com`` is mapped to ``us-east-1`` which is one
of the ``location_constraints`` listed in your locationConfig.json file
`here <https://github.com/scality/S3/blob/master/locationConfig.json>`__.
More information about location configuration
`here <https://github.com/scality/S3/blob/master/README.md#location-configuration>`__
For more information about location configuration, see:
`GETTING STARTED <GETTING_STARTED.html#location-configuration>`__
.. code:: json
@ -266,33 +276,33 @@ More information about location configuration
"localhost": "file",
"127.0.0.1": "file",
...
"s3.example.com": "us-east-1"
"cloudserver.example.com": "us-east-1"
},
Then, run your Scality S3 Server using `Docker
Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__:
Next, run CloudServer using a `Docker volume
<https://docs.docker.com/engine/tutorials/dockervolumes/>`__:
.. code:: shell
.. code-block:: shell
docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server
$ docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d zenko/cloudserver
Your local ``config.json`` file will override the default one through a
docker file mapping.
The local ``config.json`` file overrides the default one through a Docker
file mapping.
Running as an unprivileged user
Running as an Unprivileged User
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Zenko CloudServer runs as root by default.
CloudServer runs as root by default.
You can change that by modifying the dockerfile and specifying a user
before the entrypoint.
To change this, modify the dockerfile and specify a user before the
entry point.
The user needs to exist within the container, and own the folder
**/usr/src/app** for Scality Zenko CloudServer to run properly.
The user must exist within the container, and must own the
/usr/src/app directory for CloudServer to run.
For instance, you can modify these lines in the dockerfile:
For example, the following dockerfile lines can be modified:
.. code:: shell
.. code-block:: shell
...
&& groupadd -r -g 1001 scality \
@ -304,54 +314,58 @@ For instance, you can modify these lines in the dockerfile:
USER scality
ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
Continuous integration with Docker hosted CloudServer
-----------------------------------------------------
.. _continuous-integration-with-docker-hosted-cloudserver:
When you start the Docker Scality Zenko CloudServer image, you can adjust the
configuration of the Scality Zenko CloudServer instance by passing one or more
environment variables on the docker run command line.
Continuous Integration with a Docker-Hosted CloudServer
-------------------------------------------------------
Sample ways to run it for CI are:
When you start the Docker CloudServer image, you can adjust the
configuration of the CloudServer instance by passing one or more
environment variables on the ``docker run`` command line.
- With custom locations (one in-memory, one hosted on AWS), and custom
credentials mounted:
.. code:: shell
To run CloudServer for CI with custom locations (one in-memory,
one hosted on AWS), and custom credentials mounted:
docker run --name CloudServer -p 8000:8000
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json
-v ~/.aws/credentials:/root/.aws/credentials
-e S3DATA=multiple -e S3BACKEND=mem scality/s3server
.. code-block:: shell
- With custom locations, (one in-memory, one hosted on AWS, one file),
and custom credentials set as environment variables
(see `this section <#scality-access-key-id-and-scality-secret-access-key>`__):
$ docker run --name CloudServer -p 8000:8000 \
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
-v ~/.aws/credentials:/root/.aws/credentials \
-e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver
.. code:: shell
To run CloudServer for CI with custom locations (one in-memory, one
hosted on AWS, and one file), and custom credentials `set as environment
variables <GETTING_STARTED.html#scality-access-key-id-and-scality-secret-access-key>`__:
docker run --name CloudServer -p 8000:8000
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
-v ~/.aws/credentials:/root/.aws/credentials
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
-e SCALITY_ACCESS_KEY_ID=accessKey1
-e SCALITY_SECRET_ACCESS_KEY=verySecretKey1
-e S3DATA=multiple -e S3BACKEND=mem scality/s3server
.. code-block:: shell
In production with Docker hosted CloudServer
--------------------------------------------
$ docker run --name CloudServer -p 8000:8000 \
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
-v ~/.aws/credentials:/root/.aws/credentials \
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
-e SCALITY_ACCESS_KEY_ID=accessKey1 \
-e SCALITY_SECRET_ACCESS_KEY=verySecretKey1 \
-e S3DATA=multiple -e S3BACKEND=mem zenko/cloudserver
In production, we expect that data will be persistent, that you will use the
multiple backends capabilities of Zenko CloudServer, and that you will have a
custom endpoint for your local storage, and custom credentials for your local
storage:
.. _in-production-w-a-Docker-hosted-cloudserver:
.. code:: shell
In Production with a Docker-Hosted CloudServer
----------------------------------------------
docker run -d --name CloudServer
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json
-v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple
-e ENDPOINT=custom.endpoint.com
-p 8000:8000 scality/s3server
In production, data must persist, so use CloudServer's multiple-backend
capabilities together with a custom endpoint and custom credentials for
local storage. Customize these with:
.. code-block:: shell
$ docker run -d --name CloudServer \
-v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata \
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
-v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json \
-v ~/.aws/credentials:/root/.aws/credentials -e S3DATA=multiple \
-e ENDPOINT=custom.endpoint.com \
-p 8000:8000 zenko/cloudserver
@ -1,193 +1,201 @@
Getting Started
=================
===============
.. figure:: ../res/scality-cloudserver-logo.png
:alt: Zenko CloudServer logo
|CircleCI| |Scality CI|
Dependencies
------------
Building and running the Scality Zenko CloudServer requires node.js 10.x and
yarn v1.17.x. Up-to-date versions can be found at
`Nodesource <https://github.com/nodesource/distributions>`__.
Installation
------------
Dependencies
~~~~~~~~~~~~
1. Clone the source code
Building and running the Scality Zenko CloudServer requires node.js 6.9.5 and
npm v3 . Up-to-date versions can be found at
`Nodesource <https://github.com/nodesource/distributions>`__.
.. code-block:: shell
Clone source code
~~~~~~~~~~~~~~~~~
$ git clone https://github.com/scality/cloudserver.git
.. code:: shell
2. Go to the cloudserver directory and use yarn to install the js dependencies.
git clone https://github.com/scality/S3.git
.. code-block:: shell
Install js dependencies
~~~~~~~~~~~~~~~~~~~~~~~
$ cd cloudserver
$ yarn install
Go to the ./S3 folder,
Running CloudServer with a File Backend
---------------------------------------
.. code:: shell
.. code-block:: shell
npm install
$ yarn start
Run it with a file backend
--------------------------
This starts a Zenko CloudServer on port 8000. Two additional ports, 9990
and 9991, are also open locally for internal transfer of metadata and
data, respectively.
.. code:: shell
The default access key is accessKey1. The secret key is verySecretKey1.
npm start
By default, metadata files are saved in the localMetadata directory and
data files are saved in the localData directory in the local ./cloudserver
directory. These directories are pre-created within the repository. To
save data or metadata in different locations, you must specify them using
absolute paths. Thus, when starting the server:
This starts an Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
.. code-block:: shell
The default access key is accessKey1 with a secret key of
verySecretKey1.
$ mkdir -m 700 $(pwd)/myFavoriteDataPath
$ mkdir -m 700 $(pwd)/myFavoriteMetadataPath
$ export S3DATAPATH="$(pwd)/myFavoriteDataPath"
$ export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
$ yarn start
By default the metadata files will be saved in the localMetadata
directory and the data files will be saved in the localData directory
within the ./S3 directory on your machine. These directories have been
pre-created within the repository. If you would like to save the data or
metadata in different locations of your choice, you must specify them
with absolute paths. So, when starting the server:
Running CloudServer with Multiple Data Backends
-----------------------------------------------
.. code:: shell
.. code-block:: shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
npm start
$ export S3DATA='multiple'
$ yarn start
Run it with multiple data backends
----------------------------------
This starts a Zenko CloudServer on port 8000.
.. code:: shell
The default access key is accessKey1. The secret key is verySecretKey1.
export S3DATA='multiple'
npm start
With multiple backends, you can choose where each object is saved by setting
the following header with a location constraint in a PUT request:
This starts an Zenko CloudServer on port 8000. The default access key is
accessKey1 with a secret key of verySecretKey1.
With multiple backends, you have the ability to choose where each object
will be saved by setting the following header with a locationConstraint
on a PUT request:
.. code:: shell
.. code-block:: shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
If no header is sent with a PUT object request, the location constraint
of the bucket will determine where the data is saved. If the bucket has
no location constraint, the endpoint of the PUT request will be used to
determine location.
If no header is sent with a PUT object request, the buckets location
constraint determines where the data is saved. If the bucket has no
location constraint, the endpoint of the PUT request determines location.
See the Configuration section below to learn how to set location
constraints.
See the Configuration_ section to set location constraints.
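For example, a minimal sketch using the AWS SDK for JavaScript (the bucket
name is illustrative, and the SDK sends each ``Metadata`` entry as an
``x-amz-meta-*`` header):

.. code:: js

const AWS = require('aws-sdk');

// Client pointed at the local CloudServer; path-style requests are
// required because the endpoint is an IP address
const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

// The 'scal-location-constraint' entry is sent as the
// x-amz-meta-scal-location-constraint header
s3.putObject({
    Bucket: 'mybucket',
    Key: 'myobject',
    Body: 'hello world',
    Metadata: { 'scal-location-constraint': 'myLocationConstraint' },
}, (err, data) => console.log(err || data));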
Run it with an in-memory backend
--------------------------------
Run CloudServer with an In-Memory Backend
-----------------------------------------
.. code-block:: shell
$ yarn run mem_backend
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1. The secret key is verySecretKey1.
Run CloudServer with Vault User Management
------------------------------------------
.. code:: shell
npm run mem_backend
export S3VAULT=vault
yarn start
This starts an Zenko CloudServer on port 8000. The default access key is
accessKey1 with a secret key of verySecretKey1.
Note: Vault is proprietary and must be accessed separately.
This starts a Zenko CloudServer using Vault for user management.
Run it for continuous integration testing or in production with Docker
----------------------------------------------------------------------
Run CloudServer for Continuous Integration Testing or in Production with Docker
-------------------------------------------------------------------------------
`DOCKER <../DOCKER/>`__
Run Cloudserver with `DOCKER <DOCKER.html>`__
Testing
-------
~~~~~~~
You can run the unit tests with the following command:
Run unit tests with the command:
.. code:: shell
.. code-block:: shell
npm test
$ yarn test
You can run the multiple backend unit tests with:
Run multiple-backend unit tests with:
.. code:: shell
CI=true S3DATA=multiple npm start
npm run multiple_backend_test
.. code-block:: shell
You can run the linter with:
$ CI=true S3DATA=multiple yarn start
$ yarn run multiple_backend_test
.. code:: shell
Run the linter with:
npm run lint
.. code-block:: shell
Running functional tests locally:
$ yarn run lint
For the AWS backend and Azure backend tests to pass locally,
you must modify tests/locationConfigTests.json so that awsbackend
specifies a bucketname of a bucket you have access to based on
your credentials profile and modify "azurebackend" with details
for your Azure account.
Running Functional Tests Locally
--------------------------------
To pass AWS and Azure backend tests locally, modify
tests/locationConfig/locationConfigTests.json so that ``awsbackend``
specifies the bucketname of a bucket you have access to based on your
credentials, and modify ``azurebackend`` with details for your Azure account.
The test suite requires additional tools, **s3cmd** and **Redis**
installed in the environment the tests are running in.
- Install `s3cmd <http://s3tools.org/download>`__
- Install `redis <https://redis.io/download>`__ and start Redis.
- Add localCache section to your ``config.json``:
1. Install `s3cmd <http://s3tools.org/download>`__
::
2. Install `redis <https://redis.io/download>`__ and start Redis.
3. Add localCache section to ``config.json``:
.. code:: json
"localCache": {
"host": REDIS_HOST,
"port": REDIS_PORT
}
where ``REDIS_HOST`` is your Redis instance IP address (``"127.0.0.1"``
if your Redis is running locally) and ``REDIS_PORT`` is your Redis
instance port (``6379`` by default)
where ``REDIS_HOST`` is the Redis instance IP address (``"127.0.0.1"``
if Redis is running locally) and ``REDIS_PORT`` is the Redis instance
port (``6379`` by default)
- Add the following to the etc/hosts file on your machine:
4. Add the following to the local etc/hosts file:
.. code:: shell
.. code-block:: shell
127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com
- Start the Zenko CloudServer in memory and run the functional tests:
5. Start Zenko CloudServer in memory and run the functional tests:
.. code:: shell
.. code-block:: shell
CI=true npm run mem_backend
CI=true npm run ft_test
$ CI=true yarn run mem_backend
$ CI=true yarn run ft_test
.. _Configuration:
Configuration
-------------
There are three configuration files for your Scality Zenko CloudServer:
There are three configuration files for Zenko CloudServer:
1. ``conf/authdata.json``, described above for authentication
* ``conf/authdata.json``, for authentication.
2. ``locationConfig.json``, to set up configuration options for
* ``locationConfig.json``, to configure where data is saved.
where data will be saved
* ``config.json``, for general configuration options.
3. ``config.json``, for general configuration options
.. _location-configuration:
Location Configuration
~~~~~~~~~~~~~~~~~~~~~~
You must specify at least one locationConstraint in your
locationConfig.json (or leave as pre-configured).
You must specify at least one locationConstraint in locationConfig.json
(or leave it as pre-configured).
You must also specify 'us-east-1' as a locationConstraint so if you only
define one locationConstraint, that would be it. If you put a bucket to
an unknown endpoint and do not specify a locationConstraint in the put
bucket call, us-east-1 will be used.
You must also specify 'us-east-1' as a locationConstraint. If you put a
bucket to an unknown endpoint and do not specify a locationConstraint in
the PUT bucket call, us-east-1 is used.
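A location constraint can also be supplied explicitly in the PUT bucket call.
A minimal sketch with the AWS SDK for JavaScript (the bucket name and client
settings are illustrative):

.. code:: js

const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

// The LocationConstraint must match a key defined in locationConfig.json
s3.createBucket({
    Bucket: 'mybucket',
    CreateBucketConfiguration: { LocationConstraint: 'myLocationConstraint' },
}, (err, data) => console.log(err || data));

If ``CreateBucketConfiguration`` is omitted, the endpoint's default location
constraint applies, as described below.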
For instance, the following locationConstraint will save data sent to
For instance, the following locationConstraint saves data sent to
``myLocationConstraint`` to the file backend:
.. code:: json
@ -198,17 +206,16 @@ For instance, the following locationConstraint will save data sent to
"details": {}
},
Each locationConstraint must include the ``type``,
``legacyAwsBehavior``, and ``details`` keys. ``type`` indicates which
backend will be used for that region. Currently, mem, file, and scality
are the supported backends. ``legacyAwsBehavior`` indicates whether the
region will have the same behavior as the AWS S3 'us-east-1' region. If
the locationConstraint type is scality, ``details`` should contain
connector information for sproxyd. If the locationConstraint type is mem
or file, ``details`` should be empty.
Each locationConstraint must include the ``type``, ``legacyAwsBehavior``,
and ``details`` keys. ``type`` indicates which backend is used for that
region. Supported backends are mem, file, and scality. ``legacyAwsBehavior``
indicates whether the region behaves the same as the AWS S3 'us-east-1'
region. If the locationConstraint type is ``scality``, ``details`` must
contain connector information for sproxyd. If the locationConstraint type
is ``mem`` or ``file``, ``details`` must be empty.
Once you have your locationConstraints in your locationConfig.json, you
can specify a default locationConstraint for each of your endpoints.
Once locationConstraints is set in locationConfig.json, specify a default
locationConstraint for each endpoint.
For instance, the following sets the ``localhost`` endpoint to the
``myLocationConstraint`` data backend defined above:
@ -219,26 +226,24 @@ For instance, the following sets the ``localhost`` endpoint to the
"localhost": "myLocationConstraint"
},
If you would like to use an endpoint other than localhost for your
Scality Zenko CloudServer, that endpoint MUST be listed in your
``restEndpoints``. Otherwise if your server is running with a:
To use an endpoint other than localhost for Zenko CloudServer, the endpoint
must be listed in ``restEndpoints``. Otherwise, if the server is running
with a:
- **file backend**: your default location constraint will be ``file``
- **memory backend**: your default location constraint will be ``mem``
* **file backend**: The default location constraint is ``file``
* **memory backend**: The default location constraint is ``mem``
Endpoints
~~~~~~~~~
Note that our Zenko CloudServer supports both:
The Zenko CloudServer supports endpoints that are rendered in either:
- path-style: http://myhostname.com/mybucket
- hosted-style: http://mybucket.myhostname.com
* path style: http://myhostname.com/mybucket or
* hosted style: http://mybucket.myhostname.com
However, hosted-style requests will not hit the server if you are using
an ip address for your host. So, make sure you are using path-style
requests in that case. For instance, if you are using the AWS SDK for
JavaScript, you would instantiate your client like this:
However, if an IP address is specified for the host, hosted-style requests
cannot reach the server. Use path-style requests in that case. For example,
if you are using the AWS SDK for JavaScript, instantiate your client like this:
.. code:: js
@ -247,87 +252,99 @@ JavaScript, you would instantiate your client like this:
s3ForcePathStyle: true,
});
Setting your own access key and secret key pairs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Setting Your Own Access and Secret Key Pairs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can set credentials for many accounts by editing
``conf/authdata.json`` but if you want to specify one set of your own
credentials, you can use ``SCALITY_ACCESS_KEY_ID`` and
``SCALITY_SECRET_ACCESS_KEY`` environment variables.
Credentials can be set for many accounts by editing ``conf/authdata.json``,
but use the ``SCALITY_ACCESS_KEY_ID`` and ``SCALITY_SECRET_ACCESS_KEY``
environment variables to specify your own credentials.
.. _scality-access-key-id-and-scality-secret-access-key:
SCALITY\_ACCESS\_KEY\_ID and SCALITY\_SECRET\_ACCESS\_KEY
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These variables specify authentication credentials for an account named
"CustomAccount".
“CustomAccount”.
Note: Anything in the ``authdata.json`` file will be ignored.
.. note:: Anything in the ``authdata.json`` file is ignored.
.. code:: shell
.. code-block:: shell
SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey npm start
$ SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey yarn start
.. _Using_SSL:
Scality with SSL
~~~~~~~~~~~~~~~~~~~~~~
Using SSL
~~~~~~~~~
If you wish to use https with your local Zenko CloudServer, you need to set up
SSL certificates. Here is a simple guide of how to do it.
To use https with your local CloudServer, you must set up
SSL certificates.
Deploying Zenko CloudServer
^^^^^^^^^^^^^^^^^^^
1. Deploy CloudServer using `our DockerHub page
<https://hub.docker.com/r/zenko/cloudserver/>`__ (run it with a file
backend).
First, you need to deploy **Zenko CloudServer**. This can be done very easily
via `our **DockerHub**
page <https://hub.docker.com/r/scality/s3server/>`__ (you want to run it
with a file backend).
.. Note:: If Docker is not installed locally, follow the
`instructions to install it for your distribution
<https://docs.docker.com/engine/installation/>`__
*Note:* *- If you don't have docker installed on your machine, here
are the `instructions to install it for your
distribution <https://docs.docker.com/engine/installation/>`__*
2. Update the CloudServer container's config
Updating your Zenko CloudServer container's config
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Add your certificates to your container. To do this,
#. exec inside the CloudServer container.
You're going to add your certificates to your container. In order to do
so, you need to exec inside your Zenko CloudServer container. Run a
``$> docker ps`` and find your container's id (the corresponding image
name should be ``scality/s3server``. Copy the corresponding container id
(here we'll use ``894aee038c5e``, and run:
#. Run ``$> docker ps`` to find the container's ID (the corresponding
image name is ``scality/cloudserver``).
.. code:: sh
#. Copy the corresponding container ID (``894aee038c5e`` in the present
example), and run:
.. code-block:: shell
$> docker exec -it 894aee038c5e bash
You're now inside your container, using an interactive terminal :)
This puts you inside your container, using an interactive terminal.
Generate SSL key and certificates
**********************************
3. Generate the SSL key and certificates. The paths where the different
files are stored are defined after the ``-out`` option in each of the
following commands.
There are 5 steps to this generation. The paths where the different
files are stored are defined after the ``-out`` option in each command
#. Generate a private key for your certificate signing request (CSR):
.. code:: sh
.. code-block:: shell
# Generate a private key for your CSR
$> openssl genrsa -out ca.key 2048
# Generate a self signed certificate for your local Certificate Authority
#. Generate a self-signed certificate for your local certificate
authority (CA):
.. code:: shell
$> openssl req -new -x509 -extensions v3_ca -key ca.key -out ca.crt -days 99999 -subj "/C=US/ST=Country/L=City/O=Organization/CN=scality.test"
# Generate a key for Zenko CloudServer
#. Generate a key for the CloudServer:
.. code:: shell
$> openssl genrsa -out test.key 2048
# Generate a Certificate Signing Request for S3 Server
#. Generate a CSR for CloudServer:
.. code:: shell
$> openssl req -new -key test.key -out test.csr -subj "/C=US/ST=Country/L=City/O=Organization/CN=*.scality.test"
# Generate a local-CA-signed certificate for S3 Server
#. Generate a certificate for CloudServer signed by the local CA:
.. code:: shell
$> openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 99999 -sha256
Update Zenko CloudServer ``config.json``
**********************************
4. Update Zenko CloudServer ``config.json``. Add a ``certFilePaths``
section to ``./config.json`` with appropriate paths:
Add a ``certFilePaths`` section to ``./config.json`` with the
appropriate paths:
.. code:: json
.. code:: json
"certFilePaths": {
"key": "./test.key",
@ -335,42 +352,36 @@ appropriate paths:
"ca": "./ca.crt"
}
Run your container with the new config
****************************************
5. Run your container with the new config.
First, you need to exit your container. Simply run ``$> exit``. Then,
you need to restart your container. Normally, a simple
``$> docker restart s3server`` should do the trick.
#. Exit the container by running ``$> exit``.
Update your host config
^^^^^^^^^^^^^^^^^^^^^^^
#. Restart the container with ``$> docker restart cloudserver``.
Associates local IP addresses with hostname
*******************************************
6. Update the host configuration by adding s3.scality.test
to /etc/hosts:
In your ``/etc/hosts`` file on Linux, OS X, or Unix (with root
permissions), edit the line of localhost so it looks like this:
::
.. code:: bash
127.0.0.1 localhost s3.scality.test
Copy the local certificate authority from your container
*********************************************************
7. Copy the local certificate authority (ca.crt from step 3) from your
container. Choose the path to save this file to (in the present
example, ``/root/ca.crt``), and run:
In the above commands, it's the file named ``ca.crt``. Choose the path
you want to save this file at (here we chose ``/root/ca.crt``), and run
something like:
.. code:: sh
.. code:: shell
$> docker cp 894aee038c5e:/usr/src/app/ca.crt /root/ca.crt
Test your config
^^^^^^^^^^^^^^^^^
.. note:: Your container ID will be different, and your path to
ca.crt may be different.
If you do not have aws-sdk installed, run ``$> npm install aws-sdk``. In
a ``test.js`` file, paste the following script:
Test the Config
^^^^^^^^^^^^^^^
If aws-sdk is not installed, run ``$> yarn add aws-sdk``.
Paste the following script into a file named "test.js":
.. code:: js
@ -410,8 +421,13 @@ a ``test.js`` file, paste the following script:
});
});
Now run that script with ``$> nodejs test.js``. If all goes well, it
should output ``SSL is cool!``. Enjoy that added security!
Now run this script with:
.. code::
$> nodejs test.js
On success, the script outputs ``SSL is cool!``.
.. |CircleCI| image:: https://circleci.com/gh/scality/S3.svg?style=svg
@ -4,40 +4,34 @@ Integrations
High Availability
=================
`Docker swarm <https://docs.docker.com/engine/swarm/>`__ is a
clustering tool developped by Docker and ready to use with its
containers. It allows to start a service, which we define and use as a
means to ensure Zenko CloudServer's continuous availability to the end user.
Indeed, a swarm defines a manager and n workers among n+1 servers. We
will do a basic setup in this tutorial, with just 3 servers, which
already provides a strong service resiliency, whilst remaining easy to
do as an individual. We will use NFS through docker to share data and
`Docker Swarm <https://docs.docker.com/engine/swarm/>`__ is a clustering tool
developed by Docker for use with its containers. It can be used to start
services, which we define to ensure CloudServer's continuous availability to
end users. A swarm defines a manager and *n* workers among *n* + 1 servers.
This tutorial shows how to perform a basic setup with three servers, which
provides strong service resiliency, while remaining easy to use and
maintain. We will use NFS through Docker to share data and
metadata between the different servers.
You will see that the steps of this tutorial are defined as **On
Server**, **On Clients**, **On All Machines**. This refers respectively
to NFS Server, NFS Clients, or NFS Server and Clients. In our example,
the IP of the Server will be **10.200.15.113**, while the IPs of the
Clients will be **10.200.15.96 and 10.200.15.97**
Sections are labeled **On Server**, **On Clients**, or
**On All Machines**, referring respectively to NFS server, NFS clients, or
NFS server and clients. In the present example, the server's IP address is
**10.200.15.113** and the client IP addresses are **10.200.15.96** and
**10.200.15.97**
Installing docker
-----------------
1. Install Docker (on All Machines)
Any version from docker 1.12.6 onwards should work; we used Docker
17.03.0-ce for this tutorial.
Docker 17.03.0-ce is used for this tutorial. Docker 1.12.6 and later will
likely work, but are not tested.
On All Machines
~~~~~~~~~~~~~~~
* On Ubuntu 14.04
Install Docker CE for Ubuntu as `documented at Docker
<https://docs.docker.com/install/linux/docker-ce/ubuntu/>`__.
Install the aufs dependency as recommended by Docker. The required
commands are:
On Ubuntu 14.04
^^^^^^^^^^^^^^^
The docker website has `solid
documentation <https://docs.docker.com/engine/installation/linux/ubuntu/>`__.
We have chosen to install the aufs dependency, as recommended by Docker.
Here are the required commands:
.. code:: sh
.. code:: sh
$> sudo apt-get update
$> sudo apt-get install linux-image-extra-$(uname -r) linux-image-extra-virtual
@ -47,14 +41,12 @@ Here are the required commands:
$> sudo apt-get update
$> sudo apt-get install docker-ce
On CentOS 7
^^^^^^^^^^^
* On CentOS 7
Install Docker CE as `documented at Docker
<https://docs.docker.com/install/linux/docker-ce/centos/>`__.
The required commands are:
The docker website has `solid
documentation <https://docs.docker.com/engine/installation/linux/centos/>`__.
Here are the required commands:
.. code:: sh
.. code:: sh
$> sudo yum install -y yum-utils
$> sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
@ -62,31 +54,24 @@ Here are the required commands:
$> sudo yum install docker-ce
$> sudo systemctl start docker
Configure NFS
-------------
2. Install NFS on Client(s)
On Clients
~~~~~~~~~~
NFS clients mount Docker volumes over the NFS server's shared folders.
If the NFS commons are installed, manual mounts are no longer needed.
Your NFS Clients will mount Docker volumes over your NFS Server's shared
folders. Hence, you don't have to mount anything manually, you just have
to install the NFS commons:
* On Ubuntu 14.04
On Ubuntu 14.04
^^^^^^^^^^^^^^^
Install the NFS commons with apt-get:
Simply install the NFS commons:
.. code:: sh
.. code:: sh
$> sudo apt-get install nfs-common
On CentOS 7
^^^^^^^^^^^
* On CentOS 7
Install the NFS utils, and then start the required services:
Install the NFS utils; then start required services:
.. code:: sh
.. code:: sh
$> yum install nfs-utils
$> sudo systemctl enable rpcbind
@ -98,29 +83,24 @@ Install the NFS utils, and then start the required services:
$> sudo systemctl start nfs-lock
$> sudo systemctl start nfs-idmap
On Server
~~~~~~~~~
3. Install NFS (on Server)
Your NFS Server will be the machine to physically host the data and
metadata. The package(s) we will install on it is slightly different
from the one we installed on the clients.
The NFS server hosts the data and metadata. The package(s) to install on it
differs from the package installed on the clients.
On Ubuntu 14.04
^^^^^^^^^^^^^^^
* On Ubuntu 14.04
Install the NFS server specific package and the NFS commons:
Install the NFS server-specific package and the NFS commons:
.. code:: sh
.. code:: sh
$> sudo apt-get install nfs-kernel-server nfs-common
On CentOS 7
^^^^^^^^^^^
* On CentOS 7
Same steps as with the client: install the NFS utils and start the
required services:
Install the NFS utils and start the required services:
.. code:: sh
.. code:: sh
$> yum install nfs-utils
$> sudo systemctl enable rpcbind
@ -132,111 +112,88 @@ required services:
$> sudo systemctl start nfs-lock
$> sudo systemctl start nfs-idmap
On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For both distributions:
Choose where your shared data and metadata from your local `Zenko CloudServer
<http://www.zenko.io/cloudserver/>`__ will be stored.
We chose to go with /var/nfs/data and /var/nfs/metadata. You also need
to set proper sharing permissions for these folders as they'll be shared
over NFS:
#. Choose where shared data and metadata from the local
`CloudServer <http://www.zenko.io/cloudserver/>`__ shall be stored (the
present example uses /var/nfs/data and /var/nfs/metadata). Set permissions
for these folders for sharing over NFS:
.. code:: sh
.. code:: sh
$> mkdir -p /var/nfs/data /var/nfs/metadata
$> chmod -R 777 /var/nfs/
Now you need to update your **/etc/exports** file. This is the file that
configures network permissions and rwx permissions for NFS access. By
default, Ubuntu applies the no\_subtree\_check option, so we declared
both folders with the same permissions, even though they're in the same
tree:
#. The /etc/exports file configures network permissions and r-w-x permissions
for NFS access. Edit /etc/exports, adding the following lines:
.. code:: sh
$> sudo vim /etc/exports
In this file, add the following lines:
.. code:: sh
.. code:: sh
/var/nfs/data 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
/var/nfs/metadata 10.200.15.96(rw,sync,no_root_squash) 10.200.15.97(rw,sync,no_root_squash)
Export this new NFS table:
Ubuntu applies the no\_subtree\_check option by default, so both
folders are declared with the same permissions, even though they're in
the same tree.
.. code:: sh
#. Export this new NFS table:
.. code:: sh
$> sudo exportfs -a
Eventually, you need to allow for NFS mount from Docker volumes on other
machines. You need to change the Docker config in
**/lib/systemd/system/docker.service**:
#. Edit the ``MountFlags`` option in the Docker config in
/lib/systemd/system/docker.service to enable NFS mount from Docker volumes
on other machines:
.. code:: sh
$> sudo vim /lib/systemd/system/docker.service
In this file, change the **MountFlags** option:
.. code:: sh
.. code:: sh
MountFlags=shared
Now you just need to restart the NFS server and docker daemons so your
changes apply.
#. Restart the NFS server and Docker daemons to apply these changes.
On Ubuntu 14.04
^^^^^^^^^^^^^^^
* On Ubuntu 14.04
Restart your NFS Server and docker services:
.. code:: sh
.. code:: sh
$> sudo service nfs-kernel-server restart
$> sudo service docker restart
On CentOS 7
^^^^^^^^^^^
* On CentOS 7
Restart your NFS Server and docker daemons:
.. code:: sh
.. code:: sh
$> sudo systemctl restart nfs-server
$> sudo systemctl daemon-reload
$> sudo systemctl restart docker
Set up your Docker Swarm service
--------------------------------
On All Machines
~~~~~~~~~~~~~~~
4. Set Up a Docker Swarm
On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* On all machines and distributions:
We will now set up the Docker volumes that will be mounted to the NFS
Server and serve as data and metadata storage for Zenko CloudServer. These two
commands have to be replicated on all machines:
Set up the Docker volumes to be mounted to the NFS server for CloudServer's
data and metadata storage. The following commands must be replicated on all
machines:
.. code:: sh
.. code:: sh
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/data --name data
$> docker volume create --driver local --opt type=nfs --opt o=addr=10.200.15.113,rw --opt device=:/var/nfs/metadata --name metadata
There is no need to ""docker exec" these volumes to mount them: the
Docker Swarm manager will do it when the Docker service will be started.
There is no need to ``docker exec`` these volumes to mount them: the
Docker Swarm manager does this when the Docker service is started.
On Server
^^^^^^^^^
* On a server:
To start a Docker service on a Docker Swarm cluster, you first have to
initialize that cluster (i.e.: define a manager), then have the
workers/nodes join in, and then start the service. Initialize the swarm
cluster, and look at the response:
To start a Docker service on a Docker Swarm cluster, initialize the cluster
(that is, define a manager), prompt workers/nodes to join in, and then start
the service.
.. code:: sh
Initialize the swarm cluster, and review its response:
.. code:: sh
$> docker swarm init --advertise-addr 10.200.15.113
@ -250,204 +207,180 @@ cluster, and look at the response:
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
On Clients
^^^^^^^^^^
* On clients:
Simply copy/paste the command provided by your docker swarm init. When
all goes well, you'll get something like this:
Copy and paste the command provided by your Docker Swarm init. A successful
request/response will resemble:
.. code:: sh
.. code:: sh
$> docker swarm join --token SWMTKN-1-5yxxencrdoelr7mpltljn325uz4v6fe1gojl14lzceij3nujzu-2vfs9u6ipgcq35r90xws3stka 10.200.15.113:2377
This node joined a swarm as a worker.
On Server
^^^^^^^^^
Start the Service on the Server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Start the service on your swarm cluster!
Start the service on the Swarm cluster.
.. code:: sh
$> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/s3server
$> docker service create --name s3 --replicas 1 --mount type=volume,source=data,target=/usr/src/app/localData --mount type=volume,source=metadata,target=/usr/src/app/localMetadata -p 8000:8000 scality/cloudserver
If you run a docker service ls, you should have the following output:
On a successful installation, ``docker service ls`` returns the following
output:
.. code:: sh
$> docker service ls
ID NAME MODE REPLICAS IMAGE
ocmggza412ft s3 replicated 1/1 scality/s3server:latest
ocmggza412ft s3 replicated 1/1 scality/cloudserver:latest
If your service won't start, consider disabling apparmor/SELinux.
If the service does not start, consider disabling apparmor/SELinux.
Testing your High Availability S3Server
---------------------------------------
Testing the High-Availability CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
On All Machines
~~~~~~~~~~~~~~~
On Ubuntu 14.04 and CentOS 7
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Try to find out where your Scality Zenko CloudServer is actually running using
the **docker ps** command. It can be on any node of the swarm cluster,
manager or worker. When you find it, you can kill it, with **docker stop
<container id>** and you'll see it respawn on a different node of the
swarm cluster. Now you see, if one of your servers falls, or if docker
stops unexpectedly, your end user will still be able to access your
local Zenko CloudServer.
On all machines (client/server) and distributions (Ubuntu and CentOS),
determine where CloudServer is running using ``docker ps``. CloudServer can
operate on any node of the Swarm cluster, manager or worker. When you find
it, you can kill it with ``docker stop <container id>``. It will respawn
on a different node. Now, if one server goes down, or if Docker stops
unexpectedly, the end user will still be able to access the local CloudServer.
Troubleshooting
---------------
~~~~~~~~~~~~~~~
To troubleshoot the service you can run:
To troubleshoot the service, run:
.. code:: sh
$> docker service ps s3
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR
0ar81cw4lvv8chafm8pw48wbc s3.1 scality/s3server localhost.localdomain.localdomain Running Running 7 days ago
cvmf3j3bz8w6r4h0lf3pxo6eu \_ s3.1 scality/s3server localhost.localdomain.localdomain Shutdown Failed 7 days ago "task: non-zero exit (137)"
0ar81cw4lvv8chafm8pw48wbc s3.1 scality/cloudserver localhost.localdomain.localdomain Running Running 7 days ago
cvmf3j3bz8w6r4h0lf3pxo6eu \_ s3.1 scality/cloudserver localhost.localdomain.localdomain Shutdown Failed 7 days ago "task: non-zero exit (137)"
If the error is truncated it is possible to have a more detailed view of
the error by inspecting the docker task ID:
If the error is truncated, view the error in detail by inspecting the
Docker task ID:
.. code:: sh
$> docker inspect cvmf3j3bz8w6r4h0lf3pxo6eu
Off you go!
-----------
Let us know what you use this functionality for, and if you'd like any
specific developments around it. Or, even better: come and contribute to
our `Github repository <https://github.com/scality/s3/>`__! We look
forward to meeting you!
~~~~~~~~~~~
Let us know how you use this and if you'd like any specific developments
around it. Even better: come and contribute to our `Github repository
<https://github.com/scality/s3/>`__! We look forward to meeting you!
S3FS
====
Export your buckets as a filesystem with s3fs on top of Zenko CloudServer
You can export buckets as a filesystem with s3fs on CloudServer.
`s3fs <https://github.com/s3fs-fuse/s3fs-fuse>`__ is an open source
tool that allows you to mount an S3 bucket on a filesystem-like backend.
It is available both on Debian and RedHat distributions. For this
tutorial, we used an Ubuntu 14.04 host to deploy and use s3fs over
Scality's Zenko CloudServer.
tool, available both on Debian and RedHat distributions, that enables
you to mount an S3 bucket on a filesystem-like backend. This tutorial uses
an Ubuntu 14.04 host to deploy and use s3fs over CloudServer.
Deploying Zenko CloudServer with SSL
----------------------------
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
First, you need to deploy **Zenko CloudServer**. This can be done very easily
via `our DockerHub
page <https://hub.docker.com/r/scality/s3server/>`__ (you want to run it
with a file backend).
First, deploy CloudServer with a file backend using `our DockerHub page
<https://hub.docker.com/r/zenko/cloudserver>`__.
*Note:* *- If you don't have docker installed on your machine, here
are the `instructions to install it for your
distribution <https://docs.docker.com/engine/installation/>`__*
.. note::
You also necessarily have to set up SSL with Zenko CloudServer to use s3fs. We
have a nice
`tutorial <https://s3.scality.com/v1.0/page/scality-with-ssl>`__ to help
you do it.
If Docker is not installed on your machine, follow
`these instructions <https://docs.docker.com/engine/installation/>`__
to install it for your distribution.
s3fs setup
----------
You must also set up SSL with CloudServer to use s3fs. See `Using SSL
<./GETTING_STARTED#Using_SSL>`__ for instructions.
s3fs Setup
~~~~~~~~~~
Installing s3fs
~~~~~~~~~~~~~~~
---------------
s3fs has quite a few dependencies. As explained in their
`README <https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation>`__,
the following commands should install everything for Ubuntu 14.04:
Follow the instructions in the s3fs `README
<https://github.com/s3fs-fuse/s3fs-fuse/blob/master/README.md#installation-from-pre-built-packages>`__.
.. code:: sh
$> sudo apt-get install automake autotools-dev g++ git libcurl4-gnutls-dev
$> sudo apt-get install libfuse-dev libssl-dev libxml2-dev make pkg-config
Now you want to install s3fs per se:
.. code:: sh
$> git clone https://github.com/s3fs-fuse/s3fs-fuse.git
$> cd s3fs-fuse
$> ./autogen.sh
$> ./configure
$> make
$> sudo make install
Check that s3fs is properly installed by checking its version. it should
answer as below:
Check that s3fs is properly installed. A version check should return
a response resembling:
.. code:: sh
$> s3fs --version
Amazon Simple Storage Service File System V1.80(commit:d40da2c) with OpenSSL
Copyright (C) 2010 Randy Rizun <rrizun@gmail.com>
License GPL2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Configuring s3fs
~~~~~~~~~~~~~~~~
----------------
s3fs expects you to provide it with a password file. Our file is
``/etc/passwd-s3fs``. The structure for this file is
``ACCESSKEYID:SECRETKEYID``, so, for S3Server, you can run:
``ACCESSKEYID:SECRETKEYID``, so, for CloudServer, you can run:
.. code:: sh
$> echo 'accessKey1:verySecretKey1' > /etc/passwd-s3fs
$> chmod 600 /etc/passwd-s3fs
Using Zenko CloudServer with s3fs
------------------------
Using CloudServer with s3fs
---------------------------
First, you're going to need a mountpoint; we chose ``/mnt/tests3fs``:
1. Use /mnt/tests3fs as a mount point.
.. code:: sh
.. code:: sh
$> mkdir /mnt/tests3fs
Then, you want to create a bucket on your local Zenko CloudServer; we named it
``tests3fs``:
2. Create a bucket on your local CloudServer. In the present example it is
named “tests3fs”.
.. code:: sh
.. code:: sh
$> s3cmd mb s3://tests3fs
*Note:* *- If you've never used s3cmd with our Zenko CloudServer, our README
provides you with a `recommended
config <https://github.com/scality/S3/blob/master/README.md#s3cmd>`__*
3. Mount the bucket to your mount point with s3fs:
Now you can mount your bucket to your mountpoint with s3fs:
.. code:: sh
.. code:: sh
$> s3fs tests3fs /mnt/tests3fs -o passwd_file=/etc/passwd-s3fs -o url="https://s3.scality.test:8000/" -o use_path_request_style
*If you're curious, the structure of this command is*
``s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS``\ *, and the
options are mandatory and serve the following purposes:
* ``passwd_file``\ *: specifiy path to password file;
* ``url``\ *: specify the hostname used by your SSL provider;
* ``use_path_request_style``\ *: force path style (by default, s3fs
uses subdomains (DNS style)).*
The structure of this command is:
``s3fs BUCKET_NAME PATH/TO/MOUNTPOINT -o OPTIONS``. Of these mandatory
options:
| From now on, you can either add files to your mountpoint, or add
objects to your bucket, and they'll show in the other.
| For example, let's' create two files, and then a directory with a file
in our mountpoint:
* ``passwd_file`` specifies the path to the password file.
* ``url`` specifies the host name used by your SSL provider.
* ``use_path_request_style`` forces the path style (by default,
s3fs uses DNS-style subdomains).
.. code:: sh
Once the bucket is mounted, files added to the mount point or
objects added to the bucket will appear in both locations.
Example
-------
Create two files, and then a directory with a file in our mount point:
.. code:: sh
$> touch /mnt/tests3fs/file1 /mnt/tests3fs/file2
$> mkdir /mnt/tests3fs/dir1
$> touch /mnt/tests3fs/dir1/file3
Now, I can use s3cmd to show me what is actually in S3Server:
Now, use s3cmd to show what is in CloudServer:
.. code:: sh
.. code:: sh
$> s3cmd ls -r s3://tests3fs
@ -456,35 +389,21 @@ Now, I can use s3cmd to show me what is actually in S3Server:
2017-02-28 17:28 0 s3://tests3fs/file1
2017-02-28 17:28 0 s3://tests3fs/file2
Now you can enjoy a filesystem view on your local Zenko CloudServer!
Now you can enjoy a filesystem view on your local CloudServer.
Duplicity
=========
How to backup your files with Zenko CloudServer.
How to back up your files with CloudServer.
Installing
-----------
Installing Duplicity and its dependencies
Installing Duplicity and its Dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Second, you want to install
`Duplicity <http://duplicity.nongnu.org/index.html>`__. You have to
download `this
tarball <https://code.launchpad.net/duplicity/0.7-series/0.7.11/+download/duplicity-0.7.11.tar.gz>`__,
decompress it, and then checkout the README inside, which will give you
a list of dependencies to install. If you're using Ubuntu 14.04, this is
your lucky day: here is a lazy step by step install.
.. code:: sh
$> apt-get install librsync-dev gnupg
$> apt-get install python-dev python-pip python-lockfile
$> pip install -U boto
Then you want to actually install Duplicity:
To install `Duplicity <http://duplicity.nongnu.org/>`__,
go to `this site <https://code.launchpad.net/duplicity/0.7-series>`__.
Download the latest tarball. Decompress it and follow the instructions
in the README.
.. code:: sh
@ -492,22 +411,29 @@ Then you want to actually install Duplicity:
$> cd duplicity-0.7.11
$> python setup.py install
Using
------
You may receive error messages indicating the need to install some or all
of the following dependencies:
Testing your installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code:: sh
First, we're just going to quickly check that Zenko CloudServer is actually
running. To do so, simply run ``$> docker ps`` . You should see one
container named ``scality/s3server``. If that is not the case, try
``$> docker start s3server``, and check again.
$> apt-get install librsync-dev gnupg
$> apt-get install python-dev python-pip python-lockfile
$> pip install -U boto
Secondly, as you probably know, Duplicity uses a module called **Boto**
to send requests to S3. Boto requires a configuration file located in
**``/etc/boto.cfg``** to have your credentials and preferences. Here is
a minimalistic config `that you can finetune following these
instructions <http://boto.cloudhackers.com/en/latest/getting_started.html>`__.
Testing the Installation
------------------------
1. Check that CloudServer is running. Run ``$> docker ps``. You should
see one container named ``scality/cloudserver``. If you do not, run
``$> docker start cloudserver`` and check again.
2. Duplicity uses a module called “Boto” to send requests to S3. Boto
requires a configuration file located in ``/etc/boto.cfg`` to store
your credentials and preferences. A minimal configuration
you can fine tune `following these instructions
<http://boto.cloudhackers.com/en/latest/getting_started.html>`__ is
shown here:
::
@ -521,26 +447,25 @@ instructions <http://boto.cloudhackers.com/en/latest/getting_started.html>`__.
# If using SSL, unmute and provide absolute path to local CA certificate
# ca_certificates_file = /absolute/path/to/ca.crt
*Note:* *If you want to set up SSL with Zenko CloudServer, check out our
`tutorial <http://link/to/SSL/tutorial>`__*
.. note:: To set up SSL with CloudServer, check out our `Using SSL
<./GETTING_STARTED#Using_SSL>`__ in GETTING STARTED.
At this point, we've met all the requirements to start running Zenko CloudServer
as a backend to Duplicity. So we should be able to back up a local
folder/file to local S3. Let's try with the duplicity decompressed
folder:
3. At this point all requirements to run CloudServer as a backend to Duplicity
have been met. A local folder/file should back up to the local S3.
Try it with the decompressed Duplicity folder:
.. code:: sh
$> duplicity duplicity-0.7.11 "s3://127.0.0.1:8000/testbucket/"
*Note:* *Duplicity will prompt you for a symmetric encryption
passphrase. Save it somewhere as you will need it to recover your
data. Alternatively, you can also add the ``--no-encryption`` flag
and the data will be stored plain.*
.. note:: Duplicity will prompt for a symmetric encryption passphrase.
Save it carefully, as you will need it to recover your data.
Alternatively, you can add the ``--no-encryption`` flag
and the data will be stored plain.
If this command is succesful, you will get an output looking like this:
If this command is successful, you will receive an output resembling:
::
.. code:: sh
--------------[ Backup Statistics ]--------------
StartTime 1486486547.13 (Tue Feb 7 16:55:47 2017)
@ -560,15 +485,13 @@ If this command is succesful, you will get an output looking like this:
Errors 0
-------------------------------------------------
Congratulations! You can now backup to your local S3 through duplicity
:)
Congratulations! You can now back up to your local S3 through Duplicity.
Automating backups
~~~~~~~~~~~~~~~~~~~
Automating Backups
------------------
Now you probably want to back up your files periodically. The easiest
way to do this is to write a bash script and add it to your crontab.
Here is my suggestion for such a file:
The easiest way to back up files periodically is to write a bash script
and add it to your crontab. A suggested script follows.
.. code:: sh
@ -577,33 +500,33 @@ Here is my suggestion for such a file:
# Export your passphrase so you don't have to type anything
export PASSPHRASE="mypassphrase"
# If you want to use a GPG Key, put it here and unmute the line below
# To use a GPG key, put it here and uncomment the line below
#GPG_KEY=
# Define your backup bucket, with localhost specified
DEST="s3://127.0.0.1:8000/testbuckets3server/"
DEST="s3://127.0.0.1:8000/testbucketcloudserver/"
# Define the absolute path to the folder you want to backup
# Define the absolute path to the folder to back up
SOURCE=/root/testfolder
# Set to "full" for full backups, and "incremental" for incremental backups
# Warning: you have to perform one full backup befor you can perform
# Warning: you must perform one full backup before you can perform
# incremental ones on top of it
FULL=incremental
# How long to keep backups for; if you don't want to delete old
# backups, keep empty; otherwise, syntax is "1Y" for one year, "1M"
# for one month, "1D" for one day
# How long to keep backups. If you don't want to delete old backups, keep
# this value empty; otherwise, the syntax is "1Y" for one year, "1M" for
# one month, "1D" for one day.
OLDER_THAN="1Y"
# is_running checks whether duplicity is currently completing a task
# is_running checks whether Duplicity is currently completing a task
is_running=$(ps -ef | grep duplicity | grep python | wc -l)
# If duplicity is already completing a task, this will simply not run
# If Duplicity is already completing a task, this will not run
if [ $is_running -eq 0 ]; then
echo "Backup for ${SOURCE} started"
# If you want to delete backups older than a certain time, we do it here
# To delete backups older than a certain time, do it here
if [ "$OLDER_THAN" != "" ]; then
echo "Removing backups older than ${OLDER_THAN}"
duplicity remove-older-than ${OLDER_THAN} ${DEST}
@ -626,17 +549,17 @@ Here is my suggestion for such a file:
# Forget the passphrase...
unset PASSPHRASE
So let's say you put this file in ``/usr/local/sbin/backup.sh.`` Next
you want to run ``crontab -e`` and paste your configuration in the file
that opens. If you're unfamiliar with Cron, here is a good `How
To <https://help.ubuntu.com/community/CronHowto>`__. The folder I'm
backing up is a folder I modify permanently during my workday, so I want
incremental backups every 5mn from 8AM to 9PM monday to friday. Here is
the line I will paste in my crontab:
Put this file in ``/usr/local/sbin/backup.sh``. Run ``crontab -e`` and
paste your configuration into the file that opens. If you're unfamiliar
with Cron, here is a good `HowTo
<https://help.ubuntu.com/community/CronHowto>`__. If the folder being
backed up is modified constantly during the work day, incremental backups
can be scheduled every 5 minutes from 8 AM to 9 PM, Monday through Friday,
by pasting the following line into crontab:
.. code:: cron
.. code:: sh
*/5 8-20 * * 1-5 /usr/local/sbin/backup.sh
Now I can try and add / remove files from the folder I'm backing up, and
I will see incremental backups in my bucket.
Adding or removing files from the folder being backed up will result in
incremental backups in the bucket.

docs/MD_SEARCH.rst Normal file
@ -0,0 +1,263 @@
Metadata Search Documentation
=============================
Description
-----------
This feature enables metadata search to be performed on the metadata of objects
stored in Zenko.
Requirements
------------
* MongoDB
Design
------
The Metadata Search feature expands on the existing :code:`GET Bucket` S3 API by
enabling users to conduct metadata searches by adding the custom Zenko query
string parameter, :code:`search`. The :code:`search` parameter is structured as a pseudo
SQL WHERE clause, and supports basic SQL operators. For example:
:code:`"A=1 AND B=2 OR C=3"` (complex queries can be built using nesting
operators, :code:`(` and :code:`)`).
The search process is as follows:
* Zenko receives a :code:`GET` request.
.. code::
# regular getBucket request
GET /bucketname HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string
# getBucket versions request
GET /bucketname?versions HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string
# search getBucket request
GET /bucketname?search=key%3Dsearch-item HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string
* If the request does *not* contain the :code:`search` query parameter, Zenko performs
a normal bucket listing and returns an XML result containing the list of
objects.
* If the request *does* contain the :code:`search` query parameter, Zenko parses and
validates the search string.
- If the search string is invalid, Zenko returns an :code:`InvalidArgument` error.
.. code::
<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<Error>
<Code>InvalidArgument</Code>
<Message>Invalid sql where clause sent as search query</Message>
<Resource></Resource>
<RequestId>d1d6afc64345a8e1198e</RequestId>
</Error>
- If the search string is valid, Zenko parses it and generates an abstract
syntax tree (AST). The AST is then passed to the MongoDB backend to be
used as the query filter for retrieving objects from a bucket that
satisfies the requested search conditions. Zenko parses the filtered
results and returns them as the response.
Metadata search results have the same structure as a :code:`GET Bucket` response:
.. code:: xml
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucketname</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>objectKey</Key>
<LastModified>2018-04-19T18:31:49.426Z</LastModified>
<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>
<Size>0</Size>
<Owner>
<ID>79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be</ID>
<DisplayName>Bart</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
</Contents>
<Contents>
...
</Contents>
</ListBucketResult>
Performing Metadata Searches with Zenko
---------------------------------------
You can perform metadata searches by:
+ Using the :code:`search_bucket` tool in the
`Scality/S3 <https://github.com/scality/S3>`_ GitHub repository.
+ Creating a signed HTTP request to Zenko in your preferred programming
language.
Using the S3 Tool
+++++++++++++++++
After cloning the `Scality/S3 <https://github.com/scality/S3>`_ GitHub repository
and installing the necessary dependencies, run the following command in the S3
projects root directory to access the search tool:
.. code::
node bin/search_bucket
This generates the following output:
.. code::
Usage: search_bucket [options]
Options:
-V, --version output the version number
-a, --access-key <accessKey> Access key id
-k, --secret-key <secretKey> Secret access key
-b, --bucket <bucket> Name of the bucket
-q, --query <query> Search query
-h, --host <host> Host of the server
-p, --port <port> Port of the server
-s --ssl
-v, --verbose
-h, --help output usage information
In the following examples, Zenko Server is accessible on endpoint
:code:`http://127.0.0.1:8000` and contains the bucket :code:`zenkobucket`.
.. code::
# search for objects with metadata "blue"
node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
-q "x-amz-meta-color=blue" -h 127.0.0.1 -p 8000
# search for objects tagged with "type=color"
node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
-q "tags.type=color" -h 127.0.0.1 -p 8000
Coding Examples
+++++++++++++++
Search requests can be also performed by making HTTP requests authenticated
with one of the AWS Signature schemes: version 2 or version 4.
For more about authentication scheme, see:
* https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
* http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
You can also view examples for making requests with Auth V4 in various
languages `here <../../../examples>`__.
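As an illustration, a sketch of a signed Auth V4 search request in Node.js,
assuming the third-party ``aws4`` signing package is installed (the bucket
name, query, and credentials are illustrative):

.. code:: js

const http = require('http');
const aws4 = require('aws4');   // assumed: installed with `yarn add aws4`

// GET bucket request carrying the custom "search" query parameter
const opts = {
    host: '127.0.0.1',
    port: 8000,
    method: 'GET',
    path: '/zenkobucket?search=' + encodeURIComponent('x-amz-meta-color="blue"'),
    service: 's3',
    region: 'us-east-1',
};

// aws4.sign adds the Authorization and X-Amz-Date headers in place
aws4.sign(opts, {
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
});

http.request(opts, res => {
    let body = '';
    res.on('data', chunk => { body += chunk; });
    res.on('end', () => console.log(body));
}).end();

The response is the same XML listing shown above, filtered by the search
condition.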
Specifying Metadata Fields
~~~~~~~~~~~~~~~~~~~~~~~~~~
To search system metadata headers:
.. code::
{system-metadata-key}{supported SQL op}{search value}
# example
key = blueObject
size > 0
key LIKE "blue.*"
To search custom user metadata:
.. code::
# metadata must be prefixed with "x-amz-meta-"
x-amz-meta-{user-metadata-key}{supported SQL op}{search value}
# example
x-amz-meta-color = blue
x-amz-meta-color != red
x-amz-meta-color LIKE "b.*"
To search tags:
.. code::
# tag searches must be prefixed with "tags."
tags.{tag-key}{supported SQL op}{search value}
# example
tags.type = color
Examples queries:
.. code::
# searching for objects with custom metadata "color"=blue" and are tagged
# "type"="color"
tags.type="color" AND x-amz-meta-color="blue"
# searching for objects with the object key containing the substring "blue"
# or (custom metadata "color"=blue" and are tagged "type"="color")
key LIKE '.*blue.*' OR (x-amz-meta-color="blue" AND tags.type="color")
Differences from SQL
++++++++++++++++++++
Zenko metadata search queries are similar to SQL-query :code:`WHERE` clauses, but
differ in that:
* They follow the :code:`PCRE` format
* They do not require values with hyphens to be enclosed in
backticks (`` ` ``).
.. code::
# SQL query
`x-amz-meta-search-item` = `ice-cream-cone`
# MD Search query
x-amz-meta-search-item = ice-cream-cone
* Search queries do not support all SQL operators.
.. code::
# Supported SQL operators:
=, <, >, <=, >=, !=, AND, OR, LIKE, <>
# Unsupported SQL operators:
NOT, BETWEEN, IN, IS, +, -, %, ^, /, *, !
Using Regular Expressions in Metadata Search
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Regular expressions in Zenko metadata search differ from SQL in the following
ways:
+ Wildcards are represented with :code:`.*` instead of :code:`%`.
+ Regex patterns must be wrapped in quotes. Failure to do this can lead to
misinterpretation of patterns.
+ As with :code:`PCRE`, regular expressions can be entered in either the
:code:`/pattern/` syntax or as the pattern itself if regex options are
not required.
Example regular expressions:
.. code::
# search for strings containing word substring "helloworld"
".*helloworld.*"
"/.*helloworld.*/"
"/.*helloworld.*/i"

docs/Makefile Normal file
@ -0,0 +1,21 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = Zenko
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

73
docs/RELEASE.md Normal file

@ -0,0 +1,73 @@
# Cloudserver Release Plan
## Docker Image Generation
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:
* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudserver-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudserver-policies
With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
This allows those images to be used by developers, CI builds,
build chain and so on.
Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
```
## Release Process
To release a production image:
* Create a PR to bump the package version
Update Cloudserver's `package.json` by bumping it to the relevant next
version in a new PR. For example, if the last released version was
`8.4.7`, the next version would be `8.4.8`.
```js
{
"name": "cloudserver",
"version": "8.4.8", <--- Here
[...]
}
```
* Review & merge the PR
* Create the release on GitHub
* Go to the Releases tab (https://github.com/scality/cloudserver/releases);
* Click on the `Draft new release` button;
* In the `tag` field, type the name of the release (`8.4.8`), and confirm
  to create the tag on publish;
* Click on the `Generate release notes` button to fill in the fields;
* Rename the release to `Release x.y.z` (e.g. `Release 8.4.8` in this case);
* Click on the `Publish release` button to create the GitHub release and git tag
Notes:
* the Git tag will be created automatically.
* this should be done as soon as the PR is merged, so that the tag
is put on the "version bump" commit.
* With the following parameters, [force a build here](https://eve.devsca.com/github/scality/cloudserver/#/builders/3/force/force)
* Branch Name: The one used for the tag earlier. In this example `development/8.4`
* Override Stage: 'release'
* Extra properties:
* name: `'tag'`, value: `[release version]`, in this example `'8.4.8'`
* Release the release version on Jira
* Go to the [CloudServer release page](https://scality.atlassian.net/projects/CLDSRV?selectedItem=com.atlassian.jira.jira-projects-plugin:release-page)
* Create a next version
* Name: `[next version]`, in this example `8.4.9`
* Click `...` and select `Release` on the recently released version (`8.4.8`)
* Fill in the field to move incomplete version to the next one


@ -1,10 +1,12 @@
.. _use-public-cloud:
Using Public Clouds as data backends
====================================
Introduction
------------
As stated in our `GETTING STARTED guide <../GETTING_STARTED/#location-configuration>`__,
As stated in our `GETTING STARTED guide <GETTING_STARTED.html#location-configuration>`__,
new data backends can be added by creating a region (also called location
constraint) with the right endpoint and credentials.
This section of the documentation shows you how to set up our currently
@ -160,8 +162,8 @@ CloudServer.
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
-v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
-v ~/.aws/credentials:/root/.aws/credentials \
-e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000
-d scality/s3server
-e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000 \
-d scality/cloudserver
Testing: put an object to AWS S3 using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -243,7 +245,7 @@ There are a few configurable options here:
this region should behave like any other AWS S3 region (in the case of MS Azure
hosted data, this is mostly relevant for the format of errors);
- :code:`azureStorageEndpoint` : set to your storage account's endpoint, usually
:code:`https://{{storageAccountName}}.blob.core.windows.name`;
:code:`https://{{storageAccountName}}.blob.core.windows.net`;
- :code:`azureContainerName` : set to an *existing container* in your MS Azure
storage account; this is the container in which your data will be stored for
this location constraint;
@ -322,7 +324,7 @@ CloudServer.
-v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json \
-v $(pwd)/conf/authdata.json:/usr/src/app/conf/authdata.json \
-e S3DATA=multiple -e ENDPOINT=http://localhost -p 8000:8000
-d scality/s3server
-d scality/cloudserver
Testing: put an object to MS Azure using CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@ -0,0 +1,79 @@
============================================
Add New Backend Storage To Zenko CloudServer
============================================
This set of documents aims at bootstrapping developers with Zenko's CloudServer
module, so they can then go on and contribute features.
.. toctree::
:maxdepth: 2
non-s3-compatible-backend
s3-compatible-backend
We always encourage our community to offer new extensions to Zenko,
and new backend support is paramount to meeting more community needs.
If that is something you want to contribute (or just do on your own
version of the cloudserver image), this is the guide to read. Please
make sure you follow our `Contributing Guidelines`_.
If you need help with anything, please search our `forum`_ for more
information.
Add support for a new backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently the main public cloud protocols are supported and more can
be added. There are two main types of backend: those compatible with
Amazon's S3 protocol and those not compatible.
================= ========== ============ ===========
Backend type Supported Active WIP Not started
================= ========== ============ ===========
Private disk/fs x
AWS S3 x
Microsoft Azure x
Backblaze B2 x
Google Cloud x
Openstack Swift x
================= ========== ============ ===========
.. important:: Should you want to request support for a new backend,
   please do so by opening a `Github issue`_ and filling out the
   "Feature Request" section of our template.
To add support for a new backend in the official CloudServer
repository, please follow these steps:
- familiarize yourself with our `Contributing Guidelines`_
- open a `Github issue`_, fill out the Feature Request form, and
  specify that you would like to contribute it yourself;
- wait for our core team to get back to you with an answer on whether
we are interested in taking that contribution in (and hence
committing to maintaining it over time);
- once approved, fork the repository and start your development;
- use the `forum`_ for any questions you may have during the
  development process;
- when you think it's ready, let us know so that we create a feature
branch against which we'll compare and review your code;
- open a pull request with your changes against that dedicated feature
branch;
- once that pull request gets merged, you're done.
.. tip::
While we do take care of the final rebase (when we merge your feature
branch on the latest default branch), we do ask that you keep up to date with our latest default branch
until then.
.. important::
If we do not approve your feature request, you may of course still
work on supporting a new backend: all our "no" means is that we do not
have the resources, as part of our core development team, to maintain
this feature for the moment.
.. _GitHub issue: https://github.com/scality/S3/issues
.. _Contributing Guidelines: https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md
.. _forum: https://forum.zenko.io


@ -0,0 +1,53 @@
=================
Add A New Backend
=================
Supporting all possible public cloud storage APIs is CloudServer's
ultimate goal. As an open source project, contributions are welcome.
The first step is to get familiar with building a custom Docker image
for CloudServer.
Build a Custom Docker Image
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Clone Zenko's CloudServer, install all dependencies and start the
service:
.. code-block:: shell
$ git clone https://github.com/scality/cloudserver
$ cd cloudserver
$ yarn install
$ yarn start
.. tip::
Some optional dependencies may fail, resulting in you seeing `yarn
WARN` messages; these can safely be ignored. Refer to the User
documentation for all available options.
Build the Docker image:
.. code-block:: shell
# docker build . -t \
    {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}
Push the newly created Docker image to your own hub:
.. code-block:: shell
# docker push \
    {{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}
.. note::
To perform this last operation, you need to be authenticated with DockerHub.
There are two main types of backend you could want Zenko to support:
- `S3 compatible data backends <s3-compatible-backend.html>`__
- `Data backends using another protocol than the S3 protocol <non-s3-compatible-backend.html>`__


@ -0,0 +1,530 @@
==========================================================
Adding support for data backends not supporting the S3 API
==========================================================
These backends abstract the complexity of multiple APIs to let users
work on a single common namespace across multiple clouds.
This document aims at introducing you to the right files in
CloudServer (the Zenko stack's subcomponent in charge of API
translation, among other things) to add support to your own backend of
choice.
General configuration
~~~~~~~~~~~~~~~~~~~~~
There are a number of constants and environment variables to define to support a
new data backend; here is a list and where to find them:
:file:`/constants.js`
---------------------
* give your backend type a name, as part of the `externalBackends` object;
* specify whether versioning is implemented, as part of the
`versioningNotImplemented` object;
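As a rough sketch, assuming the hypothetical `ztore` backend used in the
examples below, those two constants could be extended as follows (the exact
members of each object in your checkout may differ):

.. code-block:: js

   // /constants.js (sketch only; assumes a hypothetical 'ztore' backend)
   const constants = {
       // declare the new backend type so the data layer recognizes it
       externalBackends: { aws_s3: true, azure: true, gcp: true, ztore: true },
       // declare that versioning is not implemented yet for this backend
       versioningNotImplemented: { azure: true, ztore: true },
   };

   module.exports = constants;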
:file:`/lib/Config.js`
----------------------
* this is where you should put common utility functions, like the ones to parse
the location object from `locationConfig.json`;
* make sure you define environment variables (like `GCP_SERVICE_EMAIL`), as we'll
  use those internally for the CI to test against the real remote backend;
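For illustration, the `getZtoreEndpoint()` and `getZtoreCredentials()` helpers
used by the `parseLC()` pseudocode further below could look roughly like the
sketch here, shown as standalone functions taking the parsed location object;
the names, environment variables, and location fields are assumptions for the
hypothetical `ztore` backend, not existing CloudServer APIs:

.. code-block:: js

   // lib/Config.js (sketch): helpers for a hypothetical 'ztore' backend
   function getZtoreEndpoint(locationObj) {
       // let an environment variable override the value from locationConfig.json
       return process.env.ZTORE_ENDPOINT || locationObj.details.ztoreEndpoint;
   }

   function getZtoreCredentials(locationObj) {
       return {
           accessKey: process.env.ZTORE_ACCESS_KEY
               || locationObj.details.credentials.accessKey,
           secretKey: process.env.ZTORE_SECRET_KEY
               || locationObj.details.credentials.secretKey,
       };
   }

   module.exports = { getZtoreEndpoint, getZtoreCredentials };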
:file:`/lib/data/external/{{backendName}}Client.js`
---------------------------------------------------
* this file is where you'll instantiate your backend client; this should be a
class with a constructor taking the config object built in `/lib/Config.js` as
parameter;
* over time, you may need some utility functions which we've defined in the
folder `/api/apiUtils`, and in the file `/lib/data/external/utils`;
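A minimal skeleton for such a client, still using the hypothetical `ztore`
backend, might start out like this (the constructor fields mirror the
`parseLC()` pseudocode shown later in this guide):

.. code-block:: js

   // lib/data/external/ZtoreClient.js (sketch)
   class ZtoreClient {
       constructor(config) {
           // config is built in /lib/Config.js and passed in by
           // /lib/data/locationConstraintParser.js
           this._endpoint = config.ztoreEndpoint;
           this._credentials = config.ztoreCredentials;
           this._bucketName = config.ztoreBucketname;
           this._bucketMatch = config.bucketMatch;
           this._dataStoreName = config.dataStoreName;
       }
       // put(), get(), delete(), head(), healthcheck() and the MPU functions
       // described in the following sections are added to this class
   }

   module.exports = ZtoreClient;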
:file:`/lib/data/external/utils.js`
-----------------------------------
* make sure to add options for `sourceLocationConstraintType` to be equal to
the name you gave your backend in :file:`/constants.js`;
:file:`/lib/data/external/{{BackendName}}_lib/`
-----------------------------------------------
* this folder is where you'll put the functions needed for supporting your
backend; keep your files as atomic as possible;
:file:`/tests/locationConfig/locationConfigTests.json`
------------------------------------------------------
* this file is where you'll create location profiles to be used by your
functional tests;
:file:`/lib/data/locationConstraintParser.js`
---------------------------------------------
* this is where you'll instantiate your client if the operation the end user
sent effectively writes to your backend; everything happens inside the
function `parseLC()`; you should add a condition that executes if
`locationObj.type` is the name of your backend (that you defined in
`constants.js`), and instantiates a client of yours. See pseudocode below,
assuming location type name is `ztore`:
.. code-block:: js
:linenos:
:emphasize-lines: 12
(...) //<1>
const ZtoreClient = require('./external/ZtoreClient');
const { config } = require('../Config'); //<1>
function parseLC(){ //<1>
(...) //<1>
Object.keys(config.locationConstraints).forEach(location => { //<1>
const locationObj = config.locationConstraints[location]; //<1>
(...) //<1>
if (locationObj.type === 'ztore') {
const ztoreEndpoint = config.getZtoreEndpoint(location);
const ztoreCredentials = config.getZtoreCredentials(location); //<2>
clients[location] = new ZtoreClient({
ztoreEndpoint,
ztoreCredentials,
ztoreBucketname: locationObj.details.ztoreBucketName,
bucketMatch: locationObj.details.bucketMatch,
dataStoreName: location,
}); //<3>
clients[location].clientType = 'ztore';
}
(...) //<1>
});
}
1. Code that is already there
2. You may need more utility functions depending on your backend specs
3. You may have more fields required in your constructor object depending on
your backend specs
Operation of type PUT
~~~~~~~~~~~~~~~~~~~~~
PUT routes are usually where people get started, as they are the easiest to check!
Simply go to your remote backend console and you'll be able to see whether your
object actually went up in the cloud...
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `put()` function is also called
`put()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`put(stream, size, keyContext, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
- `stream`: the stream of data you want to put in the cloud; if you're
unfamiliar with node.js streams, we suggest you start training, as we use
them a lot!
- `size`: the size of the object you're trying to put;
- `keyContext`: an object with metadata about the operation; common entries are
`namespace`, `bucketName`, `owner`, `cipherBundle`, and `tagging`; if these
are not sufficient for your integration, contact us to get architecture
validation before adding new entries;
- `reqUids`: the request unique ID used for logging;
- `callback`: your function's callback (should handle errors);
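Putting those parameters together, a stripped-down `put()` could look like the
sketch below. The `this._sdk.upload()` call, the `createLogger()` helper and
the `_createRemoteKey()` method are placeholders for whatever your backend's
SDK and your own utilities provide; only the signature described above comes
from CloudServer:

.. code-block:: js

   // sketch of ZtoreClient.put(); this._sdk.upload() is a placeholder for your
   // backend SDK, and `errors` would come from require('arsenal') at the top of
   // the file
   put(stream, size, keyContext, reqUids, callback) {
       const log = createLogger(reqUids); // assumed logging helper
       // derive the remote key, e.g. from keyContext.bucketName and keyContext.objectKey
       const key = this._createRemoteKey(keyContext);
       this._sdk.upload({ key, body: stream, contentLength: size }, err => {
           if (err) {
               log.error('error putting object to ztore', { error: err, key });
               // translate the SDK error into an Arsenal error for the caller
               return callback(errors.ServiceUnavailable);
           }
           // return the key under which the data was stored; it becomes part of
           // the object's data location in metadata
           return callback(null, key);
       });
   }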
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your PUT operation, and
  then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/put/put{{BackendName}}js`
-----------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
function returning adjusted `keys[]` to your tests.
Operation of type GET
~~~~~~~~~~~~~~~~~~~~~
GET routes are easy to test after PUT routes are implemented, hence why we're
covering them second.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `get()` function is also called
`get()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`get(objectGetInfo, range, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
- `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
data store, and `client`, the data store name;
- `range`: the range of bytes you will get, for "get-by-range" operations (we
recommend you do simple GETs first, and then look at this);
- `reqUids`: the request unique ID used for logging;
- `callback`: your function's callback (should handle errors);
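As a companion to the `put()` sketch above, a simplified `get()` could look
like the sketch below; the `delete()` and `head()` functions described in the
next two sections follow the same pattern with their respective parameters.
Again, `this._sdk.download()` and `createLogger()` are placeholders, not real
APIs:

.. code-block:: js

   // sketch of ZtoreClient.get(); this._sdk.download() is a placeholder
   get(objectGetInfo, range, reqUids, callback) {
       const log = createLogger(reqUids); // assumed logging helper
       const { key } = objectGetInfo;     // key of the object in the data store
       this._sdk.download({ key, range }, (err, readableStream) => {
           if (err) {
               log.error('error getting object from ztore', { error: err, key });
               return callback(errors.ServiceUnavailable);
           }
           // callers expect a readable stream of the (possibly ranged) object data
           return callback(null, readableStream);
       });
   }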
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your GET operation, and
  then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
-----------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
.. note:: You should not need this section if you have followed the tutorial in
   order (that is, if you have already covered the PUT operation).
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
Operation of type DELETE
~~~~~~~~~~~~~~~~~~~~~~~~
DELETE routes are easy to test after PUT routes are implemented, and they are
similar to GET routes in our implementation, hence why we're covering them
third.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `delete()` function is also called
`delete()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`delete(objectGetInfo, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
* `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
data store, and `client`, the data store name;
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your DELETE operation,
  and then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/delete/delete{{BackendName}}js`
-----------------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
.. note:: You should not need this section if you have followed the
   tutorial in order (that is, if you have already covered the PUT
   operation).
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
Operation of type HEAD
~~~~~~~~~~~~~~~~~~~~~~
HEAD routes are very similar to DELETE routes in our implementation, hence why
we're covering them fourth.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `head()` function is also called
`head()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`head(objectGetInfo, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
* `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
data store, and `client`, the data store name;
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your HEAD operation,
  and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
-----------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
.. note:: You should not need this section if you have followed the tutorial in
   order (that is, if you have already covered the PUT operation).
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
Healthcheck
~~~~~~~~~~~
Healthchecks are used to make sure that a failure to write to a remote cloud is
due to a problem on that remote cloud, and not on Zenko's side.
This is usually done by trying to create a bucket that already exists, and
making sure you get the expected answer.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `healthcheck()` function is called
`checkExternalBackend()` and it's defined in
:file:`/lib/data/multipleBackendGateway.js`; you will need to add your own;
- your healthcheck function should get `location` as a parameter, which is an
  object comprising:
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
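As an illustration, a healthcheck built on the "create a bucket that already
exists" trick could look like the sketch below; `createZtoreBucket()` stands
for the helper described in the next file section, and the error code checked
here is an assumption about your backend's SDK:

.. code-block:: js

   // sketch of ZtoreClient.healthcheck(); createZtoreBucket() is the helper
   // described in {{backendName}}_lib/{{backendName}}_create_bucket.js
   healthcheck(location, callback) {
       const ztoreResp = { ztore: {} };
       createZtoreBucket(this._sdk, this._bucketName, err => {
           // "bucket already owned" is the expected, healthy answer
           if (err && err.code !== 'BucketAlreadyOwnedByYou') {
               ztoreResp.ztore = { error: err.message, external: true };
               return callback(null, ztoreResp);
           }
           ztoreResp.ztore = { code: 200, message: 'OK' };
           return callback(null, ztoreResp);
       });
   }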
:file:`/lib/data/external/{{backendName}}_lib/{{backendName}}_create_bucket.js`
-------------------------------------------------------------------------------
- this is where you should write the function performing the actual bucket
creation;
:file:`/lib/data/external/{{backendName}}_lib/utils.js`
-------------------------------------------------------
- add an object named after your backend to the `backendHealth` dictionary,
  with proper `response` and `time` entries;
:file:`lib/data/multipleBackendGateway.js`
------------------------------------------
- edit the `healthcheck` function to add your location's array, and call your
healthcheck; see pseudocode below for a sample implementation, provided your
backend name is `ztore`
.. code-block:: js
:linenos:
(...) //<1>
healthcheck: (flightCheckOnStartUp, log, callback) => { //<1>
(...) //<1>
const ztoreArray = []; //<2>
async.each(Object.keys(clients), (location, cb) => { //<1>
(...) //<1>
} else if (client.clientType === 'ztore') {
ztoreArray.push(location); //<3>
return cb();
}
(...) //<1>
multBackendResp[location] = { code: 200, message: 'OK' }; //<1>
return cb();
}, () => { //<1>
async.parallel([
(...) //<1>
next => checkExternalBackend( //<4>
clients, ztoreArray, 'ztore', flightCheckOnStartUp,
externalBackendHealthCheckInterval, next),
] (...) //<1>
});
(...) //<1>
});
}
1. Code that is already there
2. The array that will store all locations of type 'ztore'
3. Where you add locations of type 'ztore' to the array
4. Where you actually call the healthcheck function on all 'ztore' locations
Multipart upload (MPU)
~~~~~~~~~~~~~~~~~~~~~~
This is the final part to supporting a new backend! MPU is far from
the easiest subject, but you've come so far it shouldn't be a problem.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
You'll be creating four functions with template signatures:
- `createMPU(Key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
cacheControl, contentDisposition, contentEncoding, log, callback)` will
initiate the multipart upload process; here, all parameters are
metadata headers except for:
* `Key`, the key id for the final object (collection of all parts);
* `bucketName`, the name of the bucket to which we will do an MPU;
* `log`, the logger;
- `uploadPart(request, streamingV4Params, stream, size, key, uploadId, partNumber, bucketName, log, callback)`
will be called for each part; the parameters are as follows:
* `request`, the request object for putting the part;
* `streamingV4Params`, the streaming auth V4 parameters used against S3;
* `stream`, the node.js readable stream used to put the part;
* `size`, the size of the part;
* `key`, the key of the object;
* `uploadId`, multipart upload id string;
* `partNumber`, the number of the part in this MPU (ordered);
* `bucketName`, the name of the bucket to which we will do an MPU;
* `log`, the logger;
- `completeMPU(jsonList, mdInfo, key, uploadId, bucketName, log, callback)` will
end the MPU process once all parts are uploaded; the parameters are as
follows:
* `jsonList`, user-sent list of parts to include in final mpu object;
* `mdInfo`, object containing 3 keys: storedParts, mpuOverviewKey, and
splitter;
* `key`, the key of the object;
* `uploadId`, multipart upload id string;
* `bucketName`, name of bucket;
* `log`, logger instance;
- `abortMPU(key, uploadId, bucketName, log, callback)` will handle errors, and
make sure that all parts that may have been uploaded will be deleted if the
MPU ultimately fails; the parameters are:
* `key`, the key of the object;
* `uploadId`, multipart upload id string;
* `bucketName`, name of bucket;
* `log`, logger instance.
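To give a feel for the shape of these functions, here is a heavily simplified
`abortMPU()` sketch; as before, `this._sdk.abortMultipartUpload()` is a
placeholder for your backend's SDK, and the other three functions follow the
same structure with their respective parameters:

.. code-block:: js

   // sketch of ZtoreClient.abortMPU(); this._sdk.abortMultipartUpload() is a
   // placeholder for your backend SDK
   abortMPU(key, uploadId, bucketName, log, callback) {
       const params = { bucket: bucketName, key, uploadId };
       this._sdk.abortMultipartUpload(params, err => {
           if (err) {
               log.error('error aborting MPU on ztore', { error: err, uploadId });
               return callback(errors.ServiceUnavailable);
           }
           // once aborted, any parts already uploaded are discarded by the backend
           return callback();
       });
   }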
:file:`/lib/api/objectPutPart.js`
---------------------------------
- you'll need to add your backend type in appropriate sections (simply look for
other backends already implemented).
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your MPU operations,
  and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
  your code clean;
:file:`lib/data/multipleBackendGateway.js`
------------------------------------------
- edit the `createMPU` function to add your location type, and call your
`createMPU()`; see pseudocode below for a sample implementation, provided your
backend name is `ztore`
.. code-block:: javascript
:linenos:
(...) //<1>
createMPU:(key, metaHeaders, bucketName, websiteRedirectHeader, //<1>
location, contentType, cacheControl, contentDisposition,
contentEncoding, log, cb) => {
const client = clients[location]; //<1>
if (client.clientType === 'aws_s3') { //<1>
return client.createMPU(key, metaHeaders, bucketName,
websiteRedirectHeader, contentType, cacheControl,
contentDisposition, contentEncoding, log, cb);
} else if (client.clientType === 'ztore') { //<2>
return client.createMPU(key, metaHeaders, bucketName,
websiteRedirectHeader, contentType, cacheControl,
contentDisposition, contentEncoding, log, cb);
}
return cb();
};
(...) //<1>
1. Code that is already there
2. Where the `createMPU()` of your client is actually called
Add functional tests
~~~~~~~~~~~~~~~~~~~~
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/initMPU/{{BackendName}}InitMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/listParts/{{BackendName}}ListPart.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/{{BackendName}}AbortMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/{{BackendName}}CompleteMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/{{BackendName}}UploadPart.js`
Adding support in Orbit, Zenko's UI for simplified Multi Cloud Management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This can only be done by our core developers' team. Once your backend
integration is merged, you may open a feature request on the
`Zenko repository`_, and we will
get back to you after we evaluate feasibility and maintainability.
.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new


@ -0,0 +1,43 @@
======================
S3-Compatible Backends
======================
Adding Support in CloudServer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is the easiest case for backend support integration: there is nothing to do
but configuration! Follow the steps described in our
:ref:`use-public-cloud` and make sure you:
- set ``details.awsEndpoint`` to your storage provider endpoint;
- use ``details.credentials`` and *not* ``details.credentialsProfile`` to set your
credentials for that S3-compatible backend.
For example, if you're using a Wasabi bucket as a backend, then your region
definition for that backend will look something like:
::
"wasabi-bucket-zenkobucket": {
"type": "aws_s3",
"legacyAwsBehavior": true,
"details": {
"awsEndpoint": "s3.wasabisys.com",
"bucketName": "zenkobucket",
"bucketMatch": true,
"credentials": {
"accessKey": "\\{YOUR_WASABI_ACCESS_KEY}",
"secretKey": "\\{YOUR_WASABI_SECRET_KEY}"
}
}
},
Adding Support in Zenko Orbit
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This can only be done by our core developers' team. If that's what you're
after, open a feature request on the `Zenko repository`_, and we will
get back to you after we evaluate feasibility and maintainability.
.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new


@ -1,11 +1,12 @@
Scality Zenko CloudServer
==================
=========================
.. _user-docs:
.. toctree::
:maxdepth: 2
:caption: Documentation
:glob:
CONTRIBUTING
GETTING_STARTED
@ -14,3 +15,4 @@ Scality Zenko CloudServer
DOCKER
INTEGRATIONS
ARCHITECTURE
developers/*

2
docs/requirements.in Normal file

@ -0,0 +1,2 @@
Sphinx >= 1.7.5
recommonmark >= 0.4.0

119
docs/requirements.txt Normal file

@ -0,0 +1,119 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
# tox -e pip-compile
#
alabaster==0.7.12 \
--hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
--hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 \
# via sphinx
babel==2.6.0 \
--hash=sha256:6778d85147d5d85345c14a26aada5e478ab04e39b078b0745ee6870c2b5cf669 \
--hash=sha256:8cba50f48c529ca3fa18cf81fa9403be176d374ac4d60738b839122dfaaa3d23 \
# via sphinx
certifi==2018.10.15 \
--hash=sha256:339dc09518b07e2fa7eda5450740925974815557727d6bd35d319c1524a04a4c \
--hash=sha256:6d58c986d22b038c8c0df30d639f23a3e6d172a05c3583e766f4c0b785c0986a \
# via requests
chardet==3.0.4 \
--hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
--hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
# via requests
commonmark==0.5.4 \
--hash=sha256:34d73ec8085923c023930dfc0bcd1c4286e28a2a82de094bb72fabcc0281cbe5 \
# via recommonmark
docutils==0.14 \
--hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
--hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
--hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 \
# via recommonmark, sphinx
idna==2.7 \
--hash=sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e \
--hash=sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16 \
# via requests
imagesize==1.1.0 \
--hash=sha256:3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8 \
--hash=sha256:f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5 \
# via sphinx
jinja2==2.10 \
--hash=sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd \
--hash=sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4 \
# via sphinx
markupsafe==1.1.0 \
--hash=sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432 \
--hash=sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b \
--hash=sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9 \
--hash=sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af \
--hash=sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834 \
--hash=sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd \
--hash=sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d \
--hash=sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7 \
--hash=sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b \
--hash=sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3 \
--hash=sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c \
--hash=sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2 \
--hash=sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7 \
--hash=sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36 \
--hash=sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1 \
--hash=sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e \
--hash=sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1 \
--hash=sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c \
--hash=sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856 \
--hash=sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550 \
--hash=sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492 \
--hash=sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672 \
--hash=sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401 \
--hash=sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6 \
--hash=sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6 \
--hash=sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c \
--hash=sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd \
--hash=sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1 \
# via jinja2
packaging==18.0 \
--hash=sha256:0886227f54515e592aaa2e5a553332c73962917f2831f1b0f9b9f4380a4b9807 \
--hash=sha256:f95a1e147590f204328170981833854229bb2912ac3d5f89e2a8ccd2834800c9 \
# via sphinx
pygments==2.2.0 \
--hash=sha256:78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d \
--hash=sha256:dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc \
# via sphinx
pyparsing==2.3.0 \
--hash=sha256:40856e74d4987de5d01761a22d1621ae1c7f8774585acae358aa5c5936c6c90b \
--hash=sha256:f353aab21fd474459d97b709e527b5571314ee5f067441dc9f88e33eecd96592 \
# via packaging
pytz==2018.7 \
--hash=sha256:31cb35c89bd7d333cd32c5f278fca91b523b0834369e757f4c5641ea252236ca \
--hash=sha256:8e0f8568c118d3077b46be7d654cc8167fa916092e28320cde048e54bfc9f1e6 \
# via babel
recommonmark==0.4.0 \
--hash=sha256:6e29c723abcf5533842376d87c4589e62923ecb6002a8e059eb608345ddaff9d \
--hash=sha256:cd8bf902e469dae94d00367a8197fb7b81fcabc9cfb79d520e0d22d0fbeaa8b7
requests==2.20.1 \
--hash=sha256:65b3a120e4329e33c9889db89c80976c5272f56ea92d3e74da8a463992e3ff54 \
--hash=sha256:ea881206e59f41dbd0bd445437d792e43906703fff75ca8ff43ccdb11f33f263 \
# via sphinx
six==1.11.0 \
--hash=sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9 \
--hash=sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb \
# via packaging, sphinx
snowballstemmer==1.2.1 \
--hash=sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128 \
--hash=sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89 \
# via sphinx
sphinx==1.8.2 \
--hash=sha256:120732cbddb1b2364471c3d9f8bfd4b0c5b550862f99a65736c77f970b142aea \
--hash=sha256:b348790776490894e0424101af9c8413f2a86831524bd55c5f379d3e3e12ca64
sphinxcontrib-websupport==1.1.0 \
--hash=sha256:68ca7ff70785cbe1e7bccc71a48b5b6d965d79ca50629606c7861a21b206d9dd \
--hash=sha256:9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9 \
# via sphinx
typing==3.6.6 \
--hash=sha256:4027c5f6127a6267a435201981ba156de91ad0d1d98e9ddc2aa173453453492d \
--hash=sha256:57dcf675a99b74d64dacf6fba08fb17cf7e3d5fdff53d4a30ea2a5e7e52543d4 \
--hash=sha256:a4c8473ce11a65999c8f59cb093e70686b6c84c98df58c1dae9b3b196089858a \
# via sphinx
urllib3==1.24.1 \
--hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
--hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
# via requests

46
examples/go-md-search.go Normal file

@ -0,0 +1,46 @@
package main
import (
"fmt"
"time"
"bytes"
"net/http"
"net/url"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/signer/v4"
)
func main() {
// Input AWS access key, secret key
aws_access_key_id := "accessKey1"
aws_secret_access_key := "verySecretKey1"
endpoint := "http://localhost:8000"
bucket_name := "bucketname"
searchQuery := url.QueryEscape("x-amz-meta-color=blue")
buf := bytes.NewBuffer([]byte{})
requestUrl := fmt.Sprintf("%s/%s?search=%s",
endpoint, bucket_name, searchQuery)
request, err := http.NewRequest("GET", requestUrl, buf)
if err != nil {
panic(err)
}
reader := bytes.NewReader(buf.Bytes())
credentials := credentials.NewStaticCredentials(aws_access_key_id,
aws_secret_access_key, "")
signer := v4.NewSigner(credentials)
signer.Sign(request, reader, "s3", "us-east-1", time.Now())
client := &http.Client{}
resp, err := client.Do(request)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Println(string(body))
}


@ -0,0 +1,28 @@
const { S3 } = require('aws-sdk');
const config = {
sslEnabled: false,
endpoint: 'http://127.0.0.1:8000',
signatureCache: false,
signatureVersion: 'v4',
region: 'us-east-1',
s3ForcePathStyle: true,
accessKeyId: 'accessKey1',
secretAccessKey: 'verySecretKey1',
};
const s3Client = new S3(config);
const encodedSearch =
encodeURIComponent('x-amz-meta-color="blue"');
const req = s3Client.listObjects({ Bucket: 'bucketname' });
// the build event
req.on('build', () => {
req.httpRequest.path = `${req.httpRequest.path}?search=${encodedSearch}`;
});
req.on('success', res => {
process.stdout.write(`Result ${res.data}`);
});
req.on('error', err => {
process.stdout.write(`Error ${err}`);
});
req.send();


@ -0,0 +1,79 @@
import datetime
import hashlib
import hmac
import urllib
# pip install requests
import requests
access_key = 'accessKey1'
secret_key = 'verySecretKey1'
method = 'GET'
service = 's3'
host = 'localhost:8000'
region = 'us-east-1'
canonical_uri = '/bucketname'
query = 'x-amz-meta-color=blue'
canonical_querystring = 'search=%s' % (urllib.quote(query))
algorithm = 'AWS4-HMAC-SHA256'
t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def getSignatureKey(key, date_stamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
payload_hash = hashlib.sha256('').hexdigest()
canonical_headers = \
'host:{0}\nx-amz-content-sha256:{1}\nx-amz-date:{2}\n' \
.format(host, payload_hash, amz_date)
signed_headers = 'host;x-amz-content-sha256;x-amz-date'
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
.format(method, canonical_uri, canonical_querystring, canonical_headers,
signed_headers, payload_hash)
print(canonical_request)
credential_scope = '{0}/{1}/{2}/aws4_request' \
.format(date_stamp, region, service)
string_to_sign = '{0}\n{1}\n{2}\n{3}' \
.format(algorithm, amz_date, credential_scope,
hashlib.sha256(canonical_request).hexdigest())
signing_key = getSignatureKey(secret_key, date_stamp, region, service)
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
hashlib.sha256).hexdigest()
authorization_header = \
'{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
.format(algorithm, access_key, credential_scope, signed_headers, signature)
# The 'host' header is added automatically by the Python 'requests' library.
headers = {
'X-Amz-Content-Sha256': payload_hash,
'X-Amz-Date': amz_date,
'Authorization': authorization_header
}
endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring
r = requests.get(endpoint, headers=headers)
print(r.text)


@ -14,8 +14,10 @@ RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \
git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all
yarn cache clean --all && \
yarn global remove typescript
# run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed!

File diff suppressed because it is too large


@ -7,7 +7,8 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
const bucketDeleteWebsite = require('./bucketDeleteWebsite');
const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
const bucketDeletePolicy = require('./bucketDeletePolicy');
const bucketGet = require('./bucketGet');
const bucketDeleteQuota = require('./bucketDeleteQuota');
const { bucketGet } = require('./bucketGet');
const bucketGetACL = require('./bucketGetACL');
const bucketGetCors = require('./bucketGetCors');
const bucketGetVersioning = require('./bucketGetVersioning');
@ -17,6 +18,7 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
const bucketGetNotification = require('./bucketGetNotification');
const bucketGetObjectLock = require('./bucketGetObjectLock');
const bucketGetPolicy = require('./bucketGetPolicy');
const bucketGetQuota = require('./bucketGetQuota');
const bucketGetEncryption = require('./bucketGetEncryption');
const bucketHead = require('./bucketHead');
const { bucketPut } = require('./bucketPut');
@ -33,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
const bucketPutEncryption = require('./bucketPutEncryption');
const bucketPutPolicy = require('./bucketPutPolicy');
const bucketPutObjectLock = require('./bucketPutObjectLock');
const bucketUpdateQuota = require('./bucketUpdateQuota');
const bucketGetReplication = require('./bucketGetReplication');
const bucketDeleteReplication = require('./bucketDeleteReplication');
const corsPreflight = require('./corsPreflight');
@ -40,10 +43,11 @@ const completeMultipartUpload = require('./completeMultipartUpload');
const initiateMultipartUpload = require('./initiateMultipartUpload');
const listMultipartUploads = require('./listMultipartUploads');
const listParts = require('./listParts');
const metadataSearch = require('./metadataSearch');
const { multiObjectDelete } = require('./multiObjectDelete');
const multipartDelete = require('./multipartDelete');
const objectCopy = require('./objectCopy');
const objectDelete = require('./objectDelete');
const { objectDelete } = require('./objectDelete');
const objectDeleteTagging = require('./objectDeleteTagging');
const objectGet = require('./objectGet');
const objectGetACL = require('./objectGetACL');
@ -58,6 +62,7 @@ const objectPutTagging = require('./objectPutTagging');
const objectPutPart = require('./objectPutPart');
const objectPutCopyPart = require('./objectPutCopyPart');
const objectPutRetention = require('./objectPutRetention');
const objectRestore = require('./objectRestore');
const prepareRequestContexts
= require('./apiUtils/authorization/prepareRequestContexts');
const serviceGet = require('./serviceGet');
@ -67,6 +72,7 @@ const writeContinue = require('../utilities/writeContinue');
const validateQueryAndHeaders = require('../utilities/validateQueryAndHeaders');
const parseCopySource = require('./apiUtils/object/parseCopySource');
const { tagConditionKeyAuth } = require('./apiUtils/authorization/tagConditionKeys');
const { isRequesterASessionUser } = require('./apiUtils/authorization/permissionChecks');
const checkHttpHeadersSize = require('./apiUtils/object/checkHttpHeadersSize');
const monitoringMap = policies.actionMaps.actionMonitoringMapS3;
@ -79,6 +85,10 @@ const api = {
// Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod];
if (!actionLog &&
@ -187,21 +197,27 @@ const api = {
return async.waterfall([
next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err });
return next(err);
return next(arsenalError);
}
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, next) => {
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
}
if (isRequesterASessionUser(userInfo)) {
authNames.sessionName = userInfo.getShortid().split(':')[1];
}
log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
}
// issue 100 Continue to the client
writeContinue(request, response);
@ -232,12 +248,12 @@ const api = {
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params);
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
});
return undefined;
},
// Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
authorizationResults,
request,
requestContexts,
@ -248,13 +264,14 @@ const api = {
log.trace('tag authentication error', { error: err });
return next(err);
}
return next(null, userInfo, authResultsWithTags, streamingV4Params);
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
},
),
], (err, userInfo, authorizationResults, streamingV4Params) => {
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
if (err) {
return callback(err);
}
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) {
@ -271,19 +288,23 @@ const api = {
return acc;
}, {});
}
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params,
log, callback, authorizationResults);
log, methodCallback, authorizationResults);
}
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
sourceObject, sourceVersionId, log, methodCallback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
return this[apiMethod](userInfo, request, log, methodCallback);
});
},
bucketDelete,
@ -304,18 +325,21 @@ const api = {
bucketPutCors,
bucketPutVersioning,
bucketPutTagging,
bucketDeleteTagging,
bucketGetTagging,
bucketPutWebsite,
bucketPutReplication,
bucketGetReplication,
bucketDeleteReplication,
bucketDeleteQuota,
bucketPutLifecycle,
bucketUpdateQuota,
bucketGetLifecycle,
bucketDeleteLifecycle,
bucketPutPolicy,
bucketGetPolicy,
bucketGetQuota,
bucketDeletePolicy,
bucketDeleteTagging,
bucketPutObjectLock,
bucketPutNotification,
bucketGetNotification,
@ -325,6 +349,7 @@ const api = {
initiateMultipartUpload,
listMultipartUploads,
listParts,
metadataSearch,
multiObjectDelete,
multipartDelete,
objectDelete,
@ -343,6 +368,7 @@ const api = {
objectPutPart,
objectPutCopyPart,
objectPutRetention,
objectRestore,
serviceGet,
websiteGet: website,
websiteHead: website,


@ -5,8 +5,13 @@ const constants = require('../../../../constants');
const { config } = require('../../../Config');
const {
allAuthedUsersId, bucketOwnerActions, logId, publicId,
assumedRoleArnResourceType, backbeatLifecycleSessionName, arrayOfAllowed,
allAuthedUsersId,
bucketOwnerActions,
logId,
publicId,
arrayOfAllowed,
assumedRoleArnResourceType,
backbeatLifecycleSessionName,
actionsToConsiderAsObjectPut,
} = constants;
@ -14,6 +19,25 @@ const {
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS
? process.env.ALLOW_PUBLIC_READ_BUCKETS.split(',') : [];
function getServiceAccountProperties(canonicalID) {
const canonicalIDArray = canonicalID.split('/');
const serviceName = canonicalIDArray[canonicalIDArray.length - 1];
return constants.serviceAccountProperties[serviceName];
}
function isServiceAccount(canonicalID) {
return getServiceAccountProperties(canonicalID) !== undefined;
}
function isRequesterASessionUser(authInfo) {
const regexpAssumedRoleArn = /^arn:aws:sts::[0-9]{12}:assumed-role\/.*$/;
return regexpAssumedRoleArn.test(authInfo.getArn());
}
function isRequesterNonAccountUser(authInfo) {
return authInfo.isRequesterAnIAMUser() || isRequesterASessionUser(authInfo);
}
/**
* Checks the access control for a given bucket based on the request type and user's canonical ID.
*
@ -364,11 +388,11 @@ function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, lo
let requesterIsNotUser = true;
let arn = null;
if (authInfo) {
requesterIsNotUser = !authInfo.isRequesterAnIAMUser();
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn();
}
// if the bucket owner is an account, users should not have default access
if ((bucket.getOwner() === canonicalID) && requesterIsNotUser) {
if ((bucket.getOwner() === canonicalID) && requesterIsNotUser || isServiceAccount(canonicalID)) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
@ -439,11 +463,11 @@ function isObjAuthorized(bucket, objectMD, requestTypesInput, canonicalID, authI
let arn = null;
let isUserUnauthenticated = false;
if (authInfo) {
requesterIsNotUser = !authInfo.isRequesterAnIAMUser();
requesterIsNotUser = !isRequesterNonAccountUser(authInfo);
arn = authInfo.getArn();
isUserUnauthenticated = arn === undefined;
}
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser) {
if (objectMD['owner-id'] === canonicalID && requesterIsNotUser || isServiceAccount(canonicalID)) {
results[_requestType] = actionImplicitDenies[_requestType] === false;
return results[_requestType];
}
@ -514,7 +538,7 @@ function checkIp(value) {
}
// Apply the existing IP validation logic to each element
const validateIpRegex = (ip) => {
const validateIpRegex = ip => {
if (constants.ipv4Regex.test(ip)) {
return ip.split('.').every(part => parseInt(part, 10) <= 255);
}
@ -553,6 +577,7 @@ function validatePolicyConditions(policy) {
if (s.Condition) {
const conditionOperators = Object.keys(s.Condition);
// there can be multiple condition operations in the Condition enclosure
// eslint-disable-next-line no-restricted-syntax
for (const conditionOperator of conditionOperators) {
const conditionKey = Object.keys(s.Condition[conditionOperator])[0];
const conditionValue = s.Condition[conditionOperator][conditionKey];
@ -603,6 +628,10 @@ function isLifecycleSession(arn) {
module.exports = {
isBucketAuthorized,
isObjAuthorized,
getServiceAccountProperties,
isServiceAccount,
isRequesterASessionUser,
isRequesterNonAccountUser,
checkBucketAcls,
checkObjectAcls,
validatePolicyResource,


@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3');
}
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
if (apiMethod === 'bucketPut') {
return null;
}
@ -65,7 +65,17 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = [];
if (apiMethodAfterVersionCheck === 'objectCopy'
if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';
@ -147,6 +157,13 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
generateRequestContext('objectGet');
requestContexts.push(getObjectRequestContext);
} else if (apiMethodAfterVersionCheck === 'objectPut') {
// if put object with version
if (request.headers['x-scal-s3-version-id'] ||
request.headers['x-scal-s3-version-id'] === '') {
const putVersionRequestContext =
generateRequestContext('objectPutVersion');
requestContexts.push(putVersionRequestContext);
} else {
const putRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(putRequestContext);
@ -161,6 +178,38 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
generateRequestContext('objectPutLegalHold');
requestContexts.push(putLegalHoldStatusAction);
}
// if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =
generateRequestContext('objectPutACL');
requestContexts.push(putAclRequestContext);
}
if (request.headers['x-amz-object-lock-mode']) {
const putObjectLockRequestContext =
generateRequestContext('objectPutRetention');
requestContexts.push(putObjectLockRequestContext);
}
if (request.headers['x-amz-version-id']) {
const putObjectVersionRequestContext =
generateRequestContext('objectPutTaggingVersion');
requestContexts.push(putObjectVersionRequestContext);
}
}
} else if (apiMethodAfterVersionCheck === 'initiateMultipartUpload' ||
apiMethodAfterVersionCheck === 'objectPutPart' ||
apiMethodAfterVersionCheck === 'completeMultipartUpload'
) {
if (request.headers['x-scal-s3-version-id'] ||
request.headers['x-scal-s3-version-id'] === '') {
const putVersionRequestContext =
generateRequestContext('objectPutVersion');
requestContexts.push(putVersionRequestContext);
} else {
const putRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
requestContexts.push(putRequestContext);
}
// if put object (versioning) with ACL
if (isHeaderAcl(request.headers)) {
const putAclRequestContext =

View File

@ -60,6 +60,8 @@ function updateRequestContextsWithTags(request, requestContexts, apiMethod, log,
log.trace('error processing tag condition key evaluation');
return cb(err);
}
// FIXME introduced by CLDSRV-256, this syntax should be allowed by the linter
// eslint-disable-next-line no-restricted-syntax
for (const rc of requestContexts) {
rc.setNeedTagEval(true);
if (requestTagsQuery) {

View File

@ -10,13 +10,15 @@ const { parseBucketEncryptionHeaders } = require('./bucketEncryption');
const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper');
const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
const { isServiceAccount } = require('../authorization/permissionChecks');
const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket;
const zenkoSeparator = constants.zenkoSeparator;
const userBucketOwner = 'admin';
function addToUsersBucket(canonicalID, bucketName, log, cb) {
function addToUsersBucket(canonicalID, bucketName, bucketMD, log, cb) {
// BACKWARD: Simplify once we no longer have to deal with the old
// usersbucket name and old splitter
@ -28,7 +30,10 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
const splitter = usersBucketAttrs ?
constants.splitter : constants.oldSplitter;
let key = createKeyForUserBucket(canonicalID, splitter, bucketName);
const omVal = { creationDate: new Date().toJSON() };
const omVal = {
creationDate: new Date().toJSON(),
ingestion: bucketMD.getIngestion(),
};
// If the new format usersbucket does not exist, try to put the
// key in the old usersBucket using the old splitter.
// Otherwise put the key in the new format usersBucket
@ -36,7 +41,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
usersBucket : oldUsersBucket;
return metadata.putObjectMD(usersBucketBeingCalled, key,
omVal, {}, log, err => {
if (err && err.is.NoSuchBucket) {
if (err?.is?.NoSuchBucket) {
// There must be no usersBucket so createBucket
// one using the new format
log.trace('users bucket does not exist, ' +
@ -91,7 +96,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
return callback(err);
}
log.trace('created bucket in metadata');
return addToUsersBucket(canonicalID, bucketName, log, err => {
return addToUsersBucket(canonicalID, bucketName, bucket, log, err => {
if (err) {
return callback(err);
}
@ -114,7 +119,7 @@ function freshStartCreateBucket(bucket, canonicalID, log, callback) {
*/
function cleanUpBucket(bucketMD, canonicalID, log, callback) {
const bucketName = bucketMD.getName();
return addToUsersBucket(canonicalID, bucketName, log, err => {
return addToUsersBucket(canonicalID, bucketName, bucketMD, log, err => {
if (err) {
return callback(err);
}
@ -165,15 +170,28 @@ function createBucket(authInfo, bucketName, headers,
const ownerDisplayName =
authInfo.getAccountDisplayName();
const creationDate = new Date().toJSON();
const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true';
const headerObjectLock = headers['x-amz-bucket-object-lock-enabled'];
const objectLockEnabled
= headerObjectLock && headerObjectLock.toLowerCase() === 'true';
const bucket = new BucketInfo(bucketName, canonicalID, ownerDisplayName,
creationDate, BucketInfo.currentModelVersion(), null, null, null, null,
null, null, null, null, null, null, null, null, objectLockEnabled);
null, null, null, null, null, null, null, null, null, isNFSEnabled,
null, null, objectLockEnabled);
let locationConstraintVal = null;
if (locationConstraint !== undefined) {
bucket.setLocationConstraint(locationConstraint);
if (locationConstraint) {
const [locationConstraintStr, ingestion] =
locationConstraint.split(zenkoSeparator);
if (locationConstraintStr) {
locationConstraintVal = locationConstraintStr;
bucket.setLocationConstraint(locationConstraintStr);
}
if (ingestion === 'ingest') {
bucket.enableIngestion();
// automatically enable versioning for ingestion buckets
bucket.setVersioningConfiguration({ Status: 'Enabled' });
}
}
if (objectLockEnabled) {
// default versioning configuration AWS sets
@ -223,7 +241,8 @@ function createBucket(authInfo, bucketName, headers,
}
const existingBucketMD = results.getAnyExistingBucketInfo;
if (existingBucketMD instanceof BucketInfo &&
existingBucketMD.getOwner() !== canonicalID) {
existingBucketMD.getOwner() !== canonicalID &&
!isServiceAccount(canonicalID)) {
// return existingBucketMD to collect cors headers
return cb(errors.BucketAlreadyExists, existingBucketMD);
}
@ -254,7 +273,7 @@ function createBucket(authInfo, bucketName, headers,
// error unless old AWS behavior (us-east-1)
// Existing locationConstraint must have legacyAwsBehavior === true
// New locationConstraint should have legacyAwsBehavior === true
if (isLegacyAWSBehavior(locationConstraint) &&
if (isLegacyAWSBehavior(locationConstraintVal) &&
isLegacyAWSBehavior(existingBucketMD.getLocationConstraint())) {
log.trace('returning 200 instead of 409 to mirror us-east-1');
return cb(null, existingBucketMD);

View File

@ -93,7 +93,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, request, log,
log, (err, objectsListRes) => {
// If no shadow bucket ever created, no ongoing MPU's, so
// continue with deletion
if (err && err.is.NoSuchBucket) {
if (err?.is.NoSuchBucket) {
return next();
}
if (err) {

View File

@ -0,0 +1,26 @@
const { errors } = require('arsenal');
function checkPreferredLocations(location, locationConstraints, log) {
const retError = loc => {
const errMsg = 'value of the location you are attempting to set - ' +
`${loc} - is not listed in the locationConstraint config`;
log.trace(`locationConstraint is invalid - ${errMsg}`,
{ locationConstraint: loc });
return errors.InvalidLocationConstraint.customizeDescription(errMsg);
};
if (typeof location === 'string' && !locationConstraints[location]) {
return retError(location);
}
if (typeof location === 'object') {
const { read, write } = location;
if (!locationConstraints[read]) {
return retError(read);
}
if (!locationConstraints[write]) {
return retError(write);
}
}
return null;
}
module.exports = checkPreferredLocations;
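A minimal usage sketch for the helper above (the require path, location names, and the locationConstraints subset are hypothetical; the real caller passes the server's location config and a werelogs logger):

const checkPreferredLocations = require('./checkPreferredLocations');

// hypothetical subset of config.locationConstraints and a no-op logger
const locationConstraints = { 'us-east-1': {}, 'aws-site': {} };
const log = { trace: () => {} };

// both the string form and the { read, write } form are accepted
checkPreferredLocations('aws-site', locationConstraints, log);
// -> null (valid)
checkPreferredLocations({ read: 'aws-site', write: 'unknown-loc' }, locationConstraints, log);
// -> InvalidLocationConstraint error naming 'unknown-loc'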

View File

@ -11,11 +11,11 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
// If the object representing the bucket is not in the
// users bucket just continue
if (error && error.is.NoSuchKey) {
if (error?.is.NoSuchKey) {
return cb(null);
// BACKWARDS COMPATIBILITY: Remove this once no longer
// have old user bucket format
} else if (error && error.NoSuchBucket) {
} else if (error?.is.NoSuchBucket) {
const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
oldSplitter, bucketName);
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,

View File

@ -15,11 +15,21 @@ function getNotificationConfiguration(parsedXml) {
}
const targets = new Set(config.bucketNotificationDestinations.map(t => t.resource));
const notifConfigTargets = notifConfig.queueConfig.map(t => t.queueArn.split(':')[5]);
if (!notifConfigTargets.every(t => targets.has(t))) {
// TODO: match the error message to AWS's response along with
// the request destination name in the response
const errDesc = 'Unable to validate the destination configuration';
return { error: errors.InvalidArgument.customizeDescription(errDesc) };
// getting invalid targets
const invalidTargets = [];
notifConfigTargets.forEach((t, i) => {
if (!targets.has(t)) {
invalidTargets.push({
ArgumentName: notifConfig.queueConfig[i].queueArn,
ArgumentValue: 'The destination queue does not exist',
});
}
});
if (invalidTargets.length > 0) {
const errDesc = 'Unable to validate the following destination configurations';
let error = errors.InvalidArgument.customizeDescription(errDesc);
error = error.addMetadataEntry('invalidArguments', invalidTargets);
return { error };
}
return notifConfig;
}

View File

@ -0,0 +1,19 @@
/**
* parse LIKE expressions
* @param {string} regex - regex pattern
* @return {object} MongoDB search object
*/
function parseLikeExpression(regex) {
if (typeof regex !== 'string') {
return null;
}
const split = regex.split('/');
if (split.length < 3 || split[0] !== '') {
return { $regex: regex };
}
const pattern = split.slice(1, split.length - 1).join('/');
const regexOpt = split[split.length - 1];
return { $regex: new RegExp(pattern), $options: regexOpt };
}
module.exports = parseLikeExpression;
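As a rough usage sketch (require path assumed), the helper recognizes the '/pattern/options' form and falls back to a plain $regex string otherwise:

const parseLikeExpression = require('./parseLikeExpression');

parseLikeExpression('/blue$/i');
// -> { $regex: /blue$/, $options: 'i' }
parseLikeExpression('blue');
// -> { $regex: 'blue' } (no surrounding slashes, so the value is used as-is)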

View File

@ -0,0 +1,85 @@
const parseLikeExpression = require('./parseLikeExpression');
/*
This code is based on code from https://github.com/olehch/sqltomongo
with the following license:
The MIT License (MIT)
Copyright (c) 2016 Oleh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/**
* A helper object to map SQL-like naming to MongoDB query syntax
*/
const exprMapper = {
'=': '$eq',
'!=': '$ne',
'<>': '$ne',
'>': '$gt',
'<': '$lt',
'>=': '$gte',
'<=': '$lte',
'LIKE': '$regex',
};
/*
* Parses object with WHERE clause recursively
* and generates MongoDB `find` query object
*/
function parseWhere(root) {
const operator = Object.keys(root)[0];
// extract leaf binary expressions
if (operator === 'AND') {
const e1 = parseWhere(root[operator][0]);
const e2 = parseWhere(root[operator][1]);
// eslint-disable-next-line
return { '$and' : [
e1,
e2,
] };
} else if (operator === 'OR') {
const e1 = parseWhere(root[operator][0]);
const e2 = parseWhere(root[operator][1]);
// eslint-disable-next-line
return { '$or' : [
e1,
e2,
] };
}
const field = root[operator][0];
const value = root[operator][1];
const expr = exprMapper[operator];
const obj = {};
if (operator === 'LIKE') {
obj[`value.${field}`] = parseLikeExpression(value);
} else {
obj[`value.${field}`] = { [expr]: value };
}
return obj;
}
module.exports = parseWhere;
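A sketch of how the recursive mapping behaves, assuming an AST of the shape produced by sql-where-parser (an operator key with two operands):

const parseWhere = require('./parseWhere');

// AST for: `content-length` > 5 AND tags.color = "blue"
const ast = {
    AND: [
        { '>': ['content-length', 5] },
        { '=': ['tags.color', 'blue'] },
    ],
};

parseWhere(ast);
// -> { $and: [
//        { 'value.content-length': { $gt: 5 } },
//        { 'value.tags.color': { $eq: 'blue' } },
//    ] }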

View File

@ -0,0 +1,32 @@
const config = require('../../../Config').config;
/**
* Validates that the replication configuration contains a preferred
* read location if the bucket location is a transient source
*
* @param {object} repConfig - replication configuration
* @param {object} bucket - bucket metadata
*
* @return {boolean} validity of replication configuration with
* transient source
*/
function validateReplicationConfig(repConfig, bucket) {
const bucketLocationName = bucket.getLocationConstraint();
if (!repConfig || !repConfig.rules) {
return false;
}
const bucketLocation = config.locationConstraints[bucketLocationName];
if (!bucketLocation.isTransient) {
return true;
}
return repConfig.rules.every(rule => {
if (!rule.storageClass) {
return true;
}
const storageClasses = rule.storageClass.split(',');
return storageClasses.some(
site => site.endsWith(':preferred_read'));
});
}
module.exports = validateReplicationConfig;
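A hedged sketch of a configuration that passes the check (the bucket stub, role ARN, and site names are hypothetical; the actual result also depends on config.locationConstraints loaded by the module):

const validateReplicationConfig = require('./validateReplicationConfig');

// hypothetical bucket metadata stub; the real caller passes a BucketInfo
const bucketMD = { getLocationConstraint: () => 'transient-src' };

// for a transient source, every rule with a storageClass must name at least
// one ":preferred_read" site, otherwise the configuration is rejected
const repConfig = {
    role: 'arn:aws:iam::123456789012:role/replication',
    rules: [{ prefix: '', enabled: true,
        storageClass: 'aws-site:preferred_read,azure-site' }],
};
// validateReplicationConfig(repConfig, bucketMD) -> true when
// config.locationConstraints['transient-src'].isTransient === true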

View File

@ -0,0 +1,96 @@
const Parser = require('sql-where-parser');
const { errors } = require('arsenal');
const objModel = require('arsenal').models.ObjectMD;
const BINARY_OP = 2;
const sqlConfig = {
operators: [
{
'=': BINARY_OP,
'<': BINARY_OP,
'>': BINARY_OP,
'<>': BINARY_OP,
'<=': BINARY_OP,
'>=': BINARY_OP,
'!=': BINARY_OP,
},
{ LIKE: BINARY_OP },
{ AND: BINARY_OP },
{ OR: BINARY_OP },
],
tokenizer: {
shouldTokenize: ['(', ')', '=', '!=', '<', '>', '<=', '>=', '<>'],
shouldMatch: ['"', '\'', '`'],
shouldDelimitBy: [' ', '\n', '\r', '\t'],
},
};
const parser = new Parser(sqlConfig);
function _validateTree(whereClause, possibleAttributes) {
let invalidAttribute;
function _searchTree(node) {
if (typeof node !== 'object') {
invalidAttribute = node;
} else {
const operator = Object.keys(node)[0];
if (operator === 'AND' || operator === 'OR') {
_searchTree(node[operator][0]);
_searchTree(node[operator][1]);
} else {
const field = node[operator][0];
if (!field.startsWith('tags.') &&
!possibleAttributes[field] &&
!field.startsWith('replicationInfo.') &&
!field.startsWith('x-amz-meta-')) {
invalidAttribute = field;
}
}
}
}
_searchTree(whereClause);
return invalidAttribute;
}
/**
* validateSearchParams - validate value of ?search= in request
* @param {string} searchParams - value of search params in request
* which should be a SQL WHERE clause
* For metadata: x-amz-meta-color=\"blue\"
* For tags: tags.x-amz-meta-color=\"blue\"
* For replication status : replication-status=\"PENDING\"
* For any other attribute: `content-length`=5
* @return {undefined | error} undefined if it validates, or an arsenal error if not
*/
function validateSearchParams(searchParams) {
let ast;
try {
// allow using 'replicationStatus' as search param to increase
// ease of use, pending metadata search rework
// eslint-disable-next-line no-param-reassign
searchParams = searchParams.replace(
'replication-status', 'replicationInfo.status');
ast = parser.parse(searchParams);
} catch (e) {
if (e) {
return {
error: errors.InvalidArgument
.customizeDescription('Invalid sql where clause ' +
'sent as search query'),
};
}
}
const possibleAttributes = objModel.getAttributes();
const invalidAttribute = _validateTree(ast, possibleAttributes);
if (invalidAttribute) {
return {
error: errors.InvalidArgument
.customizeDescription('Search param ' +
`contains unknown attribute: ${invalidAttribute}`) };
}
return {
ast,
};
}
module.exports = validateSearchParams;
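A hedged usage sketch (require path assumed; the exact AST shape comes from sql-where-parser):

const validateSearchParams = require('./validateSearchParams');

// a known ObjectMD attribute, a tags.* key, or an x-amz-meta-* key yields { ast }
const ok = validateSearchParams('x-amz-meta-color="blue"');
// ok.ast is the parsed WHERE tree, e.g. { '=': ['x-amz-meta-color', 'blue'] }

// anything outside those families is rejected with InvalidArgument
const bad = validateSearchParams('favorite-food="pizza"');
// bad.error description: 'Search param contains unknown attribute: favorite-food'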

View File

@ -1,233 +0,0 @@
const { config } = require('../../../Config');
const { legacyLocations } = require('../../../../constants.js');
const { locationConstraints } = config;
const escapeForXml = require('arsenal').s3middleware.escapeForXml;
class BackendInfo {
/**
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @constructor
* @param {string | undefined} objectLocationConstraint - location constraint
* for object based on user meta header
* @param {string | undefined } bucketLocationConstraint - location
* constraint for bucket based on bucket metadata
* @param {string} requestEndpoint - endpoint to which request was made
* @param {string | undefined } legacyLocationConstraint - legacy location
* constraint
*/
constructor(objectLocationConstraint, bucketLocationConstraint,
requestEndpoint, legacyLocationConstraint) {
this._objectLocationConstraint = objectLocationConstraint;
this._bucketLocationConstraint = bucketLocationConstraint;
this._requestEndpoint = requestEndpoint;
this._legacyLocationConstraint = legacyLocationConstraint;
return this;
}
/**
* validate proposed location constraint against config
* @param {string | undefined} locationConstraint - value of user
* metadata location constraint header or bucket location constraint
* @param {object} log - werelogs logger
* @return {boolean} - true if valid, false if not
*/
static isValidLocationConstraint(locationConstraint, log) {
if (Object.keys(config.locationConstraints).
indexOf(locationConstraint) < 0) {
log.trace('proposed locationConstraint is invalid',
{ locationConstraint });
return false;
}
return true;
}
/**
* validate that request endpoint is listed in the restEndpoint config
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if present, false if not
*/
static isRequestEndpointPresent(requestEndpoint, log) {
if (Object.keys(config.restEndpoints).indexOf(requestEndpoint) < 0) {
log.trace('requestEndpoint does not match config restEndpoints',
{ requestEndpoint });
return false;
}
return true;
}
/**
* validate that locationConstraint for request Endpoint matches
* one config locationConstraint
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if matches, false if not
*/
static isRequestEndpointValueValid(requestEndpoint, log) {
if (Object.keys(config.locationConstraints).indexOf(config
.restEndpoints[requestEndpoint]) < 0) {
log.trace('the default locationConstraint for request' +
'Endpoint does not match any config locationConstraint',
{ requestEndpoint });
return false;
}
return true;
}
/**
* validate that s3 server is running with a file or memory backend
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if running with file/mem backend, false if not
*/
static isMemOrFileBackend(requestEndpoint, log) {
if (config.backends.data === 'mem' ||
config.backends.data === 'file') {
log.trace('use data backend for the location', {
dataBackend: config.backends.data,
method: 'isMemOrFileBackend',
});
return true;
}
return false;
}
/**
* validate requestEndpoint against config or mem/file data backend
* - if there is no match for the request endpoint in the config
* restEndpoints and data backend is set to mem or file we will use this
* data backend for the location.
* - if locationConstraint for request Endpoint does not match
* any config locationConstraint, we will return an error
* @param {string} requestEndpoint - request endpoint
* @param {object} log - werelogs logger
* @return {boolean} - true if valid, false if not
*/
static isValidRequestEndpointOrBackend(requestEndpoint, log) {
if (!BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
return BackendInfo.isMemOrFileBackend(requestEndpoint, log);
}
return BackendInfo.isRequestEndpointValueValid(requestEndpoint, log);
}
/**
* validate controlling BackendInfo Parameter
* @param {string | undefined} objectLocationConstraint - value of user
* metadata location constraint header
* @param {string | null} bucketLocationConstraint - location
* constraint from bucket metadata
* @param {string} requestEndpoint - endpoint of request
* @param {object} log - werelogs logger
* @return {object} - location constraint validity
*/
static controllingBackendParam(objectLocationConstraint,
bucketLocationConstraint, requestEndpoint, log) {
if (objectLocationConstraint) {
if (BackendInfo.isValidLocationConstraint(objectLocationConstraint,
log)) {
log.trace('objectLocationConstraint is valid');
return { isValid: true };
}
log.trace('objectLocationConstraint is invalid');
return { isValid: false, description: 'Object Location Error - ' +
`Your object location "${escapeForXml(objectLocationConstraint)}"` +
'is not in your location config - Please update.' };
}
if (bucketLocationConstraint) {
if (BackendInfo.isValidLocationConstraint(bucketLocationConstraint,
log)) {
log.trace('bucketLocationConstraint is valid');
return { isValid: true };
}
log.trace('bucketLocationConstraint is invalid');
return { isValid: false, description: 'Bucket Location Error - ' +
`Your bucket location "${escapeForXml(bucketLocationConstraint)}"` +
' is not in your location config - Please update.' };
}
const legacyLocationConstraint =
BackendInfo.getLegacyLocationConstraint();
if (legacyLocationConstraint) {
log.trace('legacy location is valid');
return { isValid: true, legacyLocationConstraint };
}
if (!BackendInfo.isValidRequestEndpointOrBackend(requestEndpoint,
log)) {
return { isValid: false, description: 'Endpoint Location Error - ' +
`Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
'in your config OR the default location constraint for request ' +
`endpoint "${escapeForXml(requestEndpoint)}" does not ` +
'match any config locationConstraint - Please update.' };
}
if (BackendInfo.isRequestEndpointPresent(requestEndpoint, log)) {
return { isValid: true };
}
return { isValid: true, defaultedToDataBackend: true };
}
/**
* Return legacyLocationConstraint
* @return {string | undefined} legacyLocationConstraint;
*/
static getLegacyLocationConstraint() {
return legacyLocations.find(ll => locationConstraints[ll]);
}
/**
* Return objectLocationConstraint
* @return {string | undefined} objectLocationConstraint;
*/
getObjectLocationConstraint() {
return this._objectLocationConstraint;
}
/**
* Return bucketLocationConstraint
* @return {string | undefined} bucketLocationConstraint;
*/
getBucketLocationConstraint() {
return this._bucketLocationConstraint;
}
/**
* Return requestEndpoint
* @return {string} requestEndpoint;
*/
getRequestEndpoint() {
return this._requestEndpoint;
}
/**
* Return locationConstraint that should be used with put request
* Order of priority is:
* (1) objectLocationConstraint,
* (2) bucketLocationConstraint,
* (3) legacyLocationConstraint,
* (4) default locationConstraint for requestEndpoint if requestEndpoint
* is listed in restEndpoints in config.json
* (5) default data backend
* @return {string} locationConstraint;
*/
getControllingLocationConstraint() {
const objectLC = this.getObjectLocationConstraint();
const bucketLC = this.getBucketLocationConstraint();
const reqEndpoint = this.getRequestEndpoint();
if (objectLC) {
return objectLC;
}
if (bucketLC) {
return bucketLC;
}
if (this._legacyLocationConstraint) {
return this._legacyLocationConstraint;
}
if (config.restEndpoints[reqEndpoint]) {
return config.restEndpoints[reqEndpoint];
}
return config.backends.data;
}
}
module.exports = {
BackendInfo,
};

View File

@ -73,15 +73,6 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
return next(null, mpuBucket, destBucket, skipDataDelete);
});
},
function sendAbortPut(mpuBucket, destBucket, skipDataDelete, next) {
services.sendAbortMPUPut(bucketName, objectKey, uploadId, log,
err => {
if (err) {
return next(err, destBucket);
}
return next(null, mpuBucket, destBucket, skipDataDelete);
});
},
function getPartLocations(mpuBucket, destBucket, skipDataDelete,
next) {
services.getMPUparts(mpuBucket.getName(), uploadId, log,
@ -96,7 +87,6 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
},
function deleteData(mpuBucket, storedParts, destBucket,
skipDataDelete, next) {
// for Azure we do not need to delete data
if (skipDataDelete) {
return next(null, mpuBucket, storedParts, destBucket);
}

View File

@ -0,0 +1,19 @@
const { zenkoIDHeader } = require('arsenal').constants;
const _config = require('../../../Config').config;
/**
* applyZenkoUserMD - if request is within a Zenko deployment, apply user
* metadata called "zenko-source" to the object
* @param {Object} metaHeaders - user metadata object
* @return {undefined}
*/
function applyZenkoUserMD(metaHeaders) {
if (process.env.REMOTE_MANAGEMENT_DISABLE === '0' &&
!metaHeaders[zenkoIDHeader]) {
// eslint-disable-next-line no-param-reassign
metaHeaders[zenkoIDHeader] = _config.getPublicInstanceId();
}
}
module.exports = applyZenkoUserMD;
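A small sketch of the helper above (require paths assumed; the id value actually comes from Config.getPublicInstanceId() of the running deployment):

const { zenkoIDHeader } = require('arsenal').constants;
const applyZenkoUserMD = require('./applyZenkoUserMD');

// only applies when remote management is enabled, i.e. inside a Zenko deployment
process.env.REMOTE_MANAGEMENT_DISABLE = '0';

const metaHeaders = { 'x-amz-meta-color': 'blue' };
applyZenkoUserMD(metaHeaders);
// metaHeaders[zenkoIDHeader] now carries the deployment's public instance id
// (left untouched if a zenko id header was already present on the request)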

View File

@ -0,0 +1,27 @@
/**
* checkReadLocation - verify that a bucket's default read location exists
* for a specified read data locator
* @param {Config} config - Config object
* @param {string} locationName - location constraint
* @param {string} objectKey - object key
* @param {string} bucketName - bucket name
* @return {Object | null} return object containing location information
* if location exists; otherwise, null
*/
function checkReadLocation(config, locationName, objectKey, bucketName) {
const readLocation = config.getLocationConstraint(locationName);
if (readLocation) {
const bucketMatch = readLocation.details &&
readLocation.details.bucketMatch;
const backendKey = bucketMatch ? objectKey :
`${bucketName}/${objectKey}`;
return {
location: locationName,
key: backendKey,
locationType: readLocation.type,
};
}
return null;
}
module.exports = checkReadLocation;
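A usage sketch with a stubbed Config (the real caller passes the server Config object; the location name and type are hypothetical):

const checkReadLocation = require('./checkReadLocation');

const config = {
    getLocationConstraint: name => (name === 'aws-site'
        ? { type: 'aws_s3', details: { bucketMatch: false } }
        : undefined),
};

checkReadLocation(config, 'aws-site', 'photo.jpg', 'mybucket');
// bucketMatch is false, so the backend key is prefixed with the bucket name:
// -> { location: 'aws-site', key: 'mybucket/photo.jpg', locationType: 'aws_s3' }
checkReadLocation(config, 'unknown-loc', 'photo.jpg', 'mybucket');
// -> null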

View File

@ -0,0 +1,247 @@
/*
* Code based on Yutaka Oishi (Fujifilm) contributions
* Date: 11 Sep 2020
*/
const { ObjectMDArchive } = require('arsenal').models;
const errors = require('arsenal').errors;
const { config } = require('../../../Config');
const { locationConstraints } = config;
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Get response header "x-amz-restore"
* Called by objectHead.js
* @param {object} objMD - object's metadata
* @returns {string|undefined} x-amz-restore
*/
function getAmzRestoreResHeader(objMD) {
if (objMD.archive &&
objMD.archive.restoreRequestedAt &&
!objMD.archive.restoreCompletedAt) {
// Avoid race condition by relying on the `archive` MD of the object
// and return the right header after a RESTORE request.
// eslint-disable-next-line
return `ongoing-request="true"`;
}
if (objMD['x-amz-restore']) {
if (objMD['x-amz-restore']['expiry-date']) {
const utcDateTime = new Date(objMD['x-amz-restore']['expiry-date']).toUTCString();
// eslint-disable-next-line
return `ongoing-request="${objMD['x-amz-restore']['ongoing-request']}", expiry-date="${utcDateTime}"`;
}
}
return undefined;
}
/**
* Check if restore can be done.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} - undefined if the conditions for RestoreObject are fulfilled
*/
function _validateStartRestore(objectMD, log) {
if (objectMD.archive?.restoreCompletedAt) {
if (new Date(objectMD.archive?.restoreWillExpireAt) < new Date(Date.now())) {
// return InvalidObjectState error if the restored object is expired
// but restore info md of this object has not yet been cleared
log.debug('The restored object already expired.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
// If object is already restored, no further check is needed
// Furthermore, we cannot check if the location is cold, as the `dataStoreName` would have
// been reset.
return undefined;
}
const isLocationCold = locationConstraints[objectMD.dataStoreName]?.isCold;
if (!isLocationCold) {
// return InvalidObjectState error if the object is not in cold storage,
// not being in cold storage means the location's cold flag either does not exist or is explicitly false
log.debug('The bucket of the object is not in a cold storage location.',
{
isLocationCold,
method: '_validateStartRestore',
});
return errors.InvalidObjectState;
}
if (objectMD.archive?.restoreRequestedAt) {
// return RestoreAlreadyInProgress error if the object is currently being restored
// check that archive.restoreRequestedAt exists and archive.restoreCompletedAt does not yet exist
log.debug('The object is currently being restored.',
{
archive: objectMD.archive,
method: '_validateStartRestore',
});
return errors.RestoreAlreadyInProgress;
}
return undefined;
}
/**
* Check if "put version id" is allowed
*
* @param {ObjectMD} objMD - object metadata
* @param {string} versionId - object's version id
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} - undefined if "put version id" is allowed
*/
function validatePutVersionId(objMD, versionId, log) {
if (!objMD) {
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
log.error('error no object metadata found', { method: 'validatePutVersionId', versionId });
return err;
}
if (objMD.isDeleteMarker) {
log.error('version is a delete marker', { method: 'validatePutVersionId', versionId });
return errors.MethodNotAllowed;
}
const isLocationCold = locationConstraints[objMD.dataStoreName]?.isCold;
if (!isLocationCold) {
log.error('The object data is not stored in a cold storage location.',
{
isLocationCold,
dataStoreName: objMD.dataStoreName,
method: 'validatePutVersionId',
});
return errors.InvalidObjectState;
}
// make sure object archive restoration is in progress
// NOTE: we do not use putObjectVersion to update the restoration period.
if (!objMD.archive || !objMD.archive.restoreRequestedAt || !objMD.archive.restoreRequestedDays
|| objMD.archive.restoreCompletedAt || objMD.archive.restoreWillExpireAt) {
log.error('object archive restoration is not in progress',
{ method: 'validatePutVersionId', versionId });
return errors.InvalidObjectState;
}
return undefined;
}
/**
* Check if the object is already restored, and update the expiration date accordingly:
* > After restoring an archived object, you can update the restoration period by reissuing the
* > request with a new period. Amazon S3 updates the restoration period relative to the current
* > time.
*
* @param {ObjectMD} objectMD - object metadata
* @param {object} log - werelogs logger
* @return {boolean} - true if the object is already restored
*/
function _updateObjectExpirationDate(objectMD, log) {
// Check if restoreCompletedAt field exists
// Normally, we should check `archive.restoreWillExpireAt > current time`; however this is
// checked earlier in the process, so checking again here would create weird states
const isObjectAlreadyRestored = !!objectMD.archive.restoreCompletedAt;
log.debug('The restore status of the object.', {
isObjectAlreadyRestored,
method: 'isObjectAlreadyRestored'
});
if (isObjectAlreadyRestored) {
const expiryDate = new Date(objectMD.archive.restoreRequestedAt);
expiryDate.setTime(expiryDate.getTime() + (objectMD.archive.restoreRequestedDays * scaledMsPerDay));
/* eslint-disable no-param-reassign */
objectMD.archive.restoreWillExpireAt = expiryDate;
objectMD['x-amz-restore'] = {
'ongoing-request': false,
'expiry-date': expiryDate,
};
/* eslint-enable no-param-reassign */
}
return isObjectAlreadyRestored;
}
/**
* update restore expiration date.
*
* @param {ObjectMD} objectMD - objectMD instance
* @param {object} restoreParam - restore param
* @param {object} log - werelogs logger
* @return {ArsenalError|undefined} internal error if object MD is not valid
*
*/
function _updateRestoreInfo(objectMD, restoreParam, log) {
if (!objectMD.archive) {
log.debug('objectMD.archive doesn\'t exist', {
objectMD,
method: '_updateRestoreInfo'
});
return errors.InternalError.customizeDescription('Archive metadata is missing.');
}
/* eslint-disable no-param-reassign */
objectMD.archive.restoreRequestedAt = new Date();
objectMD.archive.restoreRequestedDays = restoreParam.days;
objectMD.originOp = 's3:ObjectRestore:Post';
/* eslint-enable no-param-reassign */
if (!ObjectMDArchive.isValid(objectMD.archive)) {
log.debug('archive is not valid', {
archive: objectMD.archive,
method: '_updateRestoreInfo'
});
return errors.InternalError.customizeDescription('Invalid archive metadata.');
}
return undefined;
}
/**
* Start restoring the object.
* If x-amz-restore does not exist, add it to objectMD (x-amz-restore = false).
* Calculate the restore expiry-date and add it to objectMD.
* Called by objectRestore.js
*
* @param {ObjectMD} objectMD - ObjectMD instance
* @param {object} restoreParam - restore request parameters
* @param {object} log - werelogs logger
* @param {function} cb - callback function
* @return {undefined}
*
*/
function startRestore(objectMD, restoreParam, log, cb) {
log.info('Validating if restore can be done or not.');
const checkResultError = _validateStartRestore(objectMD, log);
if (checkResultError) {
return cb(checkResultError);
}
log.info('Updating restore information.');
const updateResultError = _updateRestoreInfo(objectMD, restoreParam, log);
if (updateResultError) {
return cb(updateResultError);
}
const isObjectAlreadyRestored = _updateObjectExpirationDate(objectMD, log);
return cb(null, isObjectAlreadyRestored);
}
/**
* checks if object data is available or if it's in cold storage
* @param {ObjectMD} objMD Object metadata
* @returns {ArsenalError|null} error if object data is not available
*/
function verifyColdObjectAvailable(objMD) {
// return error when object is cold
if (objMD.archive &&
// Object is in cold backend
(!objMD.archive.restoreRequestedAt ||
// Object is being restored
(objMD.archive.restoreRequestedAt && !objMD.archive.restoreCompletedAt))) {
const err = errors.InvalidObjectState
.customizeDescription('The operation is not valid for the object\'s storage class');
return err;
}
return null;
}
module.exports = {
startRestore,
getAmzRestoreResHeader,
validatePutVersionId,
verifyColdObjectAvailable,
};
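A sketch of the x-amz-restore response-header helper (require path assumed; the dates are made-up examples):

const { getAmzRestoreResHeader } = require('./coldStorage');

// restore requested but not yet completed
getAmzRestoreResHeader({ archive: { restoreRequestedAt: '2024-01-01T00:00:00Z' } });
// -> 'ongoing-request="true"'

// restore completed and x-amz-restore already stamped on the metadata
getAmzRestoreResHeader({ 'x-amz-restore': {
    'ongoing-request': false, 'expiry-date': '2024-01-10T00:00:00Z' } });
// -> 'ongoing-request="false", expiry-date="Wed, 10 Jan 2024 00:00:00 GMT"'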

View File

@ -7,43 +7,17 @@ const { data } = require('../../../data/wrapper');
const services = require('../../../services');
const { dataStore } = require('./storeObject');
const locationConstraintCheck = require('./locationConstraintCheck');
const { versioningPreprocessing } = require('./versioning');
const { versioningPreprocessing, overwritingVersioning } = require('./versioning');
const removeAWSChunked = require('./removeAWSChunked');
const getReplicationInfo = require('./getReplicationInfo');
const { config } = require('../../../Config');
const validateWebsiteHeader = require('./websiteServing')
.validateWebsiteHeader;
const {
externalBackends, versioningNotImplBackends, zenkoIDHeader,
} = constants;
const applyZenkoUserMD = require('./applyZenkoUserMD');
const { externalBackends, versioningNotImplBackends } = constants;
const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure.';
/**
* Retro-propagation is where S3C ingestion will re-ingest an object whose
* request originated from Zenko.
* To avoid this, Zenko requests which create objects/versions will be tagged
* with a user-metadata header defined in constants.zenkoIDHeader. When
* ingesting objects into Zenko, we can determine if this object has already
* been created in Zenko.
* Delete marker requests cannot specify user-metadata fields, so we instead
* rely on checking the "user-agent" to see the origin of a request.
* If delete marker, and user-agent came from a Zenko client, we add the
* user-metadata field to the object metadata.
* @param {Object} metaHeaders - user metadata object
* @param {http.ClientRequest} request - client request with user-agent header
* @param {Boolean} isDeleteMarker - delete marker indicator
* @return {undefined}
*/
function _checkAndApplyZenkoMD(metaHeaders, request, isDeleteMarker) {
const userAgent = request.headers['user-agent'];
if (isDeleteMarker && userAgent && userAgent.includes('Zenko')) {
// eslint-disable-next-line no-param-reassign
metaHeaders[zenkoIDHeader] = 'zenko';
}
}
'a versioned object to a location-constraint of type Azure or GCP.';
function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
metadataStoreParams, dataToDelete, log, requestMethod, callback) {
@ -78,6 +52,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* credentialScope (to be used for streaming v4 auth if applicable)
* @param {(object|null)} overheadField - fields to be included in metadata overhead
* @param {RequestLogger} log - logger instance
* @param {string} originOp - Origin operation
* @param {function} callback - callback function
* @return {undefined} and call callback with (err, result) -
* result.contentMD5 - content md5 of new object or version
@ -85,7 +60,10 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
*/
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
overheadField, log, callback) {
overheadField, log, originOp, callback) {
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
const size = isDeleteMarker ? 0 : request.parsedContentLength;
// although the request method may actually be 'DELETE' if creating a
// delete marker, for our purposes we consider this to be a 'PUT'
@ -108,9 +86,9 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
});
return process.nextTick(() => callback(metaHeaders));
}
// if receiving a request from Zenko for a delete marker, we place a
// user-metadata field on the object
_checkAndApplyZenkoMD(metaHeaders, request, isDeleteMarker);
// if the request occurs within a Zenko deployment, we place a user-metadata
// field on the object
applyZenkoUserMD(metaHeaders);
log.trace('meta headers', { metaHeaders, method: 'objectPut' });
const objectKeyContext = {
@ -136,11 +114,26 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
size,
headers,
isDeleteMarker,
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size, null, null, authInfo, isDeleteMarker),
replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo),
overheadField,
log,
};
// For Azure BlobStorage API compatibility
// If an object already exists, copy/repair creation-time
// creation-time must remain static after an object is created
// --> EVEN FOR VERSIONS <--
if (objMD) {
if (objMD['creation-time']) {
metadataStoreParams.creationTime = objMD['creation-time'];
} else {
// If creation-time is not set (for old objects)
// fall back to the last modified and store it back to the db
metadataStoreParams.creationTime = objMD['last-modified'];
}
}
if (!isDeleteMarker) {
metadataStoreParams.contentType = request.headers['content-type'];
metadataStoreParams.cacheControl = request.headers['cache-control'];
@ -150,7 +143,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
removeAWSChunked(request.headers['content-encoding']);
metadataStoreParams.expires = request.headers.expires;
metadataStoreParams.tagging = request.headers['x-amz-tagging'];
metadataStoreParams.originOp = 's3:ObjectCreated:Put';
metadataStoreParams.originOp = originOp;
const defaultObjectLockConfiguration
= bucketMD.getObjectLockConfiguration();
if (defaultObjectLockConfiguration) {
@ -165,7 +158,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
// eslint-disable-next-line no-param-reassign
request.headers[constants.objectLocationConstraintHeader] =
objMD[constants.objectLocationConstraintHeader];
metadataStoreParams.originOp = 's3:ObjectRemoved:DeleteMarkerCreated';
metadataStoreParams.originOp = originOp;
}
const backendInfoObj =
@ -204,12 +197,42 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
const dontSkipBackend = externalBackends;
/* eslint-enable camelcase */
const mdOnlyHeader = request.headers['x-amz-meta-mdonly'];
const mdOnlySize = request.headers['x-amz-meta-size'];
return async.waterfall([
function storeData(next) {
if (size === 0 && !dontSkipBackend[locationType]) {
if (size === 0) {
if (!dontSkipBackend[locationType]) {
metadataStoreParams.contentMD5 = constants.emptyFileMd5;
return next(null, null, null);
}
// Handle mdOnlyHeader as a metadata only operation. If
// the object in question is actually 0 byte or has a body size
// then handle normally.
if (mdOnlyHeader === 'true' && mdOnlySize > 0) {
log.debug('metadata only operation x-amz-meta-mdonly');
const md5 = request.headers['x-amz-meta-md5chksum']
? new Buffer(request.headers['x-amz-meta-md5chksum'],
'base64').toString('hex') : null;
const numParts = request.headers['x-amz-meta-md5numparts'];
let _md5;
if (numParts === undefined) {
_md5 = md5;
} else {
_md5 = `${md5}-${numParts}`;
}
const versionId = request.headers['x-amz-meta-version-id'];
const dataGetInfo = {
key: objectKey,
dataStoreName: location,
dataStoreType: locationType,
dataStoreVersionId: versionId,
dataStoreMD5: _md5,
};
return next(null, dataGetInfo, _md5);
}
}
return dataStore(objectKeyContext, cipherBundle, request, size,
streamingV4Params, backendInfo, log, next);
},
@ -233,10 +256,19 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
dataGetInfoArr[0].cipheredDataKey =
cipherBundle.cipheredDataKey;
}
if (mdOnlyHeader === 'true') {
metadataStoreParams.size = mdOnlySize;
dataGetInfoArr[0].size = mdOnlySize;
}
metadataStoreParams.contentMD5 = calculatedHash;
return next(null, dataGetInfoArr);
},
function getVersioningInfo(infoArr, next) {
// if x-scal-s3-version-id header is specified, we overwrite the object/version metadata.
if (isPutVersion) {
const options = overwritingVersioning(objMD, metadataStoreParams);
return process.nextTick(() => next(null, options, infoArr));
}
return versioningPreprocessing(bucketName, bucketMD,
metadataStoreParams.objectKey, objMD, log, (err, options) => {
if (err) {

View File

@ -4,23 +4,25 @@ const {
LifecycleDateTime,
LifecycleUtils,
} = require('arsenal').s3middleware.lifecycleHelpers;
const { config } = require('../../../Config');
// moves lifecycle transition deadlines 1 day earlier, mostly for testing
const transitionOneDayEarlier = process.env.TRANSITION_ONE_DAY_EARLIER === 'true';
// moves lifecycle expiration deadlines 1 day earlier, mostly for testing
const expireOneDayEarlier = process.env.EXPIRE_ONE_DAY_EARLIER === 'true';
const {
expireOneDayEarlier,
transitionOneDayEarlier,
timeProgressionFactor,
scaledMsPerDay,
} = config.getTimeOptions();
const lifecycleDateTime = new LifecycleDateTime({
transitionOneDayEarlier,
expireOneDayEarlier,
timeProgressionFactor,
});
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime);
const oneDay = 24 * 60 * 60 * 1000; // Milliseconds in a day.
const lifecycleUtils = new LifecycleUtils(supportedLifecycleRules, lifecycleDateTime, timeProgressionFactor);
function calculateDate(objDate, expDays, datetime) {
return new Date(datetime.getTimestamp(objDate) + expDays * oneDay);
return new Date(datetime.getTimestamp(objDate) + (expDays * scaledMsPerDay));
}
function formatExpirationHeader(date, id) {

View File

@ -0,0 +1,51 @@
const { errors } = require('arsenal');
/**
* getReplicationBackendDataLocator - compares given location constraint to
* replication backends
* @param {object} locationObj - object containing location information
* @param {string} locationObj.location - name of location constraint
* @param {string} locationObj.key - keyname of object in location constraint
* @param {string} locationObj.locationType - type of location constraint
* @param {object} replicationInfo - information about object replication
* @param {array} replicationInfo.backends - array containing information about
* each replication location
* @param {string} replicationInfo.backends[].site - name of replication
* location
* @param {string} replicationInfo.backends[].status - status of replication
* @param {string} replicationInfo.backends[].dataStoreVersionId - version id
* of object at replication location
* @return {object} res - response object
* {array} [res.dataLocator] - if COMPLETED status: array
* containing the cloud location,
* undefined otherwise
* {string} [res.status] - replication status if no error
* {string} [res.reason] - reason message if PENDING/FAILED
* {Error} [res.error] - defined if object is not replicated to
* location passed in locationObj
*/
function getReplicationBackendDataLocator(locationObj, replicationInfo) {
const repBackendResult = {};
const locMatch = replicationInfo.backends.find(
backend => backend.site === locationObj.location);
if (!locMatch) {
repBackendResult.error = errors.InvalidLocationConstraint.
customizeDescription('Object is not replicated to location ' +
'passed in location header');
return repBackendResult;
}
repBackendResult.status = locMatch.status;
if (['PENDING', 'FAILED'].includes(locMatch.status)) {
repBackendResult.reason =
`Object replication to specified backend is ${locMatch.status}`;
return repBackendResult;
}
repBackendResult.dataLocator = [{
key: locationObj.key,
dataStoreName: locationObj.location,
dataStoreType: locationObj.locationType,
dataStoreVersionId: locMatch.dataStoreVersionId }];
return repBackendResult;
}
module.exports = getReplicationBackendDataLocator;
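A usage sketch (names are hypothetical) showing the possible shapes of the result:

const getReplicationBackendDataLocator = require('./getReplicationBackendDataLocator');

const locationObj = { location: 'aws-site', key: 'mybucket/photo.jpg', locationType: 'aws_s3' };
const replicationInfo = { backends: [
    { site: 'aws-site', status: 'COMPLETED', dataStoreVersionId: 'v1' },
] };

getReplicationBackendDataLocator(locationObj, replicationInfo);
// -> { status: 'COMPLETED', dataLocator: [{ key: 'mybucket/photo.jpg',
//        dataStoreName: 'aws-site', dataStoreType: 'aws_s3', dataStoreVersionId: 'v1' }] }
// PENDING/FAILED backends yield { status, reason } instead,
// and an unmatched site yields { error: InvalidLocationConstraint }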

View File

@ -1,5 +1,7 @@
const s3config = require('../../../Config').config;
const { isLifecycleSession } = require('../authorization/permissionChecks.js');
const { isServiceAccount, getServiceAccountProperties } =
require('../authorization/permissionChecks');
const { replicationBackends } = require('arsenal').constants;
function _getBackend(objectMD, site) {
const backends = objectMD ? objectMD.replicationInfo.backends : [];
@ -21,25 +23,31 @@ function _getStorageClasses(rule) {
}
const { replicationEndpoints } = s3config;
// If no storage class, use the given default endpoint or the sole endpoint
if (replicationEndpoints.length > 1) {
if (replicationEndpoints.length > 0) {
const endPoint =
replicationEndpoints.find(endpoint => endpoint.default);
replicationEndpoints.find(endpoint => endpoint.default) || replicationEndpoints[0];
return [endPoint.site];
}
return [replicationEndpoints[0].site];
return undefined;
}
function _getReplicationInfo(rule, replicationConfig, content, operationType,
objectMD) {
objectMD, bucketMD) {
const storageTypes = [];
const backends = [];
const storageClasses = _getStorageClasses(rule);
if (!storageClasses) {
return undefined;
}
storageClasses.forEach(storageClass => {
const location = s3config.locationConstraints[storageClass];
if (location && ['aws_s3', 'azure'].includes(location.type)) {
const storageClassName =
storageClass.endsWith(':preferred_read') ?
storageClass.split(':')[0] : storageClass;
const location = s3config.locationConstraints[storageClassName];
if (location && replicationBackends[location.type]) {
storageTypes.push(location.type);
}
backends.push(_getBackend(objectMD, storageClass));
backends.push(_getBackend(objectMD, storageClassName));
});
if (storageTypes.length > 0 && operationType) {
content.push(operationType);
@ -52,6 +60,7 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
storageClass: storageClasses.join(','),
role: replicationConfig.role,
storageType: storageTypes.join(','),
isNFS: bucketMD.isNFS(),
};
}
@ -65,26 +74,45 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
* @param {string} operationType - The type of operation to replicate
* @param {object} objectMD - The object metadata
* @param {AuthInfo} [authInfo] - authentication info of object owner
* @param {boolean} [isDeleteMarker] - whether creating a delete marker
* @return {undefined}
*/
function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType,
objectMD, authInfo, isDeleteMarker) {
objectMD, authInfo) {
const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA'];
const config = bucketMD.getReplicationConfiguration();
// If bucket does not have a replication configuration, do not replicate.
if (config) {
// If delete an object due to a lifecycle action,
// the delete marker is not replicated to the destination buckets.
if (isDeleteMarker && authInfo && isLifecycleSession(authInfo.getArn())) {
return undefined;
}
const rule = config.rules.find(rule =>
(objKey.startsWith(rule.prefix) && rule.enabled));
// Do not replicate object in the following cases:
//
// - bucket does not have a replication configuration
//
// - replication configuration does not apply to the object
// (i.e. no rule matches object prefix)
//
// - replication configuration applies to the object (i.e. a rule matches
// object prefix) but the status is disabled
//
// - object owner is an internal service account like Lifecycle,
// unless the account properties explicitly allow it to
// replicate like MD ingestion (because we do not want to
// replicate objects created from actions triggered by internal
// services, by design)
if (config) {
let doReplicate = false;
if (!authInfo || !isServiceAccount(authInfo.getCanonicalID())) {
doReplicate = true;
} else {
const serviceAccountProps = getServiceAccountProperties(
authInfo.getCanonicalID());
doReplicate = serviceAccountProps.canReplicate;
}
if (doReplicate) {
const rule = config.rules.find(
rule => (objKey.startsWith(rule.prefix) && rule.enabled));
if (rule) {
return _getReplicationInfo(rule, config, content, operationType,
objectMD);
return _getReplicationInfo(
rule, config, content, operationType, objectMD, bucketMD);
}
}
}
return undefined;

View File

@ -1,6 +1,7 @@
const { errors } = require('arsenal');
const { errors, models } = require('arsenal');
const { BackendInfo } = require('./BackendInfo');
const { BackendInfo } = models;
const { config } = require('../../../Config');
const constants = require('../../../../constants');
/**
@ -29,7 +30,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
const bucketLocationConstraint = bucket.getLocationConstraint();
const requestEndpoint = request.parsedHost;
const controllingBackend = BackendInfo.controllingBackendParam(
const controllingBackend = BackendInfo.controllingBackendParam(config,
objectLocationConstraint, bucketLocationConstraint,
requestEndpoint, log);
if (!controllingBackend.isValid) {
@ -39,7 +40,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
};
return backendInfoObj;
}
const backendInfo = new BackendInfo(objectLocationConstraint,
const backendInfo = new BackendInfo(config, objectLocationConstraint,
bucketLocationConstraint, requestEndpoint,
controllingBackend.legacyLocationConstraint);
backendInfoObj = {

View File

@ -0,0 +1,37 @@
const { errors } = require('arsenal');
const { config } = require('../../../Config');
/**
* locationHeaderCheck - compares 'x-amz-location-constraint' header
* to location constraints in config
* @param {object} headers - request headers
* @param {string} objectKey - key name of object
* @param {string} bucketName - name of bucket
* @return {undefined|Object} returns error, object, or undefined
* @return {string} return.location - name of location constraint
* @return {string} return.key - name of object at location constraint
* @return {string} return.locationType - type of location constraint
*/
function locationHeaderCheck(headers, objectKey, bucketName) {
const location = headers['x-amz-location-constraint'];
if (location) {
const validLocation = config.locationConstraints[location];
if (!validLocation) {
return errors.InvalidLocationConstraint.customizeDescription(
'Invalid location constraint specified in header');
}
const bucketMatch = validLocation.details.bucketMatch;
const backendKey = bucketMatch ? objectKey :
`${bucketName}/${objectKey}`;
return {
location,
key: backendKey,
locationType: validLocation.type,
};
}
// no location header was passed
return undefined;
}
module.exports = locationHeaderCheck;

View File

@ -22,8 +22,13 @@ function locationKeysHaveChanged(prev, curr) {
return curr.every(v => v.key !== prev);
}
const keysMap = {};
prev.forEach(v => { keysMap[v.key] = true; });
return curr.every(v => !keysMap[v.key]);
prev.forEach(v => {
if (!keysMap[v.dataStoreType]) {
keysMap[v.dataStoreType] = {};
}
keysMap[v.dataStoreType][v.key] = true;
});
return curr.every(v => !(keysMap[v.dataStoreType] && keysMap[v.dataStoreType][v.key]));
}
module.exports = locationKeysHaveChanged;
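A small sketch of the changed behavior (require path assumed): keys are now tracked per dataStoreType, so the same key on a different backend type no longer counts as a reuse:

const locationKeysHaveChanged = require('./locationKeysHaveChanged');

const prev = [{ key: 'k1', dataStoreType: 'aws_s3' }];

locationKeysHaveChanged(prev, [{ key: 'k1', dataStoreType: 'azure' }]);
// -> true (same key but a different backend type: treated as changed)
locationKeysHaveChanged(prev, [{ key: 'k1', dataStoreType: 'aws_s3' }]);
// -> false (same key on the same backend type: still in use)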

View File

@ -5,6 +5,7 @@ const { config } = require('../../../Config');
const vault = require('../../../auth/vault');
const { evaluateBucketPolicyWithIAM } = require('../authorization/permissionChecks');
const { scaledMsPerDay } = config.getTimeOptions();
/**
* Calculates retain until date for the locked object version
* @param {object} retention - includes days or years retention period
@ -20,8 +21,9 @@ function calculateRetainUntilDate(retention) {
const date = moment();
// Calculate the number of days to retain the lock on the object
const retainUntilDays = days || years * 365;
const retainUntilDaysInMs = retainUntilDays * scaledMsPerDay;
const retainUntilDate
= date.add(retainUntilDays, 'days');
= date.add(retainUntilDaysInMs, 'ms');
return retainUntilDate.toISOString();
}
/**

View File

@ -0,0 +1,172 @@
const async = require('async');
const { errors, s3middleware } = require('arsenal');
const { allowedRestoreObjectRequestTierValues } = require('../../../../constants');
const coldStorage = require('./coldStorage');
const monitoring = require('../../../utilities/monitoringHandler');
const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/**
* Check if tier is supported
* @param {object} restoreInfo - restore information
* @returns {ArsenalError|undefined} return NotImplemented error if tier not support
*/
function checkTierSupported(restoreInfo) {
if (!allowedRestoreObjectRequestTierValues.includes(restoreInfo.tier)) {
return errors.NotImplemented;
}
return undefined;
}
/**
* POST Object restore process
*
* @param {MetadataWrapper} metadata - metadata wrapper
* @param {object} mdUtils - utility object for metadata operations
* @param {AuthInfo} userInfo - Instance of AuthInfo class with requester's info
* @param {IncomingMessage} request - request info
* @param {object} log - Werelogs logger
* @param {function} callback - callback function
* @return {undefined}
*/
function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
const METHOD = 'objectRestore';
const { bucketName, objectKey } = request;
log.debug('processing request', { method: METHOD });
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query',
{
method: METHOD,
versionId: request.query.versionId,
error: decodedVidResult,
});
return process.nextTick(() => callback(decodedVidResult));
}
let isObjectRestored = false;
const mdValueParams = {
authInfo: userInfo,
bucketName,
objectKey,
versionId: decodedVidResult,
requestType: request.apiMethods || 'restoreObject',
/**
* Restoring an object might not cause any impact on
* the storage, if the object is already restored: in
* this case, the duration is extended. We disable the
* quota evaluation and trigger it manually.
*/
checkQuota: false,
request,
};
return async.waterfall([
// get metadata of bucket and object
function validateBucketAndObject(next) {
return mdUtils.standardMetadataValidateBucketAndObj(mdValueParams, request.actionImplicitDenies,
log, (err, bucketMD, objectMD) => {
if (err) {
log.trace('request authorization failed', { method: METHOD, error: err });
return next(err);
}
// Call back error if object metadata could not be obtained
if (!objectMD) {
const err = decodedVidResult ? errors.NoSuchVersion : errors.NoSuchKey;
log.trace('error no object metadata found', { method: METHOD, error: err });
return next(err, bucketMD);
}
// If object metadata is delete marker,
// call back NoSuchKey or MethodNotAllowed depending on specifying versionId
if (objectMD.isDeleteMarker) {
let err = errors.NoSuchKey;
if (decodedVidResult) {
err = errors.MethodNotAllowed;
}
log.trace('version is a delete marker', { method: METHOD, error: err });
return next(err, bucketMD, objectMD);
}
log.info('it acquired the object metadata.', {
'method': METHOD,
});
return next(null, bucketMD, objectMD);
});
},
// generate restore param obj from xml of request body and check tier validity
function parseRequestXmlAndCheckTier(bucketMD, objectMD, next) {
log.trace('parsing object restore information');
return parseRestoreRequestXml(request.post, log, (err, restoreInfo) => {
if (err) {
return next(err, bucketMD, objectMD, restoreInfo);
}
log.info('it parsed xml of the request body.', { method: METHOD, value: restoreInfo });
const checkTierResult = checkTierSupported(restoreInfo);
if (checkTierResult instanceof Error) {
return next(checkTierResult);
}
return next(null, bucketMD, objectMD, restoreInfo);
});
},
// start restore process
function startRestore(bucketMD, objectMD, restoreInfo, next) {
return coldStorage.startRestore(objectMD, restoreInfo, log,
(err, _isObjectRestored) => {
isObjectRestored = _isObjectRestored;
return next(err, bucketMD, objectMD);
});
},
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,
log, err => next(err, bucketMD, objectMD));
},
],
(err, bucketMD) => {
// generate CORS response header
const responseHeaders = collectCorsHeaders(request.headers.origin, request.method, bucketMD);
if (err) {
log.trace('error processing request',
{
method: METHOD,
error: err,
});
monitoring.promMetrics(
'POST', bucketName, err.code, 'restoreObject');
return callback(err, err.code, responseHeaders);
}
pushMetric('restoreObject', log, {
userInfo,
bucket: bucketName,
});
if (isObjectRestored) {
monitoring.promMetrics(
'POST', bucketName, '200', 'restoreObject');
return callback(null, 200, responseHeaders);
}
monitoring.promMetrics(
'POST', bucketName, '202', 'restoreObject');
return callback(null, 202, responseHeaders);
});
}
module.exports = objectRestore;
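For reference, a hedged sketch of the request body this handler consumes and the status codes it returns. The XML shape follows the public S3 RestoreObject API; whether parseRestoreRequestXml accepts exactly these elements is an assumption, since that parser lives in s3middleware and is not shown here:
// Illustrative RestoreObject body (element names follow the public S3 API):
const restoreRequestBody = `
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Days>2</Days>
  <GlacierJobParameters>
    <Tier>Standard</Tier>
  </GlacierJobParameters>
</RestoreRequest>`;
// POSTing such a body to the object's "restore" subresource returns
// 202 Accepted when the restore is newly started, or 200 OK when the
// object is already restored and only its restore duration is extended
// (see the final callback of the waterfall above).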

View File

@ -1,241 +0,0 @@
const { errors } = require('arsenal');
const crypto = require('crypto');
const constants = require('../../../../constants');
/**
* createAggregateETag - creates ETag from concatenated MPU part ETags to
* mimic AWS
* @param {string} concatETags - string of concatenated MPU part ETags
* @param {array} partList - list of parts to complete MPU with
* @return {string} aggregateETag - final complete MPU obj ETag
*/
function createAggregateETag(concatETags, partList) {
// AWS documentation is unclear on what the MD5 is that it returns
// in the response for a complete multipart upload request.
// The docs state that they might or might not
// return the MD5 of the complete object. It appears
// they are returning the MD5 of the parts' MD5s so that is
// what we have done here. We:
// 1) concatenate the hex version of the
// individual ETags
// 2) convert the concatenated hex to binary
// 3) take the md5 of the binary
// 4) create the hex digest of the md5
// 5) add '-' plus the number of parts at the end
// Convert the concatenated hex ETags to binary
const bufferedHex = Buffer.from(concatETags, 'hex');
// Convert the buffer to a binary string
const binaryString = bufferedHex.toString('binary');
// Get the md5 of the binary string
const md5Hash = crypto.createHash('md5');
md5Hash.update(binaryString, 'binary');
// Get the hex digest of the md5
let aggregateETag = md5Hash.digest('hex');
// Add the number of parts at the end
aggregateETag = `${aggregateETag}-${partList.length}`;
return aggregateETag;
}
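A quick illustrative sketch of the aggregation described above; the part data is made up, and crypto is already required at the top of this file:
// Two illustrative part ETags; the aggregate is the MD5 of their binary
// concatenation, suffixed with '-<number of parts>'.
const partEtag1 = crypto.createHash('md5').update('part one data').digest('hex');
const partEtag2 = crypto.createHash('md5').update('part two data').digest('hex');
const exampleParts = [{ PartNumber: ['1'] }, { PartNumber: ['2'] }];
const exampleAggregate = createAggregateETag(`${partEtag1}${partEtag2}`, exampleParts);
// exampleAggregate looks like '<32 hex chars>-2'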
/**
* generateMpuPartStorageInfo - generates info needed for storage of
* completed MPU object
* @param {array} filteredPartList - list of parts filtered from metadata
* @return {object} partsInfo - contains three keys: aggregateETag,
* dataLocations, and calculatedSize
*/
function generateMpuPartStorageInfo(filteredPartList) {
// Assemble array of part locations, aggregate size
// and build string to create aggregate ETag
let calculatedSize = 0;
const dataLocations = [];
let concatETags = '';
const partsInfo = {};
filteredPartList.forEach((storedPart, index) => {
const partETagWithoutQuotes =
storedPart.ETag.slice(1, -1);
const dataStoreETag = `${index + 1}:${partETagWithoutQuotes}`;
concatETags += partETagWithoutQuotes;
// If part was put by a regular put part rather than a
// copy it is always one location. With a put part
// copy, could be multiple locations so loop over array
// of locations.
for (let j = 0; j < storedPart.locations.length; j++) {
// If the piece has parts (was a put part object
// copy) each piece will have a size attribute.
// Otherwise, the piece was put by a regular put
// part and the size of the piece is the full
// part size.
const location = storedPart.locations[j];
// If there is no location, move on
if (!location || typeof location !== 'object') {
continue;
}
let pieceSize = Number.parseInt(storedPart.size, 10);
if (location.size) {
pieceSize = Number.parseInt(location.size, 10);
}
const pieceRetrievalInfo = {
key: location.key,
size: pieceSize,
start: calculatedSize,
dataStoreName: location.dataStoreName,
dataStoreETag,
cryptoScheme: location.sseCryptoScheme,
cipheredDataKey: location.sseCipheredDataKey,
};
dataLocations.push(pieceRetrievalInfo);
// eslint-disable-next-line no-param-reassign
calculatedSize += pieceSize;
}
});
partsInfo.aggregateETag =
createAggregateETag(concatETags, filteredPartList);
partsInfo.dataLocations = dataLocations;
partsInfo.calculatedSize = calculatedSize;
return partsInfo;
}
/**
* validateAndFilterMpuParts - validates part list sent by user and filters
* parts stored in metadata against user part list
* @param {array} storedParts - array of parts stored in metadata
* @param {array} jsonList - array of parts sent by user for completion
* @param {string} mpuOverviewKey - metadata mpu key
* @param {string} splitter - mpu key divider
* @param {object} log - Werelogs instance
* @return {object} filtersPartsObj - contains 3 keys: partList, keysToDelete,
* and extraPartLocations
*/
function validateAndFilterMpuParts(storedParts, jsonList, mpuOverviewKey,
splitter, log) {
let storedPartsCopy = [];
const filteredPartsObj = {};
filteredPartsObj.partList = [];
const keysToDelete = [];
storedParts.forEach(item => {
keysToDelete.push(item.key);
storedPartsCopy.push({
// In order to delete the part listing in the shadow
// bucket, we need the full key
key: item.key,
ETag: `"${item.value.ETag}"`,
size: item.value.Size,
locations: Array.isArray(item.value.partLocations) ?
item.value.partLocations : [item.value.partLocations],
});
});
keysToDelete.push(mpuOverviewKey);
// Check the list sent to make sure it is valid
const partLength = jsonList.Part.length;
// A user can put more parts than they end up including
// in the completed MPU, but there cannot be more
// parts in the complete message than were already put
if (partLength > storedPartsCopy.length) {
filteredPartsObj.error = errors.InvalidPart;
return filteredPartsObj;
}
let extraParts = [];
const extraPartLocations = [];
for (let i = 0; i < partLength; i++) {
const part = jsonList.Part[i];
const partNumber = Number.parseInt(part.PartNumber[0], 10);
// If the complete list of parts sent with
// the complete multipart upload request is not
// in ascending order return an error
if (i > 0) {
const previousPartNumber =
Number.parseInt(jsonList.Part[i - 1].PartNumber[0], 10);
if (partNumber <= previousPartNumber) {
filteredPartsObj.error = errors.InvalidPartOrder;
return filteredPartsObj;
}
}
let isPartUploaded = false;
while (storedPartsCopy.length > 0 && !isPartUploaded) {
const storedPart = storedPartsCopy[0];
const storedPartNumber =
Number.parseInt(storedPart.key.split(splitter)[1], 10);
if (storedPartNumber === partNumber) {
isPartUploaded = true;
filteredPartsObj.partList.push(storedPart);
let partETag = part.ETag[0].replace(/['"]/g, '');
// some clients send base64, convert to hex
// 32 chars = 16 bytes(2 chars-per-byte) = 128 bits of
// MD5 hex
if (partETag.length !== 32) {
const buffered = Buffer.from(part.ETag[0], 'base64')
.toString('hex');
partETag = `${buffered}`;
}
partETag = `"${partETag}"`;
// If list of parts sent with complete mpu request contains
// a part ETag that does not match the ETag for the part
// stored in metadata, return an error
if (partETag !== storedPart.ETag) {
filteredPartsObj.error = errors.InvalidPart;
return filteredPartsObj;
}
// If any part other than the last part is less than
// 5MB, return an error
const storedPartSize =
Number.parseInt(storedPart.size, 10);
// allow smaller parts for testing
if (process.env.MPU_TESTING) {
log.info('MPU_TESTING env variable setting',
{ setting: process.env.MPU_TESTING });
}
if (process.env.MPU_TESTING !== 'yes' &&
i < jsonList.Part.length - 1 &&
storedPartSize < constants.minimumAllowedPartSize) {
log.debug('part too small on complete mpu');
filteredPartsObj.error = errors.EntityTooSmall;
return filteredPartsObj;
}
storedPartsCopy = storedPartsCopy.splice(1);
} else {
extraParts.push(storedPart);
storedPartsCopy = storedPartsCopy.splice(1);
}
}
if (!isPartUploaded) {
filteredPartsObj.error = errors.InvalidPart;
return filteredPartsObj;
}
}
extraParts = extraParts.concat(storedPartsCopy);
// If there are extra parts, their data must be deleted once the MPU
// completes, so extract the info needed for deletion here
if (extraParts.length > 0) {
extraParts.forEach(part => {
const locations = part.locations;
locations.forEach(location => {
if (!location || typeof location !== 'object') {
return;
}
extraPartLocations.push(location);
});
});
}
filteredPartsObj.keysToDelete = keysToDelete;
filteredPartsObj.extraPartLocations = extraPartLocations;
return filteredPartsObj;
}
module.exports = {
generateMpuPartStorageInfo,
validateAndFilterMpuParts,
};
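A hedged sketch of the input shapes validateAndFilterMpuParts expects, inferred from the field accesses above (item.value.ETag/Size/partLocations, Part[i].PartNumber[0], Part[i].ETag[0], and keys split on the splitter). The splitter value and key layout below are placeholders, not the real constants:
// Illustrative inputs only; the real splitter comes from the constants module.
const splitter = '--split--';
const storedParts = [{
    key: `uploadId${splitter}00001`,
    value: {
        ETag: 'd41d8cd98f00b204e9800998ecf8427e',
        Size: 5242880,
        partLocations: [{ key: 'loc1', dataStoreName: 'us-east-1' }],
    },
}];
const jsonList = {
    Part: [{ PartNumber: ['1'], ETag: ['"d41d8cd98f00b204e9800998ecf8427e"'] }],
};
// validateAndFilterMpuParts(storedParts, jsonList, 'overview-key', splitter, log)
// returns { partList, keysToDelete, extraPartLocations }, or sets an error property.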

View File

@ -4,13 +4,40 @@ const async = require('async');
const metadata = require('../../../metadata/wrapper');
const { config } = require('../../../Config');
const { scaledMsPerDay } = config.getTimeOptions();
const versionIdUtils = versioning.VersionID;
// Use Arsenal function to generate a version ID used internally by metadata
// for null versions that are created before bucket versioning is configured
const nonVersionedObjId =
versionIdUtils.getInfVid(config.replicationGroupId);
/** decodedVidResult - decode the version id from a query object
/** decodeVID - decode the version id
* @param {string} versionId - version ID
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
* fails due to improper format, otherwise undefined or the decoded version id
*/
function decodeVID(versionId) {
if (versionId === 'null') {
return versionId;
}
let decoded;
const invalidErr = errors.InvalidArgument.customizeDescription('Invalid version id specified');
try {
decoded = versionIdUtils.decode(versionId);
} catch (err) {
return invalidErr;
}
if (decoded instanceof Error) {
return invalidErr;
}
return decoded;
}
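A short behavior sketch of decodeVID; the invalid input below is illustrative:
decodeVID('null');                              // returns the literal string 'null'
decodeVID('not-a-version-id') instanceof Error; // true: InvalidArgument
// On a properly encoded id, decodeVID returns the decoded version id string.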
/** decodeVersionId - decode the version id from a query object
* @param {object} [reqQuery] - request query object
* @param {string} [reqQuery.versionId] - version ID sent in request query
* @return {(Error|string|undefined)} - return Invalid Argument if decryption
@ -20,16 +47,7 @@ function decodeVersionId(reqQuery) {
if (!reqQuery || !reqQuery.versionId) {
return undefined;
}
let versionId = reqQuery.versionId;
if (versionId === 'null') {
return versionId;
}
versionId = versionIdUtils.decode(versionId);
if (versionId instanceof Error) {
return errors.InvalidArgument
.customizeDescription('Invalid version id specified');
}
return versionId;
return decodeVID(reqQuery.versionId);
}
/** getVersionIdResHeader - return encrypted version ID if appropriate
@ -43,8 +61,7 @@ function getVersionIdResHeader(verCfg, objectMD) {
if (objectMD.isNull || !objectMD.versionId) {
return 'null';
}
return versionIdUtils.encode(objectMD.versionId,
config.versionIdEncodingType);
return versionIdUtils.encode(objectMD.versionId);
}
return undefined;
}
@ -443,6 +460,93 @@ function preprocessingVersioningDelete(bucketName, bucketMD, objectMD, reqVersio
return options;
}
/**
* Keep the object's metadata when it is restored from cold storage,
* but drop the specific entries we don't want to keep
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {undefined}
*/
function restoreMetadata(objMD, metadataStoreParams) {
/* eslint-disable no-param-reassign */
const userMDToSkip = ['x-amz-meta-scal-s3-restore-attempt'];
// We need to keep user metadata and tags
Object.keys(objMD).forEach(key => {
if (key.startsWith('x-amz-meta-') && !userMDToSkip.includes(key)) {
metadataStoreParams.metaHeaders[key] = objMD[key];
}
});
if (objMD['x-amz-website-redirect-location']) {
if (!metadataStoreParams.headers) {
metadataStoreParams.headers = {};
}
metadataStoreParams.headers['x-amz-website-redirect-location'] = objMD['x-amz-website-redirect-location'];
}
if (objMD.replicationInfo) {
metadataStoreParams.replicationInfo = objMD.replicationInfo;
}
if (objMD.legalHold) {
metadataStoreParams.legalHold = objMD.legalHold;
}
if (objMD.acl) {
metadataStoreParams.acl = objMD.acl;
}
metadataStoreParams.creationTime = objMD['creation-time'];
metadataStoreParams.lastModifiedDate = objMD['last-modified'];
metadataStoreParams.taggingCopy = objMD.tags;
}
/** overwritingVersioning - return versioning information for S3 to handle
* storing version metadata with a specific version id.
* @param {object} objMD - obj metadata
* @param {object} metadataStoreParams - custom built object containing resource details.
* @return {object} options
* options.versionId - specific versionId to overwrite in metadata
* options.isNull - (true/undefined) whether new version is null or not
* options.nullVersionId - if storing a null version in version history, the
* version id of the null version
*/
function overwritingVersioning(objMD, metadataStoreParams) {
metadataStoreParams.updateMicroVersionId = true;
metadataStoreParams.amzStorageClass = objMD['x-amz-storage-class'];
// set correct originOp
metadataStoreParams.originOp = 's3:ObjectRestore:Completed';
// update restore
const days = objMD.archive?.restoreRequestedDays;
const now = Date.now();
metadataStoreParams.archive = {
archiveInfo: objMD.archive?.archiveInfo,
restoreRequestedAt: objMD.archive?.restoreRequestedAt,
restoreRequestedDays: objMD.archive?.restoreRequestedDays,
restoreCompletedAt: new Date(now),
restoreWillExpireAt: new Date(now + (days * scaledMsPerDay)),
};
/* eslint-enable no-param-reassign */
const versionId = objMD.versionId || undefined;
const options = {
versionId,
isNull: objMD.isNull,
};
if (objMD.nullVersionId) {
options.extraMD = {
nullVersionId: objMD.nullVersionId,
};
}
restoreMetadata(objMD, metadataStoreParams);
return options;
}
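An illustrative sketch of the inputs and outputs of overwritingVersioning; all field values below are made up:
const exampleObjMD = {
    versionId: 'internal-version-id',
    'x-amz-storage-class': 'cold-location',
    'creation-time': '2024-01-01T00:00:00.000Z',
    'last-modified': '2024-01-02T00:00:00.000Z',
    archive: { restoreRequestedDays: 2, restoreRequestedAt: '2024-01-03T00:00:00.000Z' },
};
const exampleStoreParams = { metaHeaders: {} };
const exampleOptions = overwritingVersioning(exampleObjMD, exampleStoreParams);
// exampleOptions.versionId === 'internal-version-id', exampleOptions.isNull === undefined
// exampleStoreParams.originOp === 's3:ObjectRestore:Completed'
// exampleStoreParams.archive.restoreWillExpireAt is restoreRequestedDays * scaledMsPerDay after now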
module.exports = {
decodeVersionId,
getVersionIdResHeader,
@ -452,4 +556,6 @@ module.exports = {
versioningPreprocessing,
getVersionSpecificMetadataOptions,
preprocessingVersioningDelete,
overwritingVersioning,
decodeVID,
};

View File

@ -0,0 +1,314 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
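A worked sketch of the main cases, using a minimal stub in place of a real BucketInfo instance:
const stubBucket = { isVersioningEnabled: () => false };
const stubObjMD = { 'content-length': '1000' };
// restore: the full archived size is counted
processBytesToWrite('objectRestore', stubBucket, undefined, 0, stubObjMD); // 1000
// delete on a non-versioned bucket: negative delta
processBytesToWrite('objectDelete', stubBucket, undefined, 0, stubObjMD);  // -1000
// overwrite on a non-versioned bucket: only the size difference is counted
processBytesToWrite('objectPut', stubBucket, undefined, 400, stubObjMD);   // 400 - 1000 = -600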
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
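A small sketch of the staleness check; the logger stub is illustrative and QuotaService.maxStaleness is assumed to be far smaller than decades:
const stubLog = { warn: () => {} };
const freshMetric = { date: new Date().toISOString(), bytesTotal: 0 };
isMetricStale(freshMetric, 'bucket', 'my-bucket', 'objectPut', 100, stubLog); // false
const staleMetric = { date: new Date(0).toISOString(), bytesTotal: 0 };
isMetricStale(staleMetric, 'bucket', 'my-bucket', 'objectPut', 100, stubLog); // true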
/**
* Evaluates quotas for a bucket and an account and updates the inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight bytes to register with the quota service.
* @param {number} inflightForCheck - The number of inflight bytes used for the quota comparison.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
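A sketch mirroring how this helper is used further down: durations are taken with process.hrtime.bigint() (nanoseconds) and exported as seconds:
const evaluationStart = process.hrtime.bigint();
// ... quota evaluation work ...
monitorQuotaEvaluationDuration('objectPut', 'bucket', 200,
    Number(process.hrtime.bigint() - evaluationStart));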
/**
* validateQuotas - checks the bucket and account quotas before an API call
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
* the incoming bytes, is under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {undefined} - calls back with errors.QuotaExceeded if a quota is exceeded
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
// is checked second. This logic handles these API calls by
// ensuring the bytes are positive (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes are compared with the quota. The current bytes
// are not sent to the utilization service. When inflights are enabled,
// the sum of the current utilization metrics only are compared with the
// quota. They include the current inflight bytes sent in the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};
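A hedged sketch of wiring validateQuotas into an API handler, mirroring the evaluateQuotas step of objectRestore shown earlier. The request fields read here (apiMethod, apiMethods, accountQuotas, finalizerHooks) are the ones the functions above rely on; how they are populated is handled elsewhere:
function evaluateRestoreQuota(request, bucketMD, objectMD, log, next) {
    const actions = Array.isArray(request.apiMethods) ?
        request.apiMethods : [request.apiMethods || 'restoreObject'];
    const bytes = processBytesToWrite(request.apiMethod, bucketMD, undefined, 0, objectMD);
    return validateQuotas(request, bucketMD, request.accountQuotas, actions,
        request.apiMethod, bytes, false, log, next);
}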

View File

@ -3,7 +3,7 @@ const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/metrics');
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');

View File

@ -4,7 +4,7 @@ const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const versionIdUtils = versioning.VersionID;
const monitoring = require('../../utilities/metrics');
const monitoring = require('../../utilities/monitoringHandler');
const { getLocationConstraintErrorMessage, processNonCurrents,
validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');

View File

@ -3,7 +3,7 @@ const constants = require('../../../constants');
const services = require('../../services');
const { standardMetadataValidateBucket } = require('../../metadata/metadataUtils');
const { pushMetric } = require('../../utapi/utilities');
const monitoring = require('../../utilities/metrics');
const monitoring = require('../../utilities/monitoringHandler');
const { processOrphans, validateMaxScannedEntries } = require('../apiUtils/object/lifecycle');
const { config } = require('../../Config');

View File

@ -4,7 +4,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/metrics');
const monitoring = require('../utilities/monitoringHandler');
/**
* bucketDelete - DELETE bucket (currently supports only non-versioned buckets)

View File

@ -6,7 +6,7 @@ const { isBucketAuthorized } =
require('./apiUtils/authorization/permissionChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/metrics');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteCors';

View File

@ -2,7 +2,7 @@ const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/metrics');
const monitoring = require('../utilities/monitoringHandler');
/**
* bucketDeleteLifecycle - Delete the bucket Lifecycle configuration

View File

@ -0,0 +1,58 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
* bucketDeleteQuota - Delete (reset) the bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketDeleteQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketDeleteQuota' });
const { bucketName } = request;
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
request,
};
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, bucket) => next(err, bucket)),
(bucket, next) => {
bucket.setQuota(0);
metadata.updateBucket(bucket.getName(), bucket, log, err =>
next(err, bucket));
},
], (err, bucket) => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.debug('error processing request', {
error: err,
method: 'bucketDeleteQuota'
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}
module.exports = bucketDeleteQuota;

View File

@ -2,7 +2,7 @@ const metadata = require('../metadata/wrapper');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/metrics');
const monitoring = require('../utilities/monitoringHandler');
/**
* bucketDeleteReplication - Delete the bucket replication configuration

View File

@ -1,9 +1,9 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const metadata = require('../metadata/wrapper');
const util = require('node:util');
const monitoring = require('../utilities/metrics');
/**
* Bucket Delete Tagging - Delete a bucket's Tagging
@ -13,16 +13,10 @@ const monitoring = require('../utilities/metrics');
* @param {function} callback - callback to server
* @return {undefined}
*/
async function bucketDeleteTagging(authInfo, request, log, callback) {
function bucketDeleteTagging(authInfo, request, log, callback) {
const bucketName = request.bucketName;
let error = null;
log.debug('processing request', { method: 'bucketDeleteTagging', bucketName });
let bucket;
const metadataValidateBucketPromise = util.promisify(standardMetadataValidateBucket);
let updateBucketPromise = util.promisify(metadata.updateBucket);
// necessary to bind metadata as updateBucket calls 'this', causing undefined otherwise
updateBucketPromise = updateBucketPromise.bind(metadata);
const metadataValParams = {
authInfo,
bucketName,
@ -30,27 +24,39 @@ async function bucketDeleteTagging(authInfo, request, log, callback) {
request,
};
try {
bucket = await metadataValidateBucketPromise(metadataValParams, request.actionImplicitDenies, log);
let bucket = null;
return waterfall([
next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
(err, b) => {
if (err) {
return next(err);
}
bucket = b;
bucket.setTags([]);
// eslint-disable-next-line no-unused-expressions
await updateBucketPromise(bucket.getName(), bucket, log);
return next();
}),
next => metadata.updateBucket(bucket.getName(), bucket, log, next),
], err => {
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
log.error('error processing request', {
error: err,
method: 'deleteBucketTagging',
bucketName
});
monitoring.promMetrics('DELETE', bucketName, err.code,
'deleteBucketTagging');
return callback(err, corsHeaders);
}
pushMetric('deleteBucketTagging', log, {
authInfo,
bucket: bucketName,
});
monitoring.promMetrics(
'DELETE', bucketName, '200', 'deleteBucketTagging');
} catch (err) {
error = err;
log.error('error processing request', { error: err,
method: 'deleteBucketTagging', bucketName });
monitoring.promMetrics('DELETE', bucketName, err.code,
'deleteBucketTagging');
}
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
return callback(error, corsHeaders);
return callback(err, corsHeaders);
});
}
module.exports = bucketDeleteTagging;

View File

@ -6,7 +6,7 @@ const { isBucketAuthorized } =
require('./apiUtils/authorization/permissionChecks');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/metrics');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteWebsite';

View File

@ -1,18 +1,15 @@
const querystring = require('querystring');
const { errors, versioning, s3middleware } = require('arsenal');
const constants = require('../../constants');
const services = require('../services');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const escapeForXml = s3middleware.escapeForXml;
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/metrics');
const versionIdUtils = versioning.VersionID;
const monitoring = require('../utilities/monitoringHandler');
const { generateToken, decryptToken }
= require('../api/apiUtils/object/continueToken');
const { config } = require('../Config');
// do not url encode the continuation tokens
const skipUrlEncoding = new Set([
@ -37,8 +34,7 @@ const skipUrlEncoding = new Set([
</ListBucketResult>
*/
// Sample XML response for GET bucket objects:
/*
/* Sample XML response for GET bucket objects:
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>example-bucket</Name>
<Prefix></Prefix>
@ -68,7 +64,6 @@ const skipUrlEncoding = new Set([
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETVersion.html#RESTBucketGET_Examples
/*
<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>bucket</Name>
<Prefix>my</Prefix>
@ -109,7 +104,7 @@ const skipUrlEncoding = new Set([
*/
/* eslint-enable max-len */
function processVersions(bucketName, listParams, list, encType) {
function processVersions(bucketName, listParams, list) {
const xml = [];
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
@ -134,7 +129,7 @@ function processVersions(bucketName, listParams, list, encType) {
xmlParams.forEach(p => {
if (p.value) {
const val = p.tag !== 'NextVersionIdMarker' || p.value === 'null' ?
p.value : versionIdUtils.encode(p.value, encType);
p.value : versionIdUtils.encode(p.value);
xml.push(`<${p.tag}>${escapeXmlFn(val)}</${p.tag}>`);
}
});
@ -150,7 +145,7 @@ function processVersions(bucketName, listParams, list, encType) {
`<Key>${objectKey}</Key>`,
'<VersionId>',
(v.IsNull || v.VersionId === undefined) ?
'null' : versionIdUtils.encode(v.VersionId, encType),
'null' : versionIdUtils.encode(v.VersionId),
'</VersionId>',
`<IsLatest>${isLatest}</IsLatest>`,
`<LastModified>${v.LastModified}</LastModified>`,
@ -268,8 +263,7 @@ function handleResult(listParams, requestMaxKeys, encoding, authInfo,
listParams.encoding = encoding;
let res;
if (listParams.listingType === 'DelimiterVersions') {
res = processVersions(bucketName, listParams, list,
config.versionIdEncodingType);
res = processVersions(bucketName, listParams, list);
} else {
res = processMasterVersions(bucketName, listParams, list);
}
@ -392,4 +386,8 @@ function bucketGet(authInfo, request, log, callback) {
return undefined;
}
module.exports = bucketGet;
module.exports = {
processVersions,
processMasterVersions,
bucketGet,
};

View File

@ -3,7 +3,7 @@ const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const vault = require('../auth/vault');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/metrics');
const monitoring = require('../utilities/monitoringHandler');
// Sample XML response:
/*

Some files were not shown because too many files have changed in this diff.