Compare commits


449 Commits

Author SHA1 Message Date
Rahul Padigela 19b52b6840 Merge remote-tracking branch 'origin/development/7.5' into improvement/ZENKO-2062-port 2019-08-19 12:09:50 -07:00
bert-e e7f6f3d060 Merge branch 'w/7.5/bugfix/S3C-2369-limit-batch-delete-backport' into tmp/octopus/w/8.0/bugfix/S3C-2369-limit-batch-delete-backport 2019-08-12 21:18:25 +00:00
Rahul Padigela 5588f9fb65 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2369-limit-batch-delete' into w/8.0/bugfix/S3C-2369-limit-batch-delete 2019-08-09 14:12:39 -07:00
bbuchanan9 b1cb7c1cc7 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2345/allow-config-to-disable-counters' into w/8.0/improvement/S3C-2345/allow-config-to-disable-counters 2019-08-07 11:22:53 -07:00
bert-e 5e6dba28a6 Merge branch 'feature/S3C-2284-bucket-policy-delete' into tmp/octopus/w/8.0/feature/S3C-2284-bucket-policy-delete 2019-08-01 18:00:27 +00:00
bert-e d896145d60 Merge branch 'feature/S3C-2283-bucket-policy-get' into tmp/octopus/w/8.0/feature/S3C-2283-bucket-policy-get 2019-08-01 01:06:37 +00:00
Rahul Padigela b5517b50f8 Merge remote-tracking branch 'origin/w/7.5/improvement/ZENKO-2031-update-cdmiclient' into w/8.0/improvement/ZENKO-2031-update-cdmiclient 2019-07-31 16:40:36 -07:00
bert-e 538da6ec17 Merge branch 'feature/S3C-2277-bucket-policy-put' into tmp/octopus/w/8.0/feature/S3C-2277-bucket-policy-put 2019-07-30 00:19:08 +00:00
Rahul Padigela 4ce2fc43ed Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2216-bump-tags-limit' into w/8.0/improvement/S3C-2216-bump-tags-limit 2019-07-29 10:32:04 -07:00
bert-e 2069df7ca8 Merge branches 'w/8.0/bugfix/ZENKO-1829_Add_init_container_for_certs' and 'q/1992/7.5/bugfix/ZENKO-1829_Add_init_container_for_certs' into tmp/octopus/q/8.0 2019-07-24 23:57:43 +00:00
bert-e 41eed2de1f Merge branches 'w/8.0/feature/RELENG-2986-the-no-burst-policy' and 'q/1995/7.5/feature/RELENG-2986-the-no-burst-policy' into tmp/octopus/q/8.0 2019-07-24 23:21:43 +00:00
Thomas Carmet b7fed1898a Merge remote-tracking branch 'origin/w/7.5/feature/RELENG-2986-the-no-burst-policy' into w/8.0/feature/RELENG-2986-the-no-burst-policy 2019-07-24 13:18:48 -07:00
bert-e bb4ecec2a3 Merge branch 'bugfix/ZENKO-1829_Add_init_container_for_certs' into tmp/octopus/w/8.0/bugfix/ZENKO-1829_Add_init_container_for_certs 2019-07-24 17:24:25 +00:00
bert-e 88e01f0d8d Merge branch 'w/7.5/feature/ZENKOIO-58-upload-junit-reports' into tmp/octopus/w/8.0/feature/ZENKOIO-58-upload-junit-reports 2019-07-24 16:40:05 +00:00
Thomas Carmet 2fa0e3967a ZENKOIO-58 upload junit on kmip stage 2019-07-23 17:51:56 -07:00
Thomas Carmet 78b2f33943 Merge remote-tracking branch 'origin/w/7.5/feature/ZENKOIO-58-upload-junit-reports' into w/8.0/feature/ZENKOIO-58-upload-junit-reports 2019-07-23 17:31:12 -07:00
Katherine Laue 5b68f94c48 updated internal deps for 8.0 2019-07-17 16:44:24 -07:00
Katherine Laue a8006a4991 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2293-upgradeNodeJS' into w/8.0/improvement/S3C-2293-upgradeNodeJS 2019-07-17 16:42:27 -07:00
bert-e 3b16d16ccd Merge branch 'w/7.5/bugfix/S3C-2157-sanity-check' into tmp/octopus/w/8.0/bugfix/S3C-2157-sanity-check 2019-07-12 06:25:57 +00:00
bert-e b59e06a5db Merge branch 'w/7.5/feature/S3C-2160-eve-backport' into tmp/octopus/w/8.0/feature/S3C-2160-eve-backport 2019-07-12 00:48:57 +00:00
Rahul Padigela 5a66c65296 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2172-bucket-error' into w/8.0/bugfix/S3C-2172-bucket-error 2019-07-11 09:25:57 -07:00
bert-e 51b76c9803 Merge branch 'bugfix/ZENKO-1883-utapi-redis-config' into q/8.0 2019-06-12 18:48:04 +00:00
Rahul Padigela e40be11058 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2243-batchdelete-keys' into w/8.0/bugfix/S3C-2243-batchdelete-keys 2019-06-11 18:14:18 -07:00
Jianqin Wang 207b743266 bugfix: fix utapi redis configuration parsing 2019-06-11 11:01:30 -07:00
Salim e92ba8900f Merge branch 'w/7.5/improvement/ZENKO-1867-fix-product-version' into w/8.0/improvement/ZENKO-1867-fix-product-version 2019-06-10 14:21:57 -07:00
bert-e c704293b04 Merge branch 'w/7.5/bugfix/S3C-2076-utapi-reindex-config' into tmp/octopus/w/8.0/bugfix/S3C-2076-utapi-reindex-config 2019-06-07 05:12:57 +00:00
Jianqin Wang ca7ea1465c Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2019-redis-authentication' into w/8.0/bugfix/S3C-2019-redis-authentication 2019-06-05 16:32:09 -07:00
bbuchanan9 0f7a8a59aa Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2076/update-utapi-dep' into w/8.0/bugfix/S3C-2076/update-utapi-dep 2019-06-05 13:58:47 -07:00
bbuchanan9 eb67f17397 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2076/add-utapi-reindex' into w/8.0/bugfix/S3C-2076/add-utapi-reindex 2019-06-04 23:29:20 -07:00
Rahul Padigela 7aaf561249 Merge remote-tracking branch 'origin/w/7.5/feature/S3C-1139-sproxyd-batchdelete' into w/8.0/feature/S3C-1139-sproxyd-batchdelete 2019-06-03 23:55:02 -07:00
bert-e 30216d7d51 Merge branch 'w/7.5/bugfix/S3C-2222-artifact-conflicts' into tmp/octopus/w/8.0/bugfix/S3C-2222-artifact-conflicts 2019-05-30 21:58:29 +00:00
bert-e efacf39cfe Merge branches 'w/8.0/improvement/S3C-2034-bump-ioredis' and 'q/1836/7.5/improvement/S3C-2034-bump-ioredis' into tmp/octopus/q/8.0 2019-05-23 19:15:17 +00:00
Jianqin Wang ef469216ac Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2034-bump-ioredis' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-23 10:18:44 -07:00
Jianqin Wang 6736acb18f Merge remote-tracking branch 'origin/development/8.0' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-23 10:16:38 -07:00
Rahul Padigela 9fd8468e9c Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2118-abort-mpu-crash-2' into w/8.0/bugfix/S3C-2118-abort-mpu-crash-2 2019-05-21 16:00:08 -07:00
Jianqin Wang 91f59f876a update: package-lock.json file 2019-05-21 15:26:05 -07:00
Jianqin Wang 5a09b0226b Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2034-bump-ioredis' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-21 14:52:24 -07:00
bbuchanan9 f3ea4f1103 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2195/upload-copy-part-metrics' into w/8.0/bugfix/S3C-2195/upload-copy-part-metrics 2019-05-21 10:31:23 -07:00
bert-e 62591df809 Merge branch 'w/7.5/bugfix/S3C-2105/add-utapi-crr-conditions' into tmp/octopus/w/8.0/bugfix/S3C-2105/add-utapi-crr-conditions 2019-05-17 07:55:53 +00:00
bert-e 3148a71091 Merge branches 'w/8.0/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' and 'q/1816/7.5/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' into tmp/octopus/q/8.0 2019-05-10 23:36:24 +00:00
bbuchanan9 2cf54d0579 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-1506/upgrade-utapi-dependency' into w/8.0/bugfix/S3C-1506/upgrade-utapi-dependency 2019-05-10 11:52:50 -07:00
bert-e 7e42b43c86 Merge branch 'w/7.5/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes' into tmp/octopus/w/8.0/bugfix/S3C-2105/push-utapi-metrics-for-backbeat-routes 2019-05-08 23:10:25 +00:00
bert-e b587ff29e2 Merge branches 'w/8.0/bugfix/S3C-1959-sanity-check' and 'q/1805/7.5/bugfix/S3C-1959-sanity-check' into tmp/octopus/q/8.0 2019-05-02 06:39:28 +00:00
Rahul Padigela 07339703f5 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-1959-sanity-check' into w/8.0/bugfix/S3C-1959-sanity-check 2019-05-01 17:33:47 -07:00
bert-e 38ddb50a1c Merge branches 'w/8.0/feature/S3C-1974_PyKMIP_in_CI' and 'q/1775/7.5/feature/S3C-1974_PyKMIP_in_CI' into tmp/octopus/q/8.0 2019-04-30 14:08:25 +00:00
Dora Korpar a779e25ca6 fix linter 2019-04-24 16:37:05 -07:00
Dora Korpar 1b958cfa23 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2120-abort-mpu-timeout' into w/8.0/bugfix/S3C-2120-abort-mpu-timeout 2019-04-24 15:20:40 -07:00
bert-e 02cb1a8c57 Merge branch 'bugfix/ZENKO-1745-Fix-HTTP-Agent_config_backport' into q/8.0 2019-04-22 17:32:22 +00:00
Taylor McKinnon 559a20c702 bf(ZENKO-1745): Fix HTTP Agent configuration in docker-entrypoint.sh 2019-04-19 16:53:09 -07:00
philipyoo 4060341963 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2099-zenkoUserMDFieldDeleteMarkers' into w/8.0/bugfix/S3C-2099-zenkoUserMDFieldDeleteMarkers 2019-04-18 10:37:28 -07:00
Taylor McKinnon 1d5199f226 Merge branch 'feature/S3C-1974_PyKMIP_in_CI' into w/8.0/feature/S3C-1974_PyKMIP_in_CI 2019-04-16 10:09:47 -07:00
bert-e 68dca6cddf Merge branch 'feature/S3C-2035/tls-files-loading' into tmp/octopus/w/8.0/feature/S3C-2035/tls-files-loading 2019-03-25 17:08:00 +00:00
Guillaume Gimenez 6bf16a6d24 Merge remote-tracking branch 'origin/feature/S3C-2032/update-arsenal-deps' into w/8.0/feature/S3C-2032/update-arsenal-deps 2019-03-14 17:40:28 -07:00
Giacomo Guiulfo c83e623f44 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-1396-website-redirect-header-response' into w/8.0/bugfix/S3C-1396-website-redirect-header-response 2019-03-12 12:26:28 -07:00
Rahul Padigela 68ab0821b7 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2002-restricted-admin-access' into w/8.0/bugfix/S3C-2002-restricted-admin-access 2019-03-11 17:30:50 -07:00
bert-e 460a9cec7e Merge branch 'feature/S3C-1969/kmip-cloudserver-integration' into tmp/octopus/w/8.0/feature/S3C-1969/kmip-cloudserver-integration 2019-03-07 00:22:16 +00:00
anurag4DSB 00023f9274 Merge remote-tracking branch 'origin/improvement/S3C-2005-updateVaultClient' into w/8.0/improvement/S3C-2005-updateVaultClient
Conflicts:
	package-lock.json
	package.json
2019-02-27 21:56:09 +00:00
bert-e 0ce57a3d34 Merge branch 'w/7.5/bugfix/ZENKO-1460-maven-fix' into tmp/octopus/w/8.0/bugfix/ZENKO-1460-maven-fix 2019-02-11 19:42:46 +00:00
Rahul Padigela 766eb10980 bugfix: ZENKO-1452 avoid proxy conflicts in ci 2019-02-08 15:05:01 -08:00
bert-e e5f208b8d2 Merge branch 'bugfix/S3C-1933-remove-pod-ttl' into tmp/octopus/w/8.0/bugfix/S3C-1933-remove-pod-ttl 2019-01-09 15:24:36 +00:00
Giacomo Guiulfo dd56a3c25c bugfix(s3-data): add noCache option 2019-01-07 16:52:28 -08:00
Rahul Padigela d4e7091c09 Merge remote-tracking branch 'origin/feature/S3C-1903-public-read' into w/8.0/feature/S3C-1903-public-read 2018-12-19 17:09:50 -08:00
bert-e 32e505e7b6 Merge branch 'bugfix/ZENKO-1337-escape-char' into tmp/octopus/w/8.0/bugfix/ZENKO-1337-escape-char 2018-12-17 21:31:46 +00:00
Dora Korpar f20b32fbe8 bugfix: ZENKO-1365 Orbit set https for RING+S3 2018-12-14 12:04:38 -08:00
JianqinWang 5839daf56c Merge remote-tracking branch 'origin/bugfix/S3C-1843-fix-npm-dependency-vuln' into w/8.0/bugfix/S3C-1843-fix-npm-dependency-vuln 2018-11-27 17:25:22 -08:00
Rahul Padigela 3a1b34a7b0 improvement: ignore .tox dir 2018-11-15 14:57:46 -08:00
Rahul Padigela d100db1950 improvement: update python dependencies for docs
fixes CVE-2018-18074 alert
2018-11-15 14:55:59 -08:00
Rahul Padigela 8ffb5d2e71 Merge remote-tracking branch 'origin/feature/S3C-1807-MD-admin-routes' into w/8.0/feature/S3C-1807-MD-admin-routes 2018-11-15 11:11:17 -08:00
Salim fca1bee0bd Merge remote-tracking branch 'origin/bugfix/ZENKO-1311-ci-fixes' into w/8.0/bugfix/ZENKO-1311-ci-fixes 2018-11-14 13:36:32 -08:00
bert-e 8497eac258 Merge branches 'w/8.0/bugfix/S3C-1752-legacy-mpu' and 'q/1618/7.4/bugfix/S3C-1752-legacy-mpu' into tmp/octopus/q/8.0 2018-11-10 01:28:22 +00:00
Dora Korpar 697d3e0ab3 Merge remote-tracking branch 'origin/bugfix/S3C-1752-legacy-mpu' into w/8.0/bugfix/S3C-1752-legacy-mpu 2018-11-09 16:55:22 -08:00
bert-e 1618a840a4 Merge branch 'improvement/simplify-ci-unit-test' into tmp/octopus/w/8.0/improvement/simplify-ci-unit-test 2018-11-09 18:19:40 +00:00
Rahul Padigela 7a32b82b2d improvement: ZENKO-1291 disable keepAlive for AWS
This commit disables keep-alive on connections to AWS as connection
reuse showed intermittent socket hang ups.
2018-11-02 14:16:06 -07:00
Rahul Padigela 9db17e5578 improvement: ZENKO-1291 configurable httpAgent options
This commit adds the ability to configure httpAgent options
for external backends. Currently only AWS and GCP are
supported. Azure is not supported as there is no straightforward
way to set a custom httpAgent in the Azure SDK.
The defaults are expressed to be sensible and explicit.
The default maxSockets is infinity, which is expressed as
`null`; other values are inspired by node.js defaults.
This configuration is applied globally for all locations of
the same type of external backend.
2018-11-02 14:16:01 -07:00
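The two ZENKO-1291 commits above describe disabling keep-alive for AWS and making httpAgent options configurable per external backend type. A minimal sketch of the underlying mechanism with Node's https.Agent and the AWS SDK v2 (the option values are illustrative and the actual CloudServer config keys are not shown here):

```js
const https = require('https');
const AWS = require('aws-sdk');

// Illustrative defaults: keep-alive disabled as in ZENKO-1291; maxSockets left
// unlimited, which is what a `null` setting is described as mapping to above.
const agent = new https.Agent({
    keepAlive: false,
    maxSockets: Infinity,
    maxFreeSockets: 256,
});

// A single agent can be shared by every location of the same backend type.
const awsClient = new AWS.S3({
    endpoint: 'https://s3.amazonaws.com',
    httpOptions: { agent },
});
```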
Rahul Padigela 654d6105aa improvement: parse and include gcp request id on errors 2018-10-30 11:39:27 -07:00
Dora Korpar 75646b55ed ft: ZENKO 1042 azure user-agent tagging 2018-10-30 10:58:52 -07:00
Dora Korpar 73aa224e30 ft: ZENKO 390 aws user-agent tagging 2018-10-30 10:58:04 -07:00
bert-e e370bd6aa4 Merge branch 'improvement/ZENKO-1281-logRequestIds' into q/8.0 2018-10-29 20:18:29 +00:00
Rahul Padigela 763c58f36c improvement: log external request id 2018-10-28 21:32:44 -07:00
Rahul Padigela 1b8c4cc15f Merge remote-tracking branch 'origin/improvement/S3C-1745-bucket-deletion' into w/8.0/improvement/S3C-1745-bucket-deletion 2018-10-24 18:54:25 -07:00
jeremyds dd500ed601 Revert "bugfix: reduce flakiness of server startup wait"
This reverts commit a65eb80873.
2018-10-24 11:07:38 -07:00
Rahul Padigela 16cb2934ec improvement: add aws request ids to logs 2018-10-24 10:45:43 -07:00
Dora Korpar 2404d52a78 Merge remote-tracking branch 'origin/feature/S3C-1499-apiv2' into w/8.0/feature/S3C-1499-apiv2 2018-10-17 11:50:14 -07:00
Alexander Chan 674b2626a4 ft: ZENKO-1186 stalled sanity check 2018-09-20 15:43:18 -07:00
Alexander Chan a518e48d09 improvement: update arsenal 2018-09-15 19:37:45 -07:00
Jonathan Gramain 83356f9510 bf: ZENKO-1146 read from source until replicated to preferred read
When a preferred read location is defined for an object, and when
the status of replication to this location is PENDING or FAILED, read the
object from the source location instead of returning an error.

Note that reads are not guaranteed to succeed if the source location
is a transient source, because of a race condition with the garbage
collector. This will be addressed in a future change.
2018-09-14 16:00:26 -07:00
jeremyds d8010b26ca Merge remote-tracking branch 'origin/development/8.0' into HEAD 2018-09-10 18:12:58 -07:00
jeremyds d8b4e241fc ZENKO-1124: mongo listing fix, bump arsenal version. 2018-09-10 18:07:09 -07:00
Nicolas Humbert b640764a54 fx: ZENKO-1112 Management client error logging 2018-09-10 14:08:18 -07:00
Rahul Padigela 7084975d69 bugfix: filter NoSuchKey errors when deleting on AWS/GCP
This accounts for 404s when trying to delete something that doesn't exist
or hasn't been replicated yet. It returns no error to the client as the delete
is a no-op and is in line with S3 API compatibility.
2018-09-07 13:56:36 -04:00
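A sketch of the 404 filtering described above, assuming an AWS SDK v2 style deleteObject call (the helper and its callback plumbing are hypothetical):

```js
function deleteOnBackend(client, bucket, key, log, callback) {
    client.deleteObject({ Bucket: bucket, Key: key }, err => {
        // The object was never replicated or is already gone: treat the
        // delete as a no-op instead of failing the client request.
        if (err && err.code === 'NoSuchKey') {
            log.debug('object not found on backend, ignoring delete', { key });
            return callback();
        }
        return callback(err);
    });
}
```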
Nicolas Humbert 34cf34551c fx: ZENKO-1108 fail to get metrics report 2018-09-06 16:54:34 -07:00
Bennett Buchanan b3ad82b0e4 feature: ZENKO-1108 Set region for AWS locations 2018-09-05 11:35:08 -07:00
Rahul Padigela d982f8f603 bugfix: set region endpoint during location setup
This commit removes the current redirect code and adds a setup method
that makes a call to get the region of the destination bucket on AWS S3
and updates the endpoint to use the correct region-specific endpoint.
This should avoid redirects from AWS, which in turn mess up the payload
and cause errors during PUTs. The setup call will be cleaned up to address
the technical debt of waiting for the callback.
2018-09-05 01:04:47 -04:00
Rahul Padigela 9394d4111b improvement: revert ZENKO-819 regional endpoints 2018-09-04 16:00:18 -04:00
Rahul Padigela b17904e7f5 improvement: use updated UtapiClient for location metric 2018-08-31 16:32:43 -07:00
Alexander Chan 7bfe526313 bf: ZENKO-1024 add pending field to crr metric reports 2018-08-29 13:03:19 -07:00
bert-e 523998070d Merge branch 'bugfix/ZENKO-1051_Add_sentinel_support_to_redis_config' into q/8.0 2018-08-24 02:39:22 +00:00
Taylor McKinnon e06584a20e ZENKO-1051 Add redis sentinel support 2018-08-23 17:57:38 -07:00
bert-e 0651fb2aff Merge branch 'bugfix/ZENKO-1013/useBackbeatApiMetrics' into q/8.0 2018-08-24 00:14:47 +00:00
Alexander Chan 1fcbcab749 bf: ZENKO-1013 use backbeat api metrics routes for stats 2018-08-23 16:02:32 -07:00
Dora Korpar 746c8a7542 bf: ZENKO 876 increase flaky test timeout 2018-08-23 13:10:16 -07:00
jeremyds 846f15ad77 Merge remote-tracking branch 'origin/bugfix/ZENKO-945-arsenal-fix-in-s3' into w/8.0/bugfix/ZENKO-945-arsenal-fix-in-s3 2018-08-23 10:41:22 -07:00
Bennett Buchanan 8c94b5e390 Merge remote-tracking branch 'origin/bugfix/ZENKO-1018/use-disabled-status' into w/8.0/bugfix/ZENKO-1018/use-disabled-status 2018-08-22 12:24:53 -07:00
bert-e 24171052fd Merge branch 'feature/ZENKO-925-increaseMetricsExpiry' into q/8.0 2018-08-09 01:19:32 +00:00
philipyoo 7a47236b60 ft: ZENKO-925 increase metrics expiry to 24hrs 2018-08-08 17:55:17 -07:00
LaureVergeron d9694a92ba ZNC-22: DOC: Add developer bootstrap guide 2018-08-08 17:03:21 -07:00
bert-e 49055d540a Merge branch 'bugfix/ZENKO-903/getBucketListOnReport' into q/8.0 2018-08-08 20:28:52 +00:00
bert-e 467ad71cdb Merge branch 'bugfix/ZENKO-916-doNotReplicateACLChange' into q/8.0 2018-08-08 19:34:59 +00:00
Alexander Chan cadb594b31 bf: ZENKO-903 add tests for report itemCount 2018-08-08 11:44:20 -07:00
Alexander Chan 7cef25cdbd improvement: update arsenal 2018-08-08 10:06:10 -07:00
Alexander Chan c05e41a567 bf: ZENKO-922 add redis disconnect tests 2018-08-07 20:38:08 -07:00
Jonathan Gramain 544d464c6e bf: ZENKO-916 do not replicate ACL change
We should not reset the replication status to PENDING when changing
ACLs of an object, because we choose not to replicate ACLs to
replication targets. Later on we may see this as a feature, but not
now.

As described in ZENKO-916, this is a fix because putting ACLs is the
only known problematic case that breaks with a transient source
location. E.g. object tags get replicated correctly because we
replicate tags specifically in that case.
2018-08-06 16:41:30 -07:00
Mathieu Cassagne b4ba5fa7e2 Merge remote-tracking branch 'origin/bugfix/S3C-1586/listing_while_upgrade' into w/8.0/bugfix/S3C-1586/listing_while_upgrade 2018-08-02 14:49:35 +02:00
Giacomo Guiulfo 2e40464a38 chore: update arsenal commit hash 2018-07-31 13:04:14 -07:00
Giacomo Guiulfo 6ed42823a1 feat: check metadata in non-deep healthchecks 2018-07-31 13:03:17 -07:00
bert-e c28c2abd6c Merge branch 'bugfix/ZENKO-791-quota-update-on-putfail' into q/8.0 2018-07-31 00:42:47 +00:00
bert-e 69530a75f7 Merge branch 'bugfix/ZENKO-884_Honor_PUSH_STATS_in_push_mode' into q/8.0 2018-07-31 00:14:26 +00:00
philipyoo 155327ea30 bf: ZENKO-751 update arsenal package 2018-07-30 16:27:49 -07:00
Dora Korpar 4964fa4b56 bf: ZENKO 791 quota update on put fail 2018-07-30 15:58:16 -07:00
Taylor McKinnon 33b2d5bec0 bf(ZENKO-884): Honor PUSH_STATS in push management mode 2018-07-30 15:55:01 -07:00
Jonathan Gramain 5e17ec8343 bf: ZENKO-835 do not replicate lifecycle actions
Actions coming from lifecycle (or potentially any service account)
are no longer replicated. This now applies to delete markers created
by lifecycle expiration rules.
2018-07-27 17:35:38 -07:00
bert-e 6e7f5c446a Merge branch 'feature/S3C-1602-logEndMessageInBackbeatRoutes' into tmp/octopus/w/8.0/feature/S3C-1602-logEndMessageInBackbeatRoutes 2018-07-26 23:23:41 +00:00
Rahul Padigela cc0ef104bc improvement: ZENKO-762 read crr stats from Redis HA
This changes the behavior of the report handler to read the metrics
from Redis HA instead of localCache.
2018-07-25 17:20:33 -07:00
Rahul Padigela e71dae7675 improvement: ZENKO-819 use regional endpoints
This commit adds a retry for requests to AWS where the api call fails because the common endpoint
s3.amazonaws.com is no longer redirected to the regional endpoints. If the original call
receives AuthorizationHeaderMalformed, a GET Bucket location call is made and the endpoint is
updated to use the correct region in its hostname.
2018-07-25 15:29:55 -07:00
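A sketch of the retry described above, using the AWS SDK v2 getBucketLocation call to discover the bucket's region and rebuild the endpoint (the error handling and endpoint format are simplified assumptions):

```js
function useRegionalEndpoint(s3, bucket, callback) {
    s3.getBucketLocation({ Bucket: bucket }, (err, data) => {
        if (err) {
            return callback(err);
        }
        // An empty LocationConstraint means the legacy us-east-1 region.
        const region = data.LocationConstraint || 'us-east-1';
        s3.config.update({ endpoint: `s3.${region}.amazonaws.com` });
        return callback(null, s3);
    });
}
```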
Dora Korpar 8db2d9dd45 feature: ZENKO 829 bump utapi for rc4 2018-07-24 14:44:16 -07:00
bert-e 2263347229 Merge branch 'feature/ZENKO-777/addLocReplicationReports' into q/8.0 2018-07-24 21:21:35 +00:00
bert-e 5255c93e02 Merge branch 'improvement/ZENKO-760-no-cluster' into q/8.0 2018-07-23 20:01:57 +00:00
bert-e 0c3763d418 Merge branch 'bugfix/ZENKO-745-uniqueLocationMetricsRedisKeys' into q/8.0 2018-07-23 19:32:44 +00:00
Rahul Padigela 4c138ef967 improvement: ZENKO-760 prepare setup to work without cluster
To be in line with the Kubernetes sentiment of running one process per pod, the cluster
module usage is removed when workers are configured to be 1. Another change is to move
the metadata setup to make sure the connection to MongoDB is in place before accepting any
requests.
The code related to Orbit's management has been adapted to work in non-cluster mode.
2018-07-23 11:25:23 -07:00
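A minimal sketch of the single-process switch described above; the `workers` setting and `startServer` function are hypothetical placeholders:

```js
const cluster = require('cluster');

function start(workers, startServer) {
    if (workers === 1) {
        // One process per pod: skip the cluster module entirely.
        return startServer();
    }
    if (cluster.isMaster) {
        for (let i = 0; i < workers; i++) {
            cluster.fork();
        }
        return undefined;
    }
    return startServer();
}
```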
bert-e 486abd42ea Merge branch 'improvement/ZENKO-780-configure-push-stats' into q/8.0 2018-07-23 17:29:18 +00:00
bert-e 94898f2b30 Merge branch 'w/8.0/improvement/debug-s3-logs' into tmp/octopus/q/8.0 2018-07-23 16:13:28 +00:00
Jonathan Gramain 23c016dd3d bf: ZENKO-745 unique location metrics redis keys
Use the new 'objectId' field set by Orbit on each location for the redis
key, instead of the location name. This will avoid conflicts if
locations are re-created with the same name.

Take the opportunity to enforce an objectId to be set on each
location, even if the config is from a static file, to make its use
durable.
2018-07-20 18:22:30 -07:00
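A sketch of keying location metrics by the location's objectId instead of its name, per the commit above (the key layout is hypothetical):

```js
// The immutable objectId avoids key collisions when a location is deleted
// and later re-created under the same name.
function locationMetricKey(location) {
    if (!location.objectId) {
        throw new Error(`location ${location.name} is missing an objectId`);
    }
    return `${location.objectId}:locationStorage`;
}
```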
Giacomo Guiulfo afbd2a771e rf: configure push stats in poll mode 2018-07-20 15:51:21 -07:00
Dora Korpar fada99222f bf: ZENKO 790 redis limit return 2018-07-20 13:09:44 -07:00
bert-e 868435bde8 Merge branch 'improvement/ZENKO-749-removeS3toS3ObjectTestFrom-aws-node-sdk' into q/8.0 2018-07-19 21:58:12 +00:00
bert-e 429ca8b1c6 Merge branch 'development/8.0' into tmp/octopus/w/8.0/improvement/debug-s3-logs 2018-07-19 21:53:42 +00:00
Rahul Padigela 01ada9701c Merge remote-tracking branch 'origin/improvement/debug-s3-logs' into w/8.0/improvement/debug-s3-logs 2018-07-19 13:59:53 -07:00
anurag4dsb e15b021112 improvement: ZENKO-749 removed crr s3 to s3 object test from aws-node-sdk 2018-07-19 13:47:16 -07:00
Jonathan Gramain bed3422f15 bf: ZENKO-660 api proxy fixes
- reject unauthenticated requests on /_/backbeat/api routes:

  - don't allow access to `/_/backbeat/api` passthrough routes for
    public users. It's a temporary option until we have support for
    admin accounts or accounts with admin privileges.

- fix error handling by catching errors raised by http-proxy module
2018-07-19 13:43:58 -07:00
Alexander Chan aac8163c34 ft: ZENKO-777 add pause/resume status reports 2018-07-19 13:39:52 -07:00
Dora Korpar 5ee0a6a310 bf: ZENKO 773 failing lifecycle unit tests 2018-07-19 11:10:15 -07:00
philipyoo 2119e1d195 bf: bump arsenal version changes 2018-07-17 13:35:07 -07:00
Taylor McKinnon da4bfafcfc bf(zenko-473): Handle a unspecified storage quota correctly 2018-07-16 15:02:59 -07:00
Dora Korpar a0ef4ae3d6 bf: ZENKO 726 storage limit 2018-07-13 14:15:16 -07:00
Dora Korpar 2f79846df2 bf: ZENKO 726 storage limit config level 2018-07-13 11:58:48 -07:00
Giacomo Guiulfo c7bc911b91 bugfix: gcp behind an https proxy 2018-07-12 12:48:00 -07:00
Bennett Buchanan aa2b2074b7 bugfix: ZENKO-315 Update create NFS bucket script
* Add node shebang to NFS util
* Add location constraint parameter
2018-07-09 15:27:36 -07:00
Bennett Buchanan 5175639465 bugfix: ZENKO-665 Only create version for replica 2018-07-09 14:21:15 -07:00
Dora Korpar 760fc9e6ea bf: ZENKO 646 allow mdsearch by replication status 2018-07-09 13:34:06 -07:00
Giacomo Guiulfo 96f5dc72a0 ft: add "no_proxy" environment variables 2018-07-09 11:42:56 -07:00
bert-e 5033ecffb2 Merge branch 'feature/ZENKO-679-utapi-config' into q/8.0 2018-07-06 23:42:37 +00:00
bert-e 5cfa98e26a Merge branch 'bugfix/ZENKO-632-readable-destroy' into q/8.0 2018-07-06 23:29:26 +00:00
Salim 64b67cc6ef ft: Add storage quota config 2018-07-06 16:10:30 -07:00
Rahul Padigela 7b2b23b80e bugfix: ZENKO-632 avoid process crash
This brings in an Arsenal check to keep CloudServer from crashing when a Readable
stream's destroy method is accessed without checking if it is available.
2018-07-06 16:03:33 -07:00
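The guard described above amounts to checking that destroy() exists before calling it; a minimal sketch:

```js
function destroyStream(stream) {
    // Readable#destroy only exists on newer stream implementations; calling
    // it unconditionally is what crashed the process.
    if (stream && typeof stream.destroy === 'function') {
        stream.destroy();
    }
}
```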
bert-e 1545e7d89f Merge branch 'bugfix/ZENKO-635-deleteMarkerReplicatedToRoot' into q/8.0 2018-07-06 23:00:38 +00:00
Jonathan Gramain 0ae315ee6d bf: ZENKO-635 delete marker not replicated correctly
Delete markers were lacking the bucket prefix on the backend keys for
cloud targets, which means they were ineffective in making the actual
objects appear deleted. Fix this by adding a new "toObjectGetInfo"
method in the multiple-backends gateway, because right now the
responsibility of generating the key is per backend (IMO this should be
reworked to make it global to all backends, but now is not the time to
refactor this).
2018-07-06 15:03:08 -07:00
bert-e 499e2d3be2 Merge branch 'feature/ZENKO-562' into q/8.0 2018-07-06 21:20:35 +00:00
Dora Korpar 33a9c2becf bf: ZENKO 676 - only location metric 2018-07-06 12:29:01 -07:00
Giacomo Guiulfo ffbe5fc6af feat: set replication endpoints config file path 2018-07-06 10:46:25 -07:00
bert-e 3bd3cf06d3 Merge branch 'bugfix/ZENKO-659-getReplicationInfoWithPreferredRead' into q/8.0 2018-07-05 23:22:25 +00:00
bert-e b51a959286 Merge branch 'improvement/ZENKO-641-websocket-exception' into q/8.0 2018-07-05 22:54:39 +00:00
Rahul Padigela bb6dbb94a1 improvement: log errors to avoid ws throwing exceptions
The websocket npm module throws errors in its methods where callbacks are
optional but not passed as a param. To avoid this, a logger helper method
is passed as a callback so that if an error is raised, it gets logged.
2018-07-05 15:35:07 -07:00
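A sketch of the logging helper described above for the ws module, which throws when a send fails and no callback was supplied (the helper name is hypothetical):

```js
function sendWithLog(ws, data, log) {
    // Always pass a callback so ws reports failures through it instead of
    // throwing an uncaught exception.
    ws.send(data, err => {
        if (err) {
            log.error('error sending websocket message', { error: err.message });
        }
    });
}
```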
Jonathan Gramain ae17a5bf5b bf: ZENKO-659 fix "storageType" in object replicationInfo
When a preferred read location was defined, the storageType attribute
did not contain that location.

Also fix the lookup of the location type in "x-scal-storage-type" in
backbeat routes: create the array of location types first instead of
looking for a substring directly.
2018-07-05 15:02:05 -07:00
bert-e 8c93fed2b1 Merge branch 'w/8.0/improvement/using-vault-on-all-secrets' into tmp/octopus/q/8.0 2018-07-05 19:57:14 +00:00
bert-e c5a07b99c7 Merge branch 'improvement/using-vault-on-all-secrets' into tmp/octopus/w/8.0/improvement/using-vault-on-all-secrets 2018-07-05 09:30:56 +00:00
Dora Korpar a6d8522950 ft: ZENKO 637 add proxy cert env var 2018-07-03 17:42:06 -07:00
Taylor McKinnon 57ddcd1ef7 Fix: Add Accept-Ranges header to response 2018-07-03 14:38:33 -07:00
Alexander Chan 5f74f7f695 update arsenal dependecy 2018-07-01 14:44:24 -07:00
Alexander Chan 5978c8c82d ft: ZENKO-597 account for transient source in TDM 2018-06-30 16:06:52 -07:00
bert-e 1477c2aec9 Merge branch 'feature/ZENKO-584-crrFailureMetrics' into q/8.0 2018-06-30 21:16:50 +00:00
philipyoo 0d740b6624 ft: add crr metrics failures 2018-06-30 13:56:17 -07:00
bert-e 801cd26e67 Merge branch 'w/8.0/feature/ZENKO-267-MD-Ingestion-Routes' into tmp/octopus/q/8.0 2018-06-30 20:39:55 +00:00
Rahul Padigela 88a51cea19 Merge remote-tracking branch 'origin/feature/ZENKO-267-MD-Ingestion-Routes' into w/8.0/feature/ZENKO-267-MD-Ingestion-Routes 2018-06-30 13:13:23 -07:00
Alexander Chan 2807d6c535 bf: ZENKO 616 use localCache redis 2018-06-30 12:52:57 -07:00
bert-e 5b15a2df3f Merge branch 'feature/ZENKO-610-nfs-replication' into q/8.0 2018-06-30 18:51:31 +00:00
Rahul Padigela 78f49f3597 feature: ZENKO-382 skip version for nfs exported bucket 2018-06-30 11:29:06 -07:00
bert-e 19569c7213 Merge branch 'w/8.0/bugfix/port-dev/6.4-to-dev/7.4-and-fix-dependencies' into tmp/octopus/q/8.0 2018-06-30 09:46:45 +00:00
bert-e 2dda9fb55f Merge branch 'improvement/updateArsenalDependency' into q/8.0 2018-06-30 06:15:11 +00:00
Alexander Chan 2338f4334f improvement: update arsenal dependency 2018-06-29 22:03:58 -07:00
bert-e 2a2193356a Merge branch 'feature/ZENKO-270/checkUploadSize' into q/8.0 2018-06-30 04:53:17 +00:00
bert-e 19b35bf1ed Merge branch 'feature/ZENKO-315/CRRWithoutVersioning' into q/8.0 2018-06-30 04:38:51 +00:00
bert-e 141225bc01 Merge branch 'feature/ZENKO-582-preferred-read' into q/8.0 2018-06-30 04:31:16 +00:00
bert-e c1040cf16e Merge branch 'bugfix/ZENKO-579-skip-scan-func-test' into q/8.0 2018-06-30 04:19:59 +00:00
Bennett Buchanan b8a2c8ede5 feature: ZENKO-315 Allow CRR without versioning 2018-06-29 21:18:24 -07:00
Dora Korpar 9840d96230 ft: ZENKO-582 preferred read location
Change preferred read location implementation to use the
":preferred_read" attribute set in the bucket replication
configuration
2018-06-29 21:13:10 -07:00
Alexander Chan f81865c61f ft: ZENKO-270 add data upload limit 2018-06-29 20:50:56 -07:00
vrancurel 40d20ce2ca bugfix: add a func test for skip scan
Currently this test fails on Mongo because
the current code is unable to fetch the 5
common prefixes in one listing call.
This will be fixed by the next Arsenal PR.
2018-06-29 20:50:37 -07:00
vrancurel 29bb06b2af bugfix: versioningGeneral2 were not passing
The bug has been fixed in Arsenal and was related
to some objects being created before versioning
was enabled in the bucket and therefore not
having the versionId property.
2018-06-29 20:42:57 -07:00
Rahul Padigela c92176b7d9 feature: ZENKO-600 implement reverse proxy for backbeat api 2018-06-29 20:02:41 -07:00
Alexander Chan deb7a8fa5e bf: ZENKO-624 fix report handler test timer 2018-06-29 19:36:24 -07:00
bert-e 47b57e1a00 Merge branch 'feature/ZENKO-572/orbitSiteMetrics' into q/8.0 2018-06-29 19:11:53 +00:00
Alexander Chan a73b4938ed ft: ZENKO-572 extend CRR stats include to per-site metrics 2018-06-29 11:42:47 -07:00
David Pineau a85a56bd14 Merge remote-tracking branch 'origin/bugfix/port-dev/6.4-to-dev/7.4-and-fix-dependencies' into w/8.0/bugfix/port-dev/6.4-to-dev/7.4-and-fix-dependencies 2018-06-29 16:35:45 +02:00
Taylor McKinnon 84059b13da fix: Disable docker build cache in CI 2018-06-28 16:45:24 -07:00
bert-e 6a91372c21 Merge branch 'improvement/use-commithash' into q/8.0 2018-06-28 22:24:59 +00:00
Rahul Padigela d5d247bec1 improvement: lock scality dependencies using commit hash 2018-06-28 15:01:14 -07:00
bert-e 77443dbb93 Merge branch 'feature/ZENKO-413/gcpVirtualHostSupport' into q/8.0 2018-06-28 18:01:10 +00:00
bert-e 76b98aac72 Merge branch 'feature/ZENKO-315/AddCreateBucketScript' into q/8.0 2018-06-28 17:30:44 +00:00
David Pineau 50ddf5e7a7 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-28 14:28:37 +02:00
Dora Korpar 5f1beff287 ft: ZENKO 142 location storage quota 2018-06-27 16:46:48 -07:00
Alexander Chan d6e1c6a4fb ft: add hostnames 2018-06-27 13:38:40 -07:00
Alexander Chan 286fba8bdb ft: ZENKO-413 add GCP support virtual hosts url 2018-06-27 13:38:40 -07:00
anurag4DSB 1691eae108
ft: ZENKO-20 add prom-monitoring for cloudserver
Signed-off-by: anurag4dsb <anurag.mittal@scality.com>
2018-06-27 10:28:37 -07:00
bert-e 41d1ff1144 Merge branches 'w/8.0/feature/ZENKO-586-file-backend-eve' and 'q/1316/7.4/feature/ZENKO-586-file-backend-eve' into tmp/octopus/q/8.0 2018-06-27 00:50:48 +00:00
bert-e b7cec3aa26 Merge branch 'feature/ZENKO-586-file-backend-eve' into tmp/octopus/w/8.0/feature/ZENKO-586-file-backend-eve 2018-06-27 00:30:17 +00:00
jeremyds 7d922f0c33 bf: ZENKO-308 remove versioningGeneral1 test skipping with mongoDB
Now that the listing bug has been fixed in Arsenal, it is time to make the
versioningGeneral1 test run with the mongoDB back-end.
2018-06-26 17:05:38 -07:00
bert-e adf1e638c8 Merge branch 'feature/orbit-capabilities' into q/8.0 2018-06-26 23:23:14 +00:00
Rached Ben Mustapha e6781cf9e6 ft: Add capabilities in Orbit reporting 2018-06-26 16:06:18 -07:00
bert-e 19ba37caa3 Merge branch 'feature/ZENKO-143-transientSource' into q/8.0 2018-06-26 23:02:14 +00:00
bert-e a7fc5eb43e Merge branch 'feature/ZENKO-596-add-redis-ha' into q/8.0 2018-06-26 21:22:55 +00:00
Jonathan Gramain ab5d69d825 ft: ZENKO-143 support 'isTransient' location property 2018-06-26 13:30:29 -07:00
bert-e 62ad3b41f2 Merge branch 'bugfix/ZENKO-576/fixLocationSupportsVersioning' into q/8.0 2018-06-26 19:52:53 +00:00
Salim 455a0800d2 feature: add redis HA options 2018-06-26 11:55:51 -07:00
Bennett Buchanan 9b5dcea4e0 feature: ZENKO-315 Script create bucket with NFS 2018-06-26 11:07:28 -07:00
Thomas Carmet 1784e6fe9f Merge remote-tracking branch 'origin/feature/improve-docker-build-tags' into w/8.0/feature/improve-docker-build-tags 2018-06-26 16:57:00 +02:00
Alexander Chan b2ec8ba994 bf: ZENKO-576 set missing supportsVersioning in loc details 2018-06-25 20:43:47 -07:00
Rahul Padigela a65eb80873 bugfix: reduce flakiness of server startup wait 2018-06-25 17:26:07 -07:00
Rahul Padigela 8937152a78 bugfix: add isNFS property to the expected result 2018-06-25 16:58:43 -07:00
Jonathan Gramain 8874f97045 ft: ZENKO-143 batchDelete backbeat route
- Implement 'POST /_/backbeat/batchdelete' backbeat route to get rid
  of an array of data locations. The route will be used by the garbage
  collector service.

  * This includes some reorganization of backbeat routes sanity checks

- Gracefully handle 404 errors from the datastore backend:

  * no need to retry 404 errors as they are permanent

  * batch delete can also gracefully handle missing data locations and
    still delete other existing locations (may happen if a retry
    occurs, when replaying a kafka message among other cases).

- Support service-gc service account with account owner access rights.
2018-06-22 16:41:21 -07:00
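A sketch of calling the batch-delete route named above from the garbage collector side; only the `POST /_/backbeat/batchdelete` path comes from the commit message, the request body shape is an assumption, and request signing with the service-gc account is omitted:

```js
const http = require('http');

function batchDelete(host, port, locations, callback) {
    // Hypothetical payload: the data locations to get rid of.
    const body = JSON.stringify({ Locations: locations });
    const req = http.request({
        host,
        port,
        method: 'POST',
        path: '/_/backbeat/batchdelete',
        headers: {
            'Content-Type': 'application/json',
            'Content-Length': Buffer.byteLength(body),
        },
    }, res => callback(res.statusCode === 200 ?
        null : new Error(`batch delete returned ${res.statusCode}`)));
    req.on('error', callback);
    req.end(body);
}
```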
Rached Ben Mustapha 5543c13fd5 ft: Make websocket use proxies 2018-06-22 11:11:07 -07:00
Rached Ben Mustapha 546d6f157b ft: Add optimized dependencies for ws 2018-06-22 11:11:07 -07:00
Jonathan Gramain 9d86dfad53 ft: ZENKO-143 main location in loc constraint header
Allow setting the main object location name in the
x-amz-location-constraint header, not only for replication targets.
2018-06-21 13:45:28 -07:00
bert-e 3aeba783e3 Merge branch 'bugfix/ZENKO-576-fixVersioningBackendSupportCheck' into q/8.0 2018-06-21 19:34:12 +00:00
bert-e 27b425cd61 Merge branch 'bugfix/ZENKO-567-locationHeaderCheckException' into q/8.0 2018-06-21 19:21:48 +00:00
Jonathan Gramain 839ac21161 bf: ZENKO-576 add missing 'supportsVersioning' flag
Add 'supportsVersioning' flag to file and mem locations.
2018-06-21 10:21:05 -07:00
bert-e 52281eb7c6 Merge branch 'bugfix/ZENKO-575-package-regression' into q/8.0 2018-06-21 01:39:56 +00:00
Salim b89142c9a2 bf: Add back start_mongo
ZENKO-575 fixes a regression that caused legacy use of start_mongo to stop working.
2018-06-20 17:47:03 -07:00
bert-e 2d0413910e Merge branch 'w/8.0/feature/ZENKO-573-s3-eve-artifacts' into tmp/octopus/q/8.0 2018-06-21 00:33:27 +00:00
bert-e abc29538d9 Merge branch 'bugfix/ZENKO-498/fixSproxydLocationConfig' into q/8.0 2018-06-21 00:32:47 +00:00
bert-e 44f3c4e329 Merge branch 'feature/ZENKO-573-s3-eve-artifacts' into tmp/octopus/w/8.0/feature/ZENKO-573-s3-eve-artifacts 2018-06-21 00:05:07 +00:00
Stefano Maffulli eb2a6009ed Added info to submit package to NPM
Following instructions on npmjs.org, renamed the package to
@zenko/cloudserver because 'cloudserver' alone could not be used,
as it clashed with pre-existing projects.

The 8.0.0-beta version was picked at random, should be changed.
2018-06-20 16:01:56 -07:00
bert-e 8b70872b1a Merge branch 'feature/ZENKO-551-mutliple-backend-tests' into tmp/octopus/w/8.0/feature/ZENKO-551-mutliple-backend-tests 2018-06-20 21:32:46 +00:00
Thomas Carmet 6cedb7f4aa Merge branch 'development/7.4' into feature/last-forward-port 2018-06-20 14:15:24 -07:00
Jonathan Gramain d43518075b bf: ZENKO-567 enforce "details" in location constraint
Make sure when Orbit configuration is applied, a "details" attribute
is always present in the locations config.
2018-06-20 13:55:18 -07:00
Jonathan Gramain c7633a772a bf: ZENKO-567 config validation may crash
Config validation may crash if locationConstraint[x].details is not
set because of a reversed check.

NOTE: unit tests in locConstraintAssert.js are broken (regexp
line-wrap does not work); leave them alone, as fixing them is not worth the time since
they will hopefully be replaced by more robust config parsing in the
near future.
2018-06-20 13:55:18 -07:00
Alexander Chan 00c42ffcfa bf: ZENKO-498 - fix sproxy location config 2018-06-20 13:52:58 -07:00
bert-e c05e6c39db Merge branch 'improvement/ZENKO-547-removeCirleCI' into q/8.0 2018-06-20 20:52:32 +00:00
bert-e 16765466d1 Merge branch 'feature/ZENKO-560/addOrbitAdvOptSupport' into q/8.0 2018-06-20 00:58:20 +00:00
Rahul Padigela 009a012b47 feature: S3C-1212 support expire metrics config
(cherry picked from commit f264078e8c)
2018-06-19 17:38:28 -07:00
Alexander Chan f702c62c3f ft: add cloudserver support for advanced options 2018-06-19 16:15:26 -07:00
Thomas Carmet 2dba0c0c71 ft: adding secrets for 7.4 2018-06-19 15:12:04 -07:00
VR 22e9bf1eb7
Merge pull request #1290 from scality/improvement/packageNameChange
improvement: name change for npmjs
2018-06-19 13:50:47 -07:00
vrancurel 5c6924f35c improvement: name change for npmjs 2018-06-19 13:49:27 -07:00
Salim 94a1e98e8e bugfix: Fix data server regression
ZENKO-464 Fixes a regression that prevented s3 from starting properly
and renames the start_mongo script to cloudserver for clarity.
Regression introduced in commit 1fd51c8fe1
2018-06-18 15:05:49 -07:00
Rahul Padigela 0f021af79d improvement: remove enterprise circle ci 2018-06-18 00:18:16 -07:00
Salim 25441f7a60 feature: push images to private registry
ZENKO-476 after building post merge images, they will be pushed
to the private registry for E2E testing.
2018-06-15 17:33:31 -07:00
Rahul Padigela 70049b7336
Merge pull request #1272 from scality/bugfix/ZENKO-436/pickReadLocationFix
bugfix/ZENKO-436 retrieve correct read location constraint
2018-06-15 11:45:19 -07:00
Alexander Chan 79b78997f0 bf: ZENKO-436 retrieve correct read location constraint
(cherry picked from commit b7fb379703)
+ some typo fixes
2018-06-14 16:32:30 -07:00
Rahul Padigela a8943775c9
Merge pull request #1278 from scality/feature/ZENKO-485-eve-on-s3
Feature/zenko 485 eve on s3
2018-06-13 14:14:47 -07:00
Salim 387cac920e feature: Speed up Docker rebuild using cache
Allows faster CI and dev builds of the Docker image through the use
of cached layers.
2018-06-13 13:33:14 -07:00
Salim 640701166e feature: ZENKO-482 Eve CI
This integrates the mongo ft_tests suite into the Eve CI using
kube workers that can serve as the template for future workers and for porting
the remaining tests over.
2018-06-13 13:33:14 -07:00
Rached Ben Mustapha 99d6f6f2a4
Merge pull request #1242 from scality/ft/ZENKO-319-sproxydLocationType
ft: ZENKO-319 adds sproxyd location type management
2018-06-07 13:38:51 -07:00
Alexander Chan 323a69cc17 ft: ZENKO-319 adds sproxyd location type management 2018-06-07 10:10:28 -07:00
Rached Ben Mustapha 09b8d17a61
Merge pull request #1271 from scality/fix/ZENKO-471-no-subbyte-report
fix: Ensure CRR reports are full bytes
2018-06-07 09:19:48 -07:00
Rached Ben Mustapha cf175847fb fix: Ensure CRR reports are full bytes 2018-06-06 16:28:57 -07:00
Rahul Padigela 69b37a376d
Merge pull request #1254 from scality/ft/ZENKO-404-lifecycleServiceAccount
ft: ZENKO-404 lifecycle service account
2018-06-01 15:52:36 -07:00
Jonathan Gramain b627c3219f ft: ZENKO-404 lifecycle service account
Add support for service account 'service-lifecycle' in addition to
existing 'service-replication'. ACL checks are shunted in both cases;
we may implement more fine-grained access control later.
2018-06-01 12:37:42 -07:00
Rahul Padigela a7905b8df9 chore: update version and dependencies 2018-05-31 11:48:28 -07:00
Rahul Padigela d396acf8fe
Merge pull request #1259 from scality/fwdport/z/1.0-master
Fwdport/z/1.0 master
2018-05-30 10:46:16 -07:00
Rahul Padigela 518e8c45c7 chore: use master dependency branches 2018-05-29 16:35:24 -07:00
Rahul Padigela 0025bdd3ec Merge remote-tracking branch 'origin/z/1.0' into fwdport/z/1.0-master 2018-05-29 16:34:17 -07:00
Rahul Padigela 7ac9ac6ac3
Merge pull request #1256 from scality/bf/lockMocksModule
bf: lock node-http-mocks module
2018-05-25 14:24:21 -07:00
Rahul Padigela 9af917ecf6 bf: lock node-http-mocks module
node-http-mocks version 1.7.0 has some breaking changes which fail the tests.
Pinning this version for now; we will revisit this when we do npm dependency updates.
2018-05-25 12:33:29 -07:00
Rahul Padigela d758eddf71
Merge pull request #1250 from scality/bf/ZENKO-355-byteToMBConversion
bf: ZENKO-355 crr stats byte conversion
2018-05-22 19:16:31 -07:00
philipyoo 84e97d1350 bf: ignore gem install digest
The digest gem fails to install because it is part of the
standard library and the gem is no longer available for
install.
2018-05-22 19:10:16 -07:00
philipyoo acb5d72f32 bf: receive/send crr metrics sizes as bytes 2018-05-22 15:02:52 -07:00
Rahul Padigela 0712aa4246
Merge pull request #1241 from scality/bf/ZENKO-314-maxKeysZero
Bf/zenko 314 max keys zero
2018-05-17 11:56:04 -07:00
Alexander Chan 67eb20c1e4 bf: ZENKO-314 add check for max keys zero object listing 2018-05-16 15:40:29 -07:00
Alexander Chan 0b85e45ba8 ft: ZENKO-347 skip MongoDB version listing 2018-05-16 13:39:42 -07:00
Rahul Padigela 024544b726
Merge pull request #1233 from scality/test/orbit-management
Unit testing Orbit Management
2018-05-15 16:46:38 -07:00
Nicolas Humbert d88f04271f test: Orbit management 2018-05-15 15:14:05 -07:00
Rahul Padigela d77ff383bf
Merge pull request #1248 from scality/ft/ZENKO-347-skipMongoDbListingTests
Ft/zenko 347 skip mongo db listing tests
2018-05-13 14:21:33 -07:00
Rahul Padigela 08d9e6f45f
Merge pull request #1247 from scality/bf/ZENKO-350-addMDSearchValidation
Bf/zenko 350 add md search validation
2018-05-13 14:20:40 -07:00
Alexander Chan 4aaca47100 ft: ZENKO-347 skip MongoDB version listing 2018-05-11 19:35:36 -07:00
Alexander Chan f5132b911f bf: ZENKO-350 add MD search validation 2018-05-11 19:35:11 -07:00
Alexander Chan f2f2a5ea04 bf: ZENKO-349 add mongodb metadata env
Adds a mongodb metadata environment variable to run mongodb-only tests
2018-05-11 18:53:59 -07:00
Rached Ben Mustapha 56e60ab38e
Merge pull request #1244 from scality/fix/ZENKO-345-unduplicate-redis-settings
fix: use existing localCache redis configuration
2018-05-11 17:12:40 -07:00
Rached Ben Mustapha 3f7add2ca7 fix: use existing localCache redis configuration 2018-05-11 10:13:58 -07:00
Rahul Padigela ea9645c494
Merge pull request #1243 from scality/bf/ZENKO-332-azure-proxy
bf: ZENKO-332 fix Azure proxy localhost req
2018-05-11 10:09:30 -07:00
Dora Korpar 2e4234ea05 bf: ZENKO-332 fix Azure proxy localhost req 2018-05-10 20:33:53 -07:00
Nicolas HUMBERT a011400c69
Merge pull request #1225 from scality/rf/S3C-1399/refactor-backbeat-metrics-into-arsenal
rf: S3C-1399 Use CRR metrics from Arsenal
2018-04-30 10:06:35 -07:00
Rahul Padigela a79b5de1c3
Merge pull request #1232 from scality/bf/ZENKO-250-regexEval
bf: ZENKO-250 correctly evaluate regex pattern
2018-04-30 09:59:12 -07:00
Bennett Buchanan 6722336be7 rf: S3C-1399 Use CRR metrics from Arsenal 2018-04-27 17:05:22 -07:00
Alexander Chan ba820e5661 bf: ZENKO-250 correctly evaluate regex pattern
The original code evaluated a regex in `/pattern/` syntax incorrectly.
Adds a parser so MD search recognizes whether a regex is in `/pattern/` syntax or
a simple string.
2018-04-27 16:51:53 -07:00
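A sketch of distinguishing `/pattern/` syntax from a plain string as described above (the exact parsing rule is a simplified assumption):

```js
function toRegExp(value) {
    // "/pattern/flags" style input: strip the slashes and keep the flags.
    const match = value.match(/^\/(.*)\/([gimsuy]*)$/);
    if (match) {
        return new RegExp(match[1], match[2]);
    }
    // Plain string: treat it as a literal pattern.
    return new RegExp(value);
}
```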
Rahul Padigela 5d71417216
Merge pull request #1220 from scality/doc/ZENKO-259-mdsearch
doc: ZENKO-259 MD Search
2018-04-27 16:34:20 -07:00
Rahul Padigela e7a1ab2c0d
Merge pull request #1223 from scality/rf/S3C-1399/updateRedisConfig
rf: S3C-1399 Update Redis configuration
2018-04-27 14:52:39 -07:00
Alexander Chan 0ae117564a doc: ZENKO-259 mdsearch 2018-04-27 11:08:31 -07:00
Bennett Buchanan 399ecf5b6c rf: S3C-1399 Update Redis configuration 2018-04-27 10:38:18 -07:00
Rahul Padigela b603463f1e
Merge pull request #1227 from scality/ft/ZENKO-262-preferredReadWiteLocations
Ft/zenko 262 preferred read write locations
2018-04-27 10:30:07 -07:00
Nicolas HUMBERT 8a291caf9f
Merge pull request #1229 from scality/bf/ZENKO-275/mpuPropertiesGCP
bf: ZENKO-275 CRR to GCP MPU properties
2018-04-27 10:04:26 -07:00
Bennett Buchanan 0b9b203762 bf: ZENKO-275 CRR to GCP MPU properties 2018-04-26 19:00:07 -07:00
Alexander Chan 35f457d087 ft: ZENKO-262 preferred read/write locations 2018-04-26 16:51:34 -07:00
Nicolas HUMBERT d7595938c8
Merge pull request #1175 from scality/fix/do-not-include-untrimmed-headers-for-MPU-to-aws
FIX: Do not include untrimmed headers
2018-04-25 13:36:07 -07:00
Nicolas HUMBERT d52762c805
Merge pull request #1204 from scality/ft/ZENKO-229-get-using-location
ft/ZENKO-229 get using location
2018-04-25 11:36:37 -07:00
Bennett Buchanan d02dbebbf5 FIX: Do not include untrimmed headers 2018-04-25 10:22:34 -07:00
Rahul Padigela 6ef23a0659
Merge pull request #1228 from scality/ft/ZENKO-141-scality-dataserver
MongoDB as metadata and Scality as a data ba…
2018-04-24 18:59:07 -07:00
Rahul Padigela 9e1e00c05c
Merge pull request #1218 from scality/rf/ZENKO-250-portmdsearchtest
Rf/zenko 250 portmdsearchtest
2018-04-24 17:44:27 -07:00
Alexander Chan 6d67e98d96 rf: ZENKO-250 port md search test 2018-04-24 16:11:15 -07:00
Salim 1fd51c8fe1 ft: ZENKO-141 allows MongoDB/metadata Scality/data
Allows for the case where both the MongoDB and Scality backends are enabled
and therefore the dataserver is not required to be started.
2018-04-24 12:10:17 -07:00
Nicolas HUMBERT 35d12ea43a
Merge pull request #1199 from scality/ft/S3C-1391-update-bucketinfo-uid
ft: edit ModelVersion usage of property uid on buckets
2018-04-24 11:46:10 -07:00
Dora Korpar d6e8201283 ft: ZENKO-229 get obj based on location 2018-04-24 10:38:19 -07:00
philipyoo a6265ab76d ft: edit usage of uid on buckets
Add a uid on all buckets for future use; this should be
backwards compatible.
2018-04-24 10:03:34 -07:00
Rahul Padigela 109ca1f98e chore: update scality dependencies 2018-04-23 12:45:21 -07:00
Rahul Padigela 92e8b1e96a chore: update version, description and author 2018-04-23 12:33:23 -07:00
Rahul Padigela 3c922e0f4a
Merge pull request #1224 from scality/fwdport/7.4-beta-master2
Fwdport/7.4 beta master2
2018-04-23 09:45:23 -07:00
Rahul Padigela 73f32773a1 chore: add package-lock.json 2018-04-23 00:09:05 -07:00
Rahul Padigela bb372c7991 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master2 2018-04-20 11:13:49 -07:00
Rahul Padigela 4ab977cd0f
Merge pull request #1219 from scality/fwdport/7.4-7.4-beta1
Fwdport/7.4 7.4 beta1
2018-04-20 09:59:24 -07:00
Rahul Padigela c2cf9cb82f
Merge pull request #1222 from scality/fx/management
fx: Orbit management - config userName + redundancy
2018-04-20 09:58:49 -07:00
Nicolas Humbert 1fecdcc19f fx: Using account userName from management configuration 2018-04-19 16:13:13 -07:00
Nicolas Humbert d7b4bdf143 fx: removing redundant getUUID() in Orbit management 2018-04-19 15:38:13 -07:00
Rahul Padigela 3e0ff40d7b Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-7.4-beta1 2018-04-19 10:55:50 -07:00
Rahul Padigela eb9729619e
Merge pull request #1207 from scality/ft/more-s3-locations-support
Ft/more s3 locations support
2018-04-18 15:40:28 -07:00
Alexander Chan 460dd0d83b add supportsVersioning unit tests 2018-04-17 16:46:03 -07:00
Alexander Chan dad7ee2be6 healthcheck: versioning optional 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha aada239a85 Handle http s3 endpoints 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha 822a2ce693 Rely on location for PutBucketVersioning 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha fd8905aae7 Make versioning optional for aws_s3 locations 2018-04-17 16:46:03 -07:00
Rached Ben Mustapha 6907df7a8e Configure versioning and path style for s3 locations 2018-04-17 16:46:03 -07:00
Rahul Padigela f8b41c9033
Merge pull request #1216 from scality/ft/use-uuid
rf: use uuid instead of date
2018-04-17 16:27:45 -07:00
Alexander Chan 216eca2609 rf: use uuid instead of date 2018-04-17 11:14:56 -07:00
Rahul Padigela c91e4ba7d5
Merge pull request #1213 from scality/fix/dockerentrypoint
fix: docker entrypoint for whitelisting healthcheck
2018-04-16 17:53:19 -07:00
Rahul Padigela fa0be87398 fix: docker entrypoint for whitelisting healthcheck 2018-04-16 17:52:04 -07:00
Rahul Padigela d899aae079
Merge pull request #1211 from scality/fix/Zenko-deployment
ft: add configurable healthcheck whitelisting
2018-04-16 17:17:29 -07:00
Rahul Padigela 4dfa7b0486 ft: add configurable healthcheck whitelisting 2018-04-16 16:59:24 -07:00
Rahul Padigela dbf1cf43d9
Merge pull request #1210 from scality/backport/master-rel/7.4-beta
backport master to rel/7.4-beta
2018-04-16 13:36:21 -07:00
Rahul Padigela 8a6763da83
Merge pull request #1203 from scality/ft/secure-channel
Ft/secure channel
2018-04-15 23:47:00 -07:00
Rached Ben Mustapha 4b9216c9a4 Plug push-based management 2018-04-13 22:59:40 -07:00
Rached Ben Mustapha 6a286f32a9 Add push-based management module 2018-04-13 22:59:40 -07:00
Rached Ben Mustapha 75e9012a11 Add ws dependency 2018-04-13 22:59:40 -07:00
Rached Ben Mustapha a8a2807e0d Split management module 2018-04-13 22:59:40 -07:00
Dora Korpar 1b008b4e9f ft: add date modified headers as condition for object delete
(cherry picked from commit a02d226b4c)
2018-04-13 17:22:26 -07:00
Rahul Padigela 76e2a2d03f
Merge pull request #1161 from scality/RF/updateIssueTemplateToMentionDiscourse
RF: ZNC-26: Issue Template to highlight Discourse
2018-04-13 17:01:11 -07:00
LaureVergeron 3db21278c5 RF: ZNC-26: Issue Template to highlight Discourse 2018-04-13 14:20:13 +02:00
Rahul Padigela 0fd0493654
Merge pull request #1188 from scality/fix/mongo-tests
Fix mongo tests
2018-04-11 09:53:20 -07:00
Salim a79e9080cd Fix mongo tests 2018-04-10 21:29:00 -07:00
Rahul Padigela abc4fd2c75
Merge pull request #1145 from scality/ft/objdel-add-modified-header-check
Ft/objdel add modified header check
2018-04-10 20:20:29 -07:00
Dora Korpar a02d226b4c ft: add date modified headers as condition for object delete 2018-04-10 17:05:38 -07:00
Rahul Padigela 11641b9fbd
Merge pull request #1202 from scality/fx/ft_management
FX: Running ft_management test with REMOTE_MANAGEMENT_DISABLE=1
2018-04-10 16:42:47 -07:00
Nicolas Humbert 72e646dc78 FX: Running ft_management test with REMOTE_MANAGEMENT_DISABLE=1 2018-04-10 15:10:19 -07:00
Rahul Padigela 227902877a
Merge pull request #1200 from scality/bf/ZENKO-144-https-location
Bf/zenko 144 https location
2018-04-10 14:06:41 -07:00
Rahul Padigela 3ef0caa0ba bf: pick http agent for non-ssl location backends 2018-04-10 09:07:39 -07:00
Rahul Padigela 881c909ef8 test: assert options for aws_s3 location constraints 2018-04-10 09:07:35 -07:00
Rahul Padigela 7c6df7a783 chore: proxyCompareUrl - address naming 2018-04-09 18:13:33 -07:00
Bennett Buchanan cf90391e86
Merge pull request #1154 from scality/ft/ZENKO-158/add-gcp-support-for-one-to-many
FT: Add changes for replication with GCP
2018-04-09 18:06:46 -07:00
Bennett Buchanan 85dee35b43 FT: Add changes for replication with GCP 2018-04-09 13:00:55 -07:00
Rahul Padigela c1a95fa1a9
Merge pull request #1198 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-09 12:46:33 -07:00
Alexander Chan 2426c9cc5a Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-09 10:26:40 -07:00
Rahul Padigela ef1dcee6f3
Merge pull request #1183 from scality/forward/orbit
Forward/orbit
2018-04-05 15:59:03 -07:00
Rahul Padigela 16217d852b
Merge pull request #1195 from scality/ft/ZENKO-225-o2m-s3conn
ft: provide pathStyle option to support S3C
2018-04-05 15:43:26 -07:00
Rahul Padigela 2fbd5ce8cc ft: provide pathStyle option to support S3C
This feature allows setting the pathStyle request option for location constraints so that
it can be relaxed for non-AWS S3 backends (for example S3 Connector).
2018-04-05 11:54:26 -07:00
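The pathStyle option described above maps onto the AWS SDK v2 `s3ForcePathStyle` flag when building the client for a non-AWS S3 endpoint; a minimal sketch (the locationDetails field names are assumptions):

```js
const AWS = require('aws-sdk');

function buildS3Client(locationDetails) {
    return new AWS.S3({
        endpoint: locationDetails.endpoint,          // e.g. an S3 Connector URL
        s3ForcePathStyle: locationDetails.pathStyle, // path-style instead of virtual-host-style
        accessKeyId: locationDetails.accessKey,
        secretAccessKey: locationDetails.secretKey,
    });
}
```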
Nicolas Humbert ed3ee6deae S3C-1354 Grant replication user permission to read/write buckets/objects 2018-04-05 10:59:15 -07:00
Nicolas HUMBERT b505656e86 FX: functional tests (#1190)
* FX: functional tests

* [SQUASH ME] FX TESTS: cleanup(config) changed
2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 61bb309af6 Make mongodb database configurable 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 9d1cd39d15 Disable remote management in tests 2018-04-05 10:59:15 -07:00
anurag4DSB af3ea03bcf changed PR as advised in the review
Prom-client working for S3

corrected content type
2018-04-05 10:59:15 -07:00
anurag4DSB a612735448 finished S3 prom client integration
Signed-off-by: anurag4DSB <anurag.mittal@scality.com>
2018-04-05 10:59:15 -07:00
Anurag Mittal 8bf35f7b1d Tested and completed prom-client for S3
Signed-off-by: anurag4DSB <anurag.mittal@scality.com>
2018-04-05 10:59:15 -07:00
Nicolas Humbert bf811ecd87 S3C-1348 FT: Integrating 1-many locations replication into Orbit 2018-04-05 10:59:15 -07:00
Salim 8fe22c0b55 fix mongodb hosts entrypoint 2018-04-05 10:59:15 -07:00
Salim e54318d1c6 Multiple endpoints 2018-04-05 10:59:15 -07:00
Bennett Buchanan c760229757 FT: Add CRR statistics for Orbit (#1162) 2018-04-05 10:59:15 -07:00
Alexander Chan 06d2dbc8e7 FT: Adds support for GCP data backend 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha ebf131696b Fix: limit exported config items
Only send `overlayVersion` for now.
2018-04-05 10:59:15 -07:00
LaureVergeron b443b925a5 FT: ZENKO-118: Add support for Wasabi data backend 2018-04-05 10:59:15 -07:00
vrancurel 343b658d5f use hosts instead of host and port 2018-04-05 10:59:15 -07:00
Lauren Spiegel 337b049298 FT: Switch out mongo search for spark 2018-04-05 10:59:15 -07:00
Lauren Spiegel 798c42d6e3 TEMP: Disable remote mgmt for mem 2018-04-05 10:59:15 -07:00
Lauren Spiegel a95018a87b TEMP: Special treatment for clueso
Once orbit handles identities, all service accounts
should use the proper service account canonical id format
and canonical ids should be individually generated.
2018-04-05 10:59:15 -07:00
Lauren Spiegel 7828c0d56b FT: Add changes for clueso 2018-04-05 10:59:15 -07:00
Lauren Spiegel 2870477c61 chore: additional lint fixes 2018-04-05 10:59:15 -07:00
JianqinWang 1d191f4c5d bf: fix linter errors 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha fe129c7848 Ignore data/metadata dirs 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 126406738e Initial management implementation 2018-04-05 10:59:15 -07:00
Rached Ben Mustapha 36ff274b24 Default to S3DATA=multiple 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha e976c4d46e Update default configuration for Zenko 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha f474e7ee40 Pre-provision instance ID 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha e8e92871d5 Support target host header 2018-04-05 10:57:59 -07:00
Rached Ben Mustapha 79c6c57760 FT: Less verbose report handler 2018-04-05 10:57:59 -07:00
Rahul Padigela 9bc2b7379c
Merge pull request #1193 from scality/bf/proxy-config
bf: proxy config
2018-04-04 10:22:05 -07:00
Dora Korpar 33bac9802b bf: proxy config 2018-04-03 17:02:51 -07:00
Rahul Padigela 01f23805ad
Merge pull request #1189 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-02 12:31:03 -07:00
Alexander Chan 7d5b22b330 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-02 11:00:58 -07:00
Rahul Padigela 74ba89ec34
Merge pull request #1179 from scality/ft/ZENKO-144-https-proxy
Ft/zenko 144 https proxy
2018-04-02 10:33:34 -07:00
Salim d0ef347a31 ZENKO-144 Squid proxy docker file 2018-03-30 17:10:29 -07:00
Dora Korpar d923af04ba ZENKO-144 ft: https proxy 2018-03-30 17:10:29 -07:00
Rahul Padigela 063086c7bb
Merge pull request #1185 from scality/bf/content-length-type
bf: parse content length to int
2018-03-30 13:59:19 -07:00
Alexander Chan 6f9ee224f2 bf: parse content length to int 2018-03-30 11:33:43 -07:00
Rahul Padigela 42abc5ae02
Merge pull request #1182 from scality/bf/merge-error
BF: fix merge conflict error
2018-03-28 19:11:54 -07:00
Alexander Chan c632635cfc bf: removed duplicate function 2018-03-28 14:44:49 -07:00
Rahul Padigela 7902709abc
Merge pull request #1171 from scality/ft/S3C-1327-update-ModelVersion-md
ft: update ModelVersion.md
2018-03-27 16:33:45 -07:00
Rahul Padigela 5dff2506a4
Merge pull request #1169 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-03-27 15:31:19 -07:00
philipyoo 7b1ca7fec6 ft: update ModelVersion.md 2018-03-27 13:26:18 -07:00
Alexander Chan 502c1a4c11 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-23 11:29:00 -07:00
Rahul Padigela c1bda2b9ea
Merge pull request #1178 from scality/fx/missing-abort-prop
BF: GcpManagedUpload
2018-03-23 11:26:26 -07:00
Alexander Chan 8d688738f7 fx: missing error object in logHelper call 2018-03-22 14:24:16 -07:00
Rahul Padigela 48eeb9c501
Merge pull request #1118 from scality/ft/S3C-1115/GCP-S3-Func
FT S3C-1115: GCP S3 Functional Tests
2018-03-22 10:12:18 -07:00
Alexander Chan 1a44c52f30 S3C-1115 FT: Adds GCP Functional Tests
Adds S3 functional tests for GCP backend: GET/PUT/DEL
2018-03-21 21:04:17 -07:00
Rahul Padigela acda681fc1
Merge pull request #1176 from scality/fwd/7.4-orig-beta
Forward port rel/7.4 to rel/7.4-beta
2018-03-21 12:20:54 -07:00
Alexander Chan 66b6dc19ab FX: missing abort call object properties 2018-03-21 12:12:14 -07:00
Alexander Chan 60cce4a65f Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-orig-beta 2018-03-20 16:04:05 -07:00
Alexander Chan df1d26abad Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-19 15:24:47 -07:00
Rahul Padigela 801052b99e
Merge pull request #1164 from scality/ft/gcp-to-7.4-beta
Merge ft/GCP to rel/7.4-beta
2018-03-16 15:23:55 -07:00
Alexander Chan 6dfec0655a Merge remote-tracking branch 'origin/ft/GCP' into rel/7.4-beta 2018-03-16 11:13:24 -07:00
Bennett Buchanan 70379f2a42
Merge pull request #1163 from scality/fx/gcp-add-err
fx: add err
2018-03-16 10:41:26 -07:00
Alexander Chan cd5526188f fx: add err 2018-03-15 18:33:03 -07:00
Rahul Padigela a4cbbe0fcc ft: update package.json dependencies 2018-03-14 13:06:24 -07:00
Alexander Chan e5f966d0fa
Merge pull request #1123 from scality/ft/S3C-1179/GCP-S3-MPU
Ft/s3 c 1179/gcp s3 mpu
2018-03-10 19:59:21 -08:00
Alexander Chan 9c3a8f1573 Switch from npm to yarn 2018-03-09 18:18:49 -08:00
Rahul Padigela 208c024789
Merge pull request #1148 from scality/fwdport/7.4-master
Fwdport/7.4 master
2018-03-09 16:15:47 -08:00
Alexander Chan 8b629252e4 S3C-1179 FT: Adds GCP MPU Func
Adds MPU functionalities to GCP Backend
2018-03-09 15:06:16 -08:00
Rahul Padigela db6e8f35d5 Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-master 2018-03-09 13:41:15 -08:00
Rahul Padigela 0fc6e720e3
Merge pull request #1141 from scality/rf/back-to-xml
S3C-1115: rf: removes JSON API usage in GCP
2018-03-07 20:59:34 -08:00
Alexander Chan ecf9fe75bd rf: remove JSON API 2018-03-07 18:49:41 -08:00
Alexander Chan 2a70d20732 fx: GCP CopyObject Result Parse
Parse response values to correct type for copyObject method.
2018-03-07 18:47:56 -08:00
Bennett Buchanan d280b22d14 FIX: Remove GCP versioning healthcheck on buckets 2018-03-07 18:47:56 -08:00
Alexander Chan cffb8a1914 FIX: Add GCP to externalVersioningErrorMessage 2018-03-07 18:47:56 -08:00
Alexander Chan 118a091ae5 S3C-1115 FT: Adds GCP functionality to S3
Adds common files/changes for adding GCP as a backend in S3.
2018-03-07 18:47:56 -08:00
Alexander Chan c3fc35dcf3 S3C-1115 FT: GCP Object Tagging
Implements GCP Object Tagging PUT/GET/DELETE APIs
2018-03-07 18:47:56 -08:00
Alexander Chan 6c03f6de30 S3C-1179 FT: GCP Upload Method
Adds a GcpManagedUpload for the upload method to handle stream uploads.
The GcpManagedUpload handles switching between putObject and multipart
upload depending on the given content size.
2018-03-07 18:47:56 -08:00
Alexander Chan 775be47f59 S3C-1179 FT: GCP MPU APIs
Implements GCP MPU APIs: listParts, uploadPart, uploadPartCopy,
abortMPU, createMPU, and completeMPU
Implements MPU Helper Class
2018-03-07 18:47:56 -08:00
Alexander Chan 7fd3f05d27 FX: GCP Credentials
Fixes a bug where environment variables containing '\n' (newline) characters
were not retrieved correctly.
2018-03-07 18:47:56 -08:00
Alexander Chan 87466286b6 S3C-1115 FT: GCP COPY
Implements GCP Object COPY API
2018-03-07 18:47:56 -08:00
Alexander Chan a89a26aaa1 FX: GCP PUT API Test
Fixes the use of an undefined variable that led to the creation of buckets
that were not deleted on GCP.
2018-03-07 18:47:56 -08:00
Alexander Chan 54037fc1bd S3C-1115 FT: GCP Object APIs
Implements GCP Object PUT/GET/DELETE APIs
2018-03-07 18:47:56 -08:00
Alexander Chan 3f42030c86 [FIX] GCP Healthcheck
Removes one layer of nesting that leads to errors not being detected.
2018-03-07 18:47:56 -08:00
Alexander Chan ecec50845c S3C-1115 FT: Adds GCP Backend healthcheck
Implements the APIs to enable backend healthcheck of GCP storage buckets
2018-03-07 18:47:56 -08:00
Rahul Padigela a77a21f957
Merge pull request #1129 from scality/ft/pensieve-credentials
rf: moving backends to Arsenal
2018-03-07 15:07:57 -08:00
JianqinWang ec4427c2c0 rf: rename mongodb replica hosts 2018-03-06 16:46:11 -08:00
JianqinWang fb09cb9a39 ZENKO-140 - rf: move metadata backends to Arsenal 2018-03-06 16:46:11 -08:00
ironman-machine 0f8b957901 merge #1119 2018-02-16 03:42:59 +00:00
Lauren Spiegel fb52735a23 CHORE: Update mongo version 2018-02-15 15:51:21 -08:00
Bennett Buchanan b5aa64ec89
Merge pull request #1128 from scality/FIX/typoInPublicCloudDoc
FIX: Typo in Public Clouds Doc
2018-02-15 12:03:39 -08:00
LaureVergeron c6099ee186 FIX: Typo in Public Clouds Doc 2018-02-15 16:03:59 +01:00
ThibaultRiviere 6ec0562afa
Merge pull request #1105 from scality/fwdport_7.4_master
Fwdport 7.4 master
2018-02-07 15:58:15 +01:00
Thibault Riviere 9d58964a60 Merge branch 'rel/7.4' into fwdport_7.4_master 2018-02-07 13:39:34 +01:00
ironman-machine c7137d1500 merge #1107 2018-02-07 01:05:52 +00:00
Alexander Chan 4dc537e5e0 FX: GCP PUT API Test
Fixes the use of an undefined variable that led to the creation of buckets
that were not deleted on GCP.
2018-02-06 15:25:22 -08:00
Bennett Buchanan 1dcbacf594
Merge pull request #1095 from scality/ft/S3C-1115/GCP-APIs-Object
Ft/s3 c 1115/gcp apis object
2018-02-04 11:16:49 -08:00
Alexander Chan 60c1281ee8 S3C-1115 FT: GCP Object APIs
Implements GCP Object PUT/GET/DELETE APIs
2018-02-02 17:40:37 -08:00
Lauren Spiegel effb39cad8
Merge pull request #1084 from scality/goodbye/mongo
Goodbye/mongo
2018-02-02 17:00:25 -08:00
Lauren Spiegel bc0e0ad057 Move mongo client to arsenal 2018-02-02 15:12:11 -08:00
Bennett Buchanan fcb5c0ea07
Merge pull request #1103 from scality/fix/update-readme-for-configuration-section
FIX: Update link to Configuration doc in README.md
2018-02-02 13:47:21 -08:00
Bennett Buchanan 0b002d8f15 FIX: Update link to Configuration doc in README.md 2018-02-02 10:35:14 -08:00
ironman-machine fdc2e580b0 merge #1093 2018-02-02 01:57:23 +00:00
Alexander Chan 61d6e4bfc7 S3C-1115 FT: Adds GCP Backend healthcheck
Implements the APIs to enable backend healthcheck of GCP storage buckets
2018-02-01 15:40:15 -08:00
Lauren Spiegel cd252ff793
Merge pull request #1097 from scality/ft/kub-mongo
Improved logging and Replica set compatibility
2018-02-01 15:37:14 -08:00
Salim d195267f5f Improved Mongo Logging 2018-01-31 17:46:02 -08:00
Salim 24ae3989aa Replica Set Compatibility 2018-01-31 15:46:40 -08:00
273 changed files with 18240 additions and 3539 deletions

3
.dockerignore Normal file

@@ -0,0 +1,3 @@
node_modules
localData/*
localMetadata/*


@@ -1,19 +1,32 @@
-# Issue template
+# General support information
-If you are reporting a new issue, make sure that we do not have any
-duplicates already open. You can ensure this by searching the issue list for
-this repository. If there is a duplicate, please close your issue and add a
-comment to the existing issue instead.
+GitHub Issues are **reserved** for actionable bug reports (including
+documentation inaccuracies), and feature requests.
+**All questions** (regarding configuration, use cases, performance, community,
+events, setup and usage recommendations, among other things) should be asked on
+the **[Zenko Forum](http://forum.zenko.io/)**.
-## General support information
+> Questions opened as GitHub issues will systematically be closed, and moved to
+> the [Zenko Forum](http://forum.zenko.io/).
-GitHub Issues are reserved for actionable bug reports and feature requests.
-General questions should be sent to the
-[S3 scality server Forum](http://forum.scality.com/).
+--------------------------------------------------------------------------------
+## Avoiding duplicates
+When reporting a new issue/requesting a feature, make sure that we do not have
+any duplicates already open:
+- search the issue list for this repository (use the search bar, select
+"Issues" on the left pane after searching);
+- if there is a duplicate, please do not open your issue, and add a comment
+to the existing issue instead.
+--------------------------------------------------------------------------------
 ## Bug report information
-(delete this section if not applicable)
+(delete this section (everything between the lines) if you're not reporting a bug
+but requesting a feature)
 ### Description
@@ -29,13 +42,22 @@ Describe the results you received
 ### Expected result
-Describe the results you expecteds
+Describe the results you expected
-### Additional information: (Node.js version, Docker version, etc)
+### Additional information
+- Node.js version,
+- Docker version,
+- npm version,
+- distribution/OS,
+- optional: anything else you deem helpful to us.
+--------------------------------------------------------------------------------
 ## Feature Request
-(delete this section if not applicable)
+(delete this section (everything between the lines) if you're not requesting
+a feature but reporting a bug)
 ### Proposal
@@ -52,3 +74,14 @@ What you would like to happen
 ### Use case
 Please provide use cases for changing the current behavior
+### Additional information
+- Is this request for your company? Y/N
+- If Y: Company name:
+- Are you using any Scality Enterprise Edition products (RING, Zenko EE)? Y/N
+- Are you willing to contribute this feature yourself?
+- Position/Title:
+- How did you hear about us?
+--------------------------------------------------------------------------------

5
.gitignore vendored

@@ -22,9 +22,14 @@ coverage
 # Compiled binary addons (http://nodejs.org/api/addons.html)
 build/Release
+# Sphinx build dir
+_build
 # Dependency directory
 # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git
 node_modules
+yarn.lock
+.tox
 # Junit directory
 junit


@@ -19,6 +19,9 @@ COPY ./ ./
 VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
+ENV NO_PROXY localhost,127.0.0.1
+ENV no_proxy localhost,127.0.0.1
 ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
 CMD [ "npm", "start" ]


@@ -121,8 +121,9 @@ where the data is saved. If the bucket has no location
 constraint, the endpoint of the PUT request will be
 used to determine location.
-See the Configuration section below to learn how to set
-location constraints.
+See the Configuration section in our documentation
+[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
+to learn how to set location constraints.
 ## Run it with an in-memory backend


@@ -0,0 +1,4 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
require('../lib/nfs/utilities.js').createBucketWithNFSEnabled();

100
bin/search_bucket.js Executable file

@@ -0,0 +1,100 @@
#!/bin/sh
// 2>/dev/null ; exec "$(which nodejs 2>/dev/null || which node)" "$0" "$@"
'use strict'; // eslint-disable-line strict
const { auth } = require('arsenal');
const commander = require('commander');
const http = require('http');
const https = require('https');
const logger = require('../lib/utilities/logger');
function _performSearch(host,
port,
bucketName,
query,
accessKey,
secretKey,
verbose, ssl) {
const escapedSearch = encodeURIComponent(query);
const options = {
host,
port,
method: 'GET',
path: `/${bucketName}/?search=${escapedSearch}`,
headers: {
'Content-Length': 0,
},
rejectUnauthorized: false,
};
const transport = ssl ? https : http;
const request = transport.request(options, response => {
if (verbose) {
logger.info('response status code', {
statusCode: response.statusCode,
});
logger.info('response headers', { headers: response.headers });
}
const body = [];
response.setEncoding('utf8');
response.on('data', chunk => body.push(chunk));
response.on('end', () => {
if (response.statusCode >= 200 && response.statusCode < 300) {
logger.info('Success');
process.stdout.write(body.join(''));
process.exit(0);
} else {
logger.error('request failed with HTTP Status ', {
statusCode: response.statusCode,
body: body.join(''),
});
process.exit(1);
}
});
});
// generateV4Headers expects a request object whose path does not
// include the query string
request.path = `/${bucketName}`;
auth.client.generateV4Headers(request, { search: query },
accessKey, secretKey, 's3');
request.path = `/${bucketName}?search=${escapedSearch}`;
if (verbose) {
logger.info('request headers', { headers: request._headers });
}
request.end();
}
/**
* This function is used as a binary to send a request to S3 to perform a
* search on the objects in a bucket
*
* @return {undefined}
*/
function searchBucket() {
// TODO: Include other bucket listing possible query params?
commander
.version('0.0.1')
.option('-a, --access-key <accessKey>', 'Access key id')
.option('-k, --secret-key <secretKey>', 'Secret access key')
.option('-b, --bucket <bucket>', 'Name of the bucket')
.option('-q, --query <query>', 'Search query')
.option('-h, --host <host>', 'Host of the server')
.option('-p, --port <port>', 'Port of the server')
.option('-s', '--ssl', 'Enable ssl')
.option('-v, --verbose')
.parse(process.argv);
const { host, port, accessKey, secretKey, bucket, query, verbose, ssl } =
commander;
if (!host || !port || !accessKey || !secretKey || !bucket || !query) {
logger.error('missing parameter');
commander.outputHelp();
process.exit(1);
}
_performSearch(host, port, bucket, query, accessKey, secretKey, verbose,
ssl);
}
searchBucket();
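For reference, a minimal invocation of this helper (a sketch, assuming a local CloudServer on 127.0.0.1:8000 and the default accessKey1/verySecretKey1 test credentials; the bucket name and query below are placeholders):

```sh
# Hypothetical example; the flags match the commander options defined in bin/search_bucket.js above.
node bin/search_bucket.js \
  -a accessKey1 -k verySecretKey1 \
  -b mybucket \
  -q "key=search-item" \
  -h 127.0.0.1 -p 8000 -v
```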


@@ -1,45 +0,0 @@
---
general:
branches:
ignore:
- /^ultron\/.*/ # Ignore ultron/* branches
artifacts:
- coverage/
machine:
node:
version: 6.13.1
services:
- redis
- docker
ruby:
version: "2.4.1"
environment:
CXX: g++-4.9
ENABLE_LOCAL_CACHE: true
REPORT_TOKEN: report-token-1
hosts:
bucketwebsitetester.s3-website-us-east-1.amazonaws.com: 127.0.0.1
dependencies:
override:
- rm -rf node_modules
- npm install
post:
- sudo pip install flake8 yamllint
- sudo pip install s3cmd==1.6.1
# fog and ruby testing dependencies
- gem install fog-aws -v 1.3.0
- gem install mime-types -v 3.1
- gem install rspec -v 3.5
- gem install json
# java sdk dependencies
- sudo apt-get install -y -q default-jdk
test:
override:
- docker run --name squid-proxy -d --net=host
--publish 3128:3128 sameersbn/squid:3.3.8-23
- bash tests.bash:
parallel: true


@@ -19,5 +19,38 @@
 "access": "accessKey2",
 "secret": "verySecretKey2"
 }]
+},
+{
+"name": "Clueso",
+"email": "inspector@clueso.info",
+"arn": "arn:aws:iam::123456789014:root",
+"canonicalID": "http://acs.zenko.io/accounts/service/clueso",
+"shortid": "123456789014",
+"keys": [{
+"access": "cluesoKey1",
+"secret": "cluesoSecretKey1"
+}]
+},
+{
+"name": "Replication",
+"email": "inspector@replication.info",
+"arn": "arn:aws:iam::123456789015:root",
+"canonicalID": "http://acs.zenko.io/accounts/service/replication",
+"shortid": "123456789015",
+"keys": [{
+"access": "replicationKey1",
+"secret": "replicationSecretKey1"
+}]
+},
+{
+"name": "Lifecycle",
+"email": "inspector@lifecycle.info",
+"arn": "arn:aws:iam::123456789016:root",
+"canonicalID": "http://acs.zenko.io/accounts/service/lifecycle",
+"shortid": "123456789016",
+"keys": [{
+"access": "lifecycleKey1",
+"secret": "lifecycleSecretKey1"
+}]
 }]
 }


@@ -8,7 +8,9 @@
 "cloudserver-front": "us-east-1",
 "s3.docker.test": "us-east-1",
 "127.0.0.2": "us-east-1",
-"s3.amazonaws.com": "us-east-1"
+"s3.amazonaws.com": "us-east-1",
+"zenko-cloudserver-replicator": "us-east-1",
+"lb": "us-east-1"
 },
 "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
 "s3-website.us-east-2.amazonaws.com",
@@ -23,7 +25,8 @@
 "s3-website-eu-west-1.amazonaws.com",
 "s3-website-sa-east-1.amazonaws.com",
 "s3-website.localhost",
-"s3-website.scality.test"],
+"s3-website.scality.test",
+"zenkoazuretest.blob.core.windows.net"],
 "replicationEndpoints": [{
 "site": "zenko",
 "servers": ["127.0.0.1:8000"],
@@ -32,6 +35,10 @@
 "site": "us-east-2",
 "type": "aws_s3"
 }],
+"backbeat": {
+"host": "localhost",
+"port": 8900
+},
 "cdmi": {
 "host": "localhost",
 "port": 81,
@@ -45,7 +52,7 @@
 "host": "localhost",
 "port": 8500
 },
-"clusters": 10,
+"clusters": 1,
 "log": {
 "logLevel": "info",
 "dumpLevel": "error"
@@ -70,12 +77,32 @@
 "port": 9991
 },
 "recordLog": {
-"enabled": false,
+"enabled": true,
 "recordLogName": "s3-recordlog"
 },
 "mongodb": {
-"host": "localhost",
-"port": 27018,
-"database": "metadata"
+"replicaSetHosts": "localhost:27018,localhost:27019,localhost:27020",
+"writeConcern": "majority",
+"replicaSet": "rs0",
+"readPreference": "primary",
+"database": "metadata"
+},
+"externalBackends": {
+"aws_s3": {
+"httpAgent": {
+"keepAlive": false,
+"keepAliveMsecs": 1000,
+"maxFreeSockets": 256,
+"maxSockets": null
+}
+},
+"gcp": {
+"httpAgent": {
+"keepAlive": true,
+"keepAliveMsecs": 1000,
+"maxFreeSockets": 256,
+"maxSockets": null
+}
+}
 }
 }


@@ -39,6 +39,8 @@ const constants = {
 // once the multipart upload is complete.
 mpuBucketPrefix: 'mpuShadowBucket',
 blacklistedPrefixes: { bucket: [], object: [] },
+// GCP Object Tagging Prefix
+gcpTaggingPrefix: 'aws-tag-',
 // PublicId is used as the canonicalID for a request that contains
 // no authentication information. Requestor can access
 // only public resources
@@ -64,11 +66,21 @@ const constants = {
 // http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
 minimumAllowedPartSize: 5242880,
+// AWS sets a maximum total parts limit
+// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html
+maximumAllowedPartCount: 10000,
+gcpMaximumAllowedPartCount: 1024,
 // Max size on put part or copy part is 5GB. For functional
 // testing use 110 MB as max
 maximumAllowedPartSize: process.env.MPU_TESTING === 'yes' ? 110100480 :
 5368709120,
+// Max size allowed in a single put object request is 5GB
+// https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
+maximumAllowedUploadSize: 5368709120,
 // AWS states max size for user-defined metadata (x-amz-meta- headers) is
 // 2 KB: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
 // In testing, AWS seems to allow up to 88 more bytes, so we do the same.
@@ -105,6 +117,7 @@ const constants = {
 legacyLocations: ['sproxyd', 'legacy'],
 /* eslint-disable camelcase */
 externalBackends: { aws_s3: true, azure: true, gcp: true },
+replicationBackends: { aws_s3: true, azure: true, gcp: true },
 // some of the available data backends (if called directly rather
 // than through the multiple backend gateway) need a key provided
 // as a string as first parameter of the get/delete methods.
@@ -113,13 +126,17 @@ const constants = {
 // for external backends, don't call unless at least 1 minute
 // (60,000 milliseconds) since last call
 externalBackendHealthCheckInterval: 60000,
-versioningNotImplBackends: { azure: true },
-mpuMDStoredExternallyBackend: { aws_s3: true },
+versioningNotImplBackends: { azure: true, gcp: true },
+mpuMDStoredExternallyBackend: { aws_s3: true, gcp: true },
+skipBatchDeleteBackends: { azure: true, gcp: true },
+s3HandledBackends: { azure: true, gcp: true },
+hasCopyPartBackends: { aws_s3: true, gcp: true },
 /* eslint-enable camelcase */
 mpuMDStoredOnS3Backend: { azure: true },
 azureAccountNameRegex: /^[a-z0-9]{3,24}$/,
 base64Regex: new RegExp('^(?:[A-Za-z0-9+/]{4})*' +
 '(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$'),
+productName: 'APN/1.0 Scality/1.0 Scality CloudServer for Zenko',
 // user metadata applied on zenko objects
 zenkoIDHeader: 'x-amz-meta-zenko-instance-id',
 bucketOwnerActions: [


@@ -17,13 +17,17 @@ process.on('uncaughtException', err => {
 if (config.backends.data === 'file' ||
 (config.backends.data === 'multiple' &&
 config.backends.metadata !== 'scality')) {
-const dataServer = new arsenal.network.rest.RESTServer(
-{ bindAddress: config.dataDaemon.bindAddress,
+const dataServer = new arsenal.network.rest.RESTServer({
+bindAddress: config.dataDaemon.bindAddress,
 port: config.dataDaemon.port,
-dataStore: new arsenal.storage.data.file.DataFileStore(
-{ dataPath: config.dataDaemon.dataPath,
-log: config.log }),
-log: config.log });
+dataStore: new arsenal.storage.data.file.DataFileStore({
+dataPath: config.dataDaemon.dataPath,
+log: config.log,
+noSync: config.dataDaemon.noSync,
+noCache: config.dataDaemon.noCache,
+}),
+log: config.log,
+});
 dataServer.setup(err => {
 if (err) {
 logger.error('Error initializing REST data server',


@@ -6,14 +6,15 @@ set -e
 # modifying config.json
 JQ_FILTERS_CONFIG="."
+# ENDPOINT var can accept comma separated values
+# for multiple endpoint locations
 if [[ "$ENDPOINT" ]]; then
-HOST_NAME="$ENDPOINT"
-fi
-if [[ "$HOST_NAME" ]]; then
-JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$HOST_NAME\"]=\"us-east-1\""
-echo "Host name has been modified to $HOST_NAME"
-echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with $HOST_NAME"
+IFS="," read -ra HOST_NAMES <<< "$ENDPOINT"
+for host in "${HOST_NAMES[@]}"; do
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .restEndpoints[\"$host\"]=\"us-east-1\""
+done
+echo "Host name has been modified to ${HOST_NAMES[@]}"
+echo "Note: In your /etc/hosts file on Linux, OS X, or Unix with root permissions, make sure to associate 127.0.0.1 with ${HOST_NAMES[@]}"
 fi
 if [[ "$LOG_LEVEL" ]]; then
@@ -25,7 +26,7 @@ if [[ "$LOG_LEVEL" ]]; then
 fi
 fi
-if [[ "$SSL" && "$HOST_NAME" ]]; then
+if [[ "$SSL" && "$HOST_NAMES" ]]; then
 # This condition makes sure that the certificates are not generated twice. (for docker restart)
 if [ ! -f ./ca.key ] || [ ! -f ./ca.crt ] || [ ! -f ./server.key ] || [ ! -f ./server.crt ] ; then
 # Compute config for utapi tests
@@ -36,15 +37,15 @@ prompt = no
 req_extensions = s3_req
 [req_distinguished_name]
-CN = ${HOST_NAME}
+CN = ${HOST_NAMES[0]}
 [s3_req]
 subjectAltName = @alt_names
 extendedKeyUsage = serverAuth, clientAuth
 [alt_names]
-DNS.1 = *.${HOST_NAME}
-DNS.2 = ${HOST_NAME}
+DNS.1 = *.${HOST_NAMES[0]}
+DNS.2 = ${HOST_NAMES[0]}
 EOF
@@ -81,6 +82,18 @@ if [[ "$METADATA_HOST" ]]; then
 JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
 fi
+if [[ "$MONGODB_HOSTS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSetHosts=\"$MONGODB_HOSTS\""
+fi
+if [[ "$MONGODB_RS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.replicaSet=\"$MONGODB_RS\""
+fi
+if [[ "$MONGODB_DATABASE" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.database=\"$MONGODB_DATABASE\""
+fi
 if [ -z "$REDIS_HA_NAME" ]; then
 REDIS_HA_NAME='mymaster'
 fi
@@ -113,11 +126,67 @@ if [[ "$RECORDLOG_ENABLED" ]]; then
 JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .recordLog.enabled=true"
 fi
+if [[ "$STORAGE_LIMIT_ENABLED" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .utapi.metrics[.utapi.metrics | length]=\"location\""
+fi
+if [[ "$CRR_METRICS_HOST" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.host=\"$CRR_METRICS_HOST\""
+fi
+if [[ "$CRR_METRICS_PORT" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .backbeat.port=$CRR_METRICS_PORT"
+fi
+if [[ "$HEALTHCHECKS_ALLOWFROM" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .healthChecks.allowFrom=[\"$HEALTHCHECKS_ALLOWFROM\"]"
+fi
+# external backends http(s) agent config
+# AWS
+if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAlive=$AWS_S3_HTTPAGENT_KEEPALIVE"
+fi
+if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.keepAliveMsecs=$AWS_S3_HTTPAGENT_KEEPALIVE_MS"
+fi
+if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
+fi
+if [[ "$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.aws_s3.httpAgent.maxFreeSockets=$AWS_S3_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
+fi
+#GCP
+if [[ "$GCP_HTTPAGENT_KEEPALIVE" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAlive=$GCP_HTTPAGENT_KEEPALIVE"
+fi
+if [[ "$GCP_HTTPAGENT_KEEPALIVE_MS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.keepAliveMsecs=$GCP_HTTPAGENT_KEEPALIVE_MS"
+fi
+if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_SOCKETS"
+fi
+if [[ "$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS" ]]; then
+JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .externalBackends.gcp.httpAgent.maxFreeSockets=$GCP_HTTPAGENT_KEEPALIVE_MAX_FREE_SOCKETS"
+fi
 if [[ $JQ_FILTERS_CONFIG != "." ]]; then
 jq "$JQ_FILTERS_CONFIG" config.json > config.json.tmp
 mv config.json.tmp config.json
 fi
+if test -v INITIAL_INSTANCE_ID && test -v S3METADATAPATH && ! test -f ${S3METADATAPATH}/uuid ; then
+echo -n ${INITIAL_INSTANCE_ID} > ${S3METADATAPATH}/uuid
+fi
 # s3 secret credentials for Zenko
 if [ -r /run/secrets/s3-credentials ] ; then
 . /run/secrets/s3-credentials
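Taken together, the new entrypoint switches above can be driven entirely from `docker run`. A hedged sketch (container name, host names, and values below are illustrative only, not part of this changeset):

```sh
# Illustrative only: exercises the comma-separated ENDPOINT handling and the
# new MongoDB replica-set variables added to docker-entrypoint.sh above.
docker run -d --name cloudserver -p 8000:8000 \
  -e ENDPOINT=zenko.example.local,s3.example.local \
  -e MONGODB_HOSTS=localhost:27018,localhost:27019,localhost:27020 \
  -e MONGODB_RS=rs0 \
  -e MONGODB_DATABASE=metadata \
  scality/s3server
```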


@@ -27,7 +27,7 @@ including null versions and delete markers, described in the above
 links.
 Implementation of Bucket Versioning in Zenko CloudServer
------------------------------------------
+--------------------------------------------------------
 Overview of Metadata and API Component Roles
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -179,12 +179,13 @@ PUT
 the master version with this version.
 - ``versionId: <versionId>`` create or update a specific version (for updating
 version's ACL or tags, or remote updates in geo-replication)
-- if the version identified by ``versionId`` happens to be the latest
+* if the version identified by ``versionId`` happens to be the latest
 version, the master version will be updated as well
-- if the master version is not as recent as the version identified by
+* if the master version is not as recent as the version identified by
 ``versionId``, as may happen with cross-region replication, the master
 will be updated as well
-- note that with ``versionId`` set to an empty string ``''``, it will
+* note that with ``versionId`` set to an empty string ``''``, it will
 overwrite the master version only (same as no options, but the master
 version will have a ``versionId`` property set in its metadata like
 any other version). The ``versionId`` will never be exposed to an
@@ -208,10 +209,13 @@ A deletion targeting the latest version of an object has to:
 - delete the specified version identified by ``versionId``
 - replace the master version with a version that is a placeholder for
 deletion
 - this version contains a special keyword, 'isPHD', to indicate the
 master version was deleted and needs to be updated
 - initiate a repair operation to update the value of the master
 version:
 - involves listing the versions of the object and get the latest
 version to replace the placeholder delete version
 - if no more versions exist, metadata deletes the master version,
@@ -755,16 +759,16 @@ command in the Zenko CloudServer directory:
 This will open two ports:
 - one is based on socket.io and is used for metadata transfers (9990 by
 default)
 - the other is a REST interface used for data transfers (9991 by
 default)
 Then, one or more instances of Zenko CloudServer without the dmd can be started
 elsewhere with:
-::
+.. code:: sh
 npm run start_s3server
@@ -792,10 +796,10 @@ access:
 To run a remote dmd, you have to do the following:
 - change both ``"host"`` attributes to the IP or host name where the
 dmd is run.
 - Modify the ``"bindAddress"`` attributes in ``"metadataDaemon"`` and
 ``"dataDaemon"`` sections where the dmd is run to accept remote
 connections (e.g. ``"::"``)
@@ -831,13 +835,13 @@ and ``createReadStream``. They more or less map the parameters accepted
 by the corresponding calls in the LevelUp implementation of LevelDB.
 They differ in the following:
 - The ``sync`` option is ignored (under the hood, puts are gathered
 into batches which have their ``sync`` property enforced when they
 are committed to the storage)
 - Some additional versioning-specific options are supported
 - ``createReadStream`` becomes asynchronous, takes an additional
 callback argument and returns the stream in the second callback
 parameter
@@ -847,10 +851,10 @@ with ``DEBUG='socket.io*'`` environment variable set.
 One parameter controls the timeout value after which RPC commands sent
 end with a timeout error, it can be changed either:
 - via the ``DEFAULT_CALL_TIMEOUT_MS`` option in
 ``lib/network/rpc/rpc.js``
 - or in the constructor call of the ``MetadataFileClient`` object (in
 ``lib/metadata/bucketfile/backend.js`` as ``callTimeoutMs``.
 Default value is 30000.
@@ -864,10 +868,10 @@ can tune the behavior (for better throughput or getting it more robust
 on weak networks), they have to be set in ``mdserver.js`` file directly,
 as there is no support in ``config.json`` for now for those options:
 - ``streamMaxPendingAck``: max number of pending ack events not yet
 received (default is 5)
 - ``streamAckTimeoutMs``: timeout for receiving an ack after an output
 stream packet is sent to the client (default is 5000)
 Data exchange through the REST data port
@@ -918,17 +922,17 @@ Listing Types
 We use three different types of metadata listing for various operations.
 Here are the scenarios we use each for:
 - 'Delimiter' - when no versions are possible in the bucket since it is
 an internally-used only bucket which is not exposed to a user.
 Namely,
 1. to list objects in the "user's bucket" to respond to a GET SERVICE
 request and
 2. to do internal listings on an MPU shadow bucket to complete multipart
 upload operations.
 - 'DelimiterVersion' - to list all versions in a bucket
 - 'DelimiterMaster' - to list just the master versions of objects in a
 bucket
 Algorithms


@@ -178,7 +178,7 @@ Ruby
 ~~~~
 `AWS SDK for Ruby - Version 2 <http://docs.aws.amazon.com/sdkforruby/api/>`__
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. code:: ruby
@@ -239,6 +239,7 @@ Python
 Client integration
 .. code:: python
 import boto3
 client = boto3.client(
@@ -253,6 +254,7 @@ Client integration
 Full integration (with object mapping)
 .. code:: python
 import os
 from botocore.utils import fix_s3_host


@@ -33,7 +33,7 @@ to your ``locationConfig.json`` file with the ``aws_s3`` location type:
 .. code:: json
 (...)
 "awsbackend": {
 "type": "aws_s3",
 "details": {
@@ -43,7 +43,7 @@ to your ``locationConfig.json`` file with the ``aws_s3`` location type:
 "credentialsProfile": "aws_hosted_profile"
 }
 }
 (...)
 You will also have to edit your AWS credentials file to be able to use your
 command line tool of choice. This file should mention credentials for all the
@@ -52,12 +52,12 @@ profiles.
 .. code:: json
 [default]
 aws_access_key_id=accessKey1
 aws_secret_access_key=verySecretKey1
 [aws_hosted_profile]
 aws_access_key_id={{YOUR_ACCESS_KEY}}
 aws_secret_access_key={{YOUR_SECRET_KEY}}
 Just as you need to mount your locationConfig.json, you will need to mount your
 AWS credentials file at run time:
@@ -71,15 +71,15 @@ to get in the source bucket. ACL's would have to be updated
 on AWS directly to enable this.
 S3BACKEND
-~~~~~~
+~~~~~~~~~
 S3BACKEND=file
-^^^^^^^^^^^
+^^^^^^^^^^^^^^
 When storing file data, for it to be persistent you must mount docker volumes
 for both data and metadata. See `this section <#using-docker-volumes-in-production>`__
 S3BACKEND=mem
-^^^^^^^^^^
+^^^^^^^^^^^^^
 This is ideal for testing - no data will remain after container is shutdown.
 ENDPOINT
@@ -89,9 +89,9 @@ This variable specifies your endpoint. If you have a domain such as
 new.host.com, by specifying that here, you and your users can direct s3
 server requests to new.host.com.
-.. code:: shell
+.. code-block:: shell
-docker run -d --name s3server -p 8000:8000 -e ENDPOINT=new.host.com scality/s3server
+$ docker run -d --name s3server -p 8000:8000 -e ENDPOINT=new.host.com scality/s3server
 Note: In your ``/etc/hosts`` file on Linux, OS X, or Unix with root
 permissions, make sure to associate 127.0.0.1 with ``new.host.com``
@@ -107,7 +107,7 @@ You can set credentials for many accounts by editing
 want to specify one set of your own, you can use these environment
 variables.
-.. code:: shell
+.. code-block:: shell
 docker run -d --name s3server -p 8000:8000 -e SCALITY_ACCESS_KEY_ID=newAccessKey
 -e SCALITY_SECRET_ACCESS_KEY=newSecretKey scality/s3server
@@ -123,9 +123,9 @@ This variable allows you to change the log level: info, debug or trace.
 The default is info. Debug will give you more detailed logs and trace
 will give you the most detailed.
-.. code:: shell
+.. code-block:: shell
-docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
+$ docker run -d --name s3server -p 8000:8000 -e LOG_LEVEL=trace scality/s3server
 SSL
 ~~~
@@ -144,9 +144,9 @@ extra container to do SSL/TLS termination such as haproxy/nginx/stunnel
 to limit what an exploit on either component could expose, as well as
 certificates in a mounted volume
-.. code:: shell
+.. code-block:: shell
-docker run -d --name s3server -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT>
+$ docker run -d --name s3server -p 8000:8000 -e SSL=TRUE -e ENDPOINT=<YOUR_ENDPOINT>
 scality/s3server
 More information about how to use S3 server with SSL
@@ -159,9 +159,9 @@ This variable instructs the Zenko CloudServer, and its data and metadata
 components to listen on the specified address. This allows starting the data
 or metadata servers as standalone services, for example.
-.. code:: shell
+.. code-block:: shell
-docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0
+$ docker run -d --name s3server-data -p 9991:9991 -e LISTEN_ADDR=0.0.0.0
 scality/s3server npm run start_dataserver
@@ -172,9 +172,9 @@ These variables configure the data and metadata servers to use,
 usually when they are running on another host and only starting the stateless
 Zenko CloudServer.
-.. code:: shell
+.. code-block:: shell
-docker run -d --name s3server -e DATA_HOST=s3server-data
+$ docker run -d --name s3server -e DATA_HOST=s3server-data
 -e METADATA_HOST=s3server-metadata scality/s3server npm run start_s3server
 REDIS\_HOST
@@ -183,9 +183,9 @@ REDIS\_HOST
 Use this variable to connect to the redis cache server on another host than
 localhost.
-.. code:: shell
+.. code-block:: shell
-docker run -d --name s3server -p 8000:8000
+$ docker run -d --name s3server -p 8000:8000
 -e REDIS_HOST=my-redis-server.example.com scality/s3server
 REDIS\_PORT
@@ -194,9 +194,9 @@ REDIS\_PORT
 Use this variable to connect to the redis cache server on another port than
 the default 6379.
-.. code:: shell
+.. code-block:: shell
-docker run -d --name s3server -p 8000:8000
+$ docker run -d --name s3server -p 8000:8000
 -e REDIS_PORT=6379 scality/s3server
 Tunables and Setup Tips
@@ -215,9 +215,9 @@ Docker volumes to host your data and metadata outside your Zenko CloudServer
 Docker container. Otherwise, the data and metadata will be destroyed
 when you erase the container.
-.. code:: shell
+.. code-block:: shell
-docker run -­v $(pwd)/data:/usr/src/app/localData -­v $(pwd)/metadata:/usr/src/app/localMetadata
+$ docker run -­v $(pwd)/data:/usr/src/app/localData -­v $(pwd)/metadata:/usr/src/app/localMetadata
 -p 8000:8000 ­-d scality/s3server
 This command mounts the host directory, ``./data``, into the container
@@ -233,11 +233,12 @@ Adding modifying or deleting accounts or users credentials
 2. Use `Docker
 Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__
 to override the default ``authdata.json`` through a docker file mapping.
 For example:
-.. code:: shell
+.. code-block:: shell
-docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
+$ docker run -v $(pwd)/authdata.json:/usr/src/app/conf/authdata.json -p 8000:8000 -d
 scality/s3server
 Specifying your own host name
@@ -272,9 +273,9 @@ More information about location configuration
 Then, run your Scality S3 Server using `Docker
 Volume <https://docs.docker.com/engine/tutorials/dockervolumes/>`__:
-.. code:: shell
+.. code-block:: shell
-docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server
+$ docker run -v $(pwd)/config.json:/usr/src/app/config.json -p 8000:8000 -d scality/s3server
 Your local ``config.json`` file will override the default one through a
 docker file mapping.
@@ -292,7 +293,7 @@ The user needs to exist within the container, and own the folder
 For instance, you can modify these lines in the dockerfile:
-.. code:: shell
+.. code-block:: shell
 ...
 && groupadd -r -g 1001 scality \
@@ -316,7 +317,7 @@ Sample ways to run it for CI are:
 - With custom locations (one in-memory, one hosted on AWS), and custom
 credentials mounted:
-.. code:: shell
+.. code-block:: shell
 docker run --name CloudServer -p 8000:8000
 -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
@@ -328,7 +329,7 @@ Sample ways to run it for CI are:
 and custom credentials set as environment variables
 (see `this section <#scality-access-key-id-and-scality-secret-access-key>`__):
-.. code:: shell
+.. code-block:: shell
 docker run --name CloudServer -p 8000:8000
 -v $(pwd)/locationConfig.json:/usr/src/app/locationConfig.json
@@ -346,7 +347,7 @@ multiple backends capabilities of Zenko CloudServer, and that you will have a
 custom endpoint for your local storage, and custom credentials for your local
 storage:
-.. code:: shell
+.. code-block:: shell
 docker run -d --name CloudServer
 -v $(pwd)/data:/usr/src/app/localData -v $(pwd)/metadata:/usr/src/app/localMetadata


@@ -1,5 +1,5 @@
 Getting Started
-=================
+===============
 .. figure:: ../res/scality-cloudserver-logo.png
 :alt: Zenko CloudServer logo
@@ -19,7 +19,7 @@ npm v3 . Up-to-date versions can be found at
 Clone source code
 ~~~~~~~~~~~~~~~~~
-.. code:: shell
+.. code-block:: shell
 git clone https://github.com/scality/S3.git
@@ -28,14 +28,14 @@ Install js dependencies
 Go to the ./S3 folder,
-.. code:: shell
+.. code-block:: shell
 npm install
 Run it with a file backend
 --------------------------
-.. code:: shell
+.. code-block:: shell
 npm start
@@ -53,7 +53,7 @@ pre-created within the repository. If you would like to save the data or
 metadata in different locations of your choice, you must specify them
 with absolute paths. So, when starting the server:
-.. code:: shell
+.. code-block:: shell
 mkdir -m 700 $(pwd)/myFavoriteDataPath
 mkdir -m 700 $(pwd)/myFavoriteMetadataPath
@@ -64,7 +64,7 @@ with absolute paths. So, when starting the server:
 Run it with multiple data backends
 ----------------------------------
-.. code:: shell
+.. code-block:: shell
 export S3DATA='multiple'
 npm start
@@ -76,7 +76,7 @@ With multiple backends, you have the ability to choose where each object
 will be saved by setting the following header with a locationConstraint
 on a PUT request:
-.. code:: shell
+.. code-block:: shell
 'x-amz-meta-scal-location-constraint':'myLocationConstraint'
@@ -91,7 +91,7 @@ constraints.
 Run it with an in-memory backend
 --------------------------------
-.. code:: shell
+.. code-block:: shell
 npm run mem_backend
@@ -108,19 +108,20 @@ Testing
 You can run the unit tests with the following command:
-.. code:: shell
+.. code-block:: shell
 npm test
 You can run the multiple backend unit tests with:
-.. code:: shell
+.. code-block:: shell
 CI=true S3DATA=multiple npm start
 npm run multiple_backend_test
 You can run the linter with:
-.. code:: shell
+.. code-block:: shell
 npm run lint
@@ -152,13 +153,13 @@ instance port (``6379`` by default)
 - Add the following to the etc/hosts file on your machine:
-.. code:: shell
+.. code-block:: shell
 127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com
 - Start the Zenko CloudServer in memory and run the functional tests:
-.. code:: shell
+.. code-block:: shell
 CI=true npm run mem_backend
 CI=true npm run ft_test
@@ -263,7 +264,7 @@ These variables specify authentication credentials for an account named
 Note: Anything in the ``authdata.json`` file will be ignored.
-.. code:: shell
+.. code-block:: shell
 SCALITY_ACCESS_KEY_ID=newAccessKey SCALITY_SECRET_ACCESS_KEY=newSecretKey npm start
@@ -275,7 +276,7 @@ If you wish to use https with your local Zenko CloudServer, you need to set up
 SSL certificates. Here is a simple guide of how to do it.
 Deploying Zenko CloudServer
-^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
 First, you need to deploy **Zenko CloudServer**. This can be done very easily
 via `our **DockerHub**
@@ -287,7 +288,7 @@ with a file backend).
 distribution <https://docs.docker.com/engine/installation/>`__*
 Updating your Zenko CloudServer container's config
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 You're going to add your certificates to your container. In order to do
 so, you need to exec inside your Zenko CloudServer container. Run a
@@ -295,7 +296,7 @@ so, you need to exec inside your Zenko CloudServer container. Run a
 name should be ``scality/s3server``. Copy the corresponding container id
 (here we'll use ``894aee038c5e``, and run:
-.. code:: sh
+.. code-block:: shell
 $> docker exec -it 894aee038c5e bash
@@ -307,7 +308,7 @@ Generate SSL key and certificates
 There are 5 steps to this generation. The paths where the different
 files are stored are defined after the ``-out`` option in each command
-.. code:: sh
+.. code-block:: shell
 # Generate a private key for your CSR
 $> openssl genrsa -out ca.key 2048
@@ -322,7 +323,7 @@ files are stored are defined after the ``-out`` option in each command
 $> openssl x509 -req -in test.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out test.crt -days 99999 -sha256
 Update Zenko CloudServer ``config.json``
-**********************************
+****************************************
 Add a ``certFilePaths`` section to ``./config.json`` with the
 appropriate paths:
@@ -336,7 +337,7 @@ appropriate paths:
 }
 Run your container with the new config
-****************************************
+**************************************
 First, you need to exit your container. Simply run ``$> exit``. Then,
 you need to restart your container. Normally, a simple
@@ -356,13 +357,13 @@ permissions), edit the line of localhost so it looks like this:
 127.0.0.1 localhost s3.scality.test
 Copy the local certificate authority from your container
-*********************************************************
+********************************************************
 In the above commands, it's the file named ``ca.crt``. Choose the path
 you want to save this file at (here we chose ``/root/ca.crt``), and run
 something like:
-.. code:: sh
+.. code-block:: shell
 $> docker cp 894aee038c5e:/usr/src/app/ca.crt /root/ca.crt


@@ -337,7 +337,7 @@ tutorial, we used an Ubuntu 14.04 host to deploy and use s3fs over
 Scality's Zenko CloudServer.
 Deploying Zenko CloudServer with SSL
----------------------------
+------------------------------------
 First, you need to deploy **Zenko CloudServer**. This can be done very easily
 via `our DockerHub
@@ -401,7 +401,7 @@ s3fs expects you to provide it with a password file. Our file is
 $> chmod 600 /etc/passwd-s3fs
 Using Zenko CloudServer with s3fs
------------------------
+---------------------------------
 First, you're going to need a mountpoint; we chose ``/mnt/tests3fs``:
@@ -634,7 +634,7 @@ backing up is a folder I modify permanently during my workday, so I want
 incremental backups every 5mn from 8AM to 9PM monday to friday. Here is
 the line I will paste in my crontab:
-.. code:: cron
+.. code:: sh
 */5 8-20 * * 1-5 /usr/local/sbin/backup.sh
docs/MD_SEARCH.md Normal file
@ -0,0 +1,229 @@
# Metadata Search Documentation
## Description
This feature enables metadata search to be performed on the metadata of objects
stored in Zenko.
## Requirements
+ MongoDB
## Design
The MD Search feature expands on the existing `GET Bucket` S3 API. It allows
users to conduct metadata searches by adding the custom Zenko querystring
parameter, `search`. The `search` parameter is structured as a pseudo-SQL
WHERE clause and supports basic SQL operators,
e.g. `"A=1 AND B=2 OR C=3"` (more complex queries can also be built by
nesting operators, `(` and `)`).
The search process is as follows:
+ Zenko receives a `GET` request.
```
# regular getBucket request
GET /bucketname HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string
# getBucket versions request
GET /bucketname?versions HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string
# search getBucket request
GET /bucketname?search=key%3Dsearch-item HTTP/1.1
Host: 127.0.0.1:8000
Date: Wed, 18 Oct 2018 17:50:00 GMT
Authorization: authorization string
```
+ If the request does not contain the query parameter `search`, a normal bucket
listing is performed and an XML result containing the list of objects is
returned as the response.
+ If the request does contain the query parameter `search`, the search string is
parsed and validated.
+ If the search string is invalid, an `InvalidArgument` error is returned
as the response.
+ If the search string is valid, it is parsed and an abstract syntax
tree (AST) is generated.
+ The AST is then passed to the MongoDB backend to be used as the query filter
for retrieving objects in a bucket that satisfies the requested search
conditions.
+ The filtered results are then parsed and returned as the response.
The results of an MD search have the same structure as the `GET Bucket`
results:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucketname</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>objectKey</Key>
<LastModified>2018-04-19T18:31:49.426Z</LastModified>
<ETag>&quot;d41d8cd98f00b204e9800998ecf8427e&quot;</ETag>
<Size>0</Size>
<Owner>
<ID>79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be</ID>
<DisplayName>Bart</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
</Contents>
<Contents>
...
</Contents>
</ListBucketResult>
```
## Performing MD Search with Zenko
To make a successful request to Zenko, you need:
+ Zenko credentials
+ a request signed with AWS Auth V4
With these requirements met, you can perform metadata searches by:
+ using the `search_bucket` tool in the
[Scality/S3](https://github.com/scality/S3) GitHub repository.
+ creating an Auth V4 signed HTTP request to Zenko in the programming language of
your choice
### Using the S3 Tool
After cloning the [Scality/S3](https://github.com/scality/S3) GitHub repository
and installing the necessary dependencies, you can run the following command
in the S3 project root directory to access the search tool.
```
node bin/search_bucket
```
This will generate the following output
```
Usage: search_bucket [options]
Options:
-V, --version output the version number
-a, --access-key <accessKey> Access key id
-k, --secret-key <secretKey> Secret access key
-b, --bucket <bucket> Name of the bucket
-q, --query <query> Search query
-h, --host <host> Host of the server
-p, --port <port> Port of the server
-s --ssl
-v, --verbose
-h, --help output usage information
```
In the following examples, our Zenko Server is accessible on endpoint
`http://127.0.0.1:8000` and contains the bucket `zenkobucket`.
```
# search for objects with metadata "blue"
node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
-q "x-amz-meta-color=blue" -h 127.0.0.1 -p 8000
# search for objects tagged with "type=color"
node bin/search_bucket -a accessKey1 -k verySecretKey1 -b zenkobucket \
-q "tags.type=color" -h 127.0.0.1 -p 8000
```
### Coding Examples
Search requests can also be performed by making HTTP requests authenticated
with the `AWS Signature Version 4` scheme.\
See the following URLs for more information about the V4 authentication scheme:
+ http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
+ http://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
You can also view examples for making requests with Auth V4 in various
languages [here](../examples).
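As a minimal sketch (mirroring the Node.js example under `examples/`, and
assuming the endpoint, bucket, and credentials used throughout this document),
a search can be issued from JavaScript by appending the `search` querystring to
a regular `listObjects` request before it is signed:
```js
const { S3 } = require('aws-sdk');

// Endpoint and credentials follow the examples above; adjust for your deployment.
const s3Client = new S3({
    endpoint: 'http://127.0.0.1:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    region: 'us-east-1',
    sslEnabled: false,
    s3ForcePathStyle: true,
    signatureVersion: 'v4',
});

// Start from a regular GET Bucket (listObjects) request...
const req = s3Client.listObjects({ Bucket: 'zenkobucket' });

// ...and append the custom "search" querystring during the build phase,
// before the request is signed and sent.
req.on('build', () => {
    const search = encodeURIComponent('x-amz-meta-color="blue"');
    req.httpRequest.path = `${req.httpRequest.path}?search=${search}`;
});
req.on('success', res => process.stdout.write(`Result ${res.data}\n`));
req.on('error', err => process.stdout.write(`Error ${err}\n`));
req.send();
```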
### Specifying Metadata Fields
To search common metadata headers:
```
{metadata-key}{supported SQL op}{search value}
# example
key = blueObject
size > 0
key LIKE "blue.*"
```
To search custom user metadata:
```
# metadata must be prefixed with "x-amz-meta-"
x-amz-meta-{usermetadata-key}{supported SQL op}{search value}
# example
x-amz-meta-color = blue
x-amz-meta-color != red
x-amz-meta-color LIKE "b.*"
```
To search tags:
```
# tag searches must be prefixed with "tags."
tags.{tag-key}{supported SQL op}{search value}
# example
tags.type = color
```
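These forms can be combined with the SQL operators listed above. For instance
(a hypothetical query, not one of this document's examples), blue, non-empty
objects tagged as a color could be matched with:
```
x-amz-meta-color = blue AND size > 0 AND tags.type = color

# URL-encoded as the search querystring:
?search=x-amz-meta-color%20%3D%20blue%20AND%20size%20%3E%200%20AND%20tags.type%20%3D%20color
```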
### Differences from SQL
The MD search queries are similar to the `WHERE` clauses of SQL queries, but
they differ in that:
+ MD search queries follow the `PCRE` format
+ Search queries do not require values with hyphens to be enclosed in
backticks, ``(`)``
```
# SQL query
`x-amz-meta-search-item` = `ice-cream-cone`
# MD Search query
x-amz-meta-search-item = ice-cream-cone
```
+ The search queries do not support all of the SQL operators.
+ Supported SQL Operators: `=`, `<`, `>`, `<=`, `>=`, `!=`,
`AND`, `OR`, `LIKE`, `<>`
+ Unsupported SQL Operators: `NOT`, `BETWEEN`, `IN`, `IS`, `+`,
`-`, `%`, `^`, `/`, `*`, `!`
#### Using Regular Expressions in MD Search
+ Regular expressions used in MD search differ from SQL in that wildcards are
represented with `.*` instead of `%`.
+ Regex patterns must be wrapped in quotes, as not doing so can lead to
misinterpretation of the pattern.
+ Regex patterns can be written in the form of the `/pattern/` syntax, or as
just the pattern if no regex options are required, similar to `PCRE`.
Example regular expressions:
```
# search for strings containing word substring "helloworld"
".*helloworld.*"
"/.*helloworld.*/"
"/.*helloworld.*/i"
```
docs/Makefile Normal file
@ -0,0 +1,21 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SPHINXPROJ = Zenko
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@ -1,3 +1,5 @@
.. _use-public-cloud:
Using Public Clouds as data backends
====================================
@ -243,7 +245,7 @@ There are a few configurable options here:
this region should behave like any other AWS S3 region (in the case of MS Azure
hosted data, this is mostly relevant for the format of errors);
- :code:`azureStorageEndpoint` : set to your storage account's endpoint, usually
:code:`https://{{storageAccountName}}.blob.core.windows.net`;
- :code:`azureContainerName` : set to an *existing container* in your MS Azure
storage account; this is the container in which your data will be stored for
this location constraint;
@ -0,0 +1,79 @@
============================================
Add New Backend Storage To Zenko Cloudserver
============================================
This set of documents aims at bootstrapping developers with Zenko's Cloudserver
module, so they can then go on and contribute features.
.. toctree::
:maxdepth: 2
non-s3-compatible-backend
s3-compatible-backend
We always encourage our community to offer new extensions to Zenko,
and new backend support is paramount to meeting more community needs.
If that is something you want to contribute (or just do on your own
version of the cloudserver image), this is the guide to read. Please
make sure you follow our `Contributing Guidelines`_.
If you need help with anything, please search our `forum`_ for more
information.
Add support for a new backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Currently the main public cloud protocols are supported and more can
be added. There are two main types of backend: those compatible with
Amazon's S3 protocol and those not compatible.
================= ========== ============ ===========
Backend type Supported Active WIP Not started
================= ========== ============ ===========
Private disk/fs x
AWS S3 x
Microsoft Azure x
Backblaze B2 x
Google Cloud x
Openstack Swift x
================= ========== ============ ===========
.. important:: Should you want to request support for a new backend,
please do so by opening a `Github issue`_,
and filling out the "Feature Request" section of our
template.
To add support for a new backend to the official Cloudserver
repository, please follow these steps:
- familiarize yourself with our `Contributing Guidelines`_
- open a `Github issue`_, fill out the Feature Request form, and
specify that you would like to contribute it yourself;
- wait for our core team to get back to you with an answer on whether
we are interested in taking that contribution in (and hence
committing to maintaining it over time);
- once approved, fork the repository and start your development;
- use the `forum`_ with any question you may have during the
development process;
- when you think it's ready, let us know so that we create a feature
branch against which we'll compare and review your code;
- open a pull request with your changes against that dedicated feature
branch;
- once that pull request gets merged, you're done.
.. tip::
While we do take care of the final rebase (when we merge your feature
branch on the latest default branch), we do ask that you keep up to date with our latest default branch
until then.
.. important::
If we do not approve your feature request, you may of course still
work on supporting a new backend: all our "no" means is that we do not
have the resources, as part of our core development team, to maintain
this feature for the moment.
.. _GitHub issue: https://github.com/scality/S3/issues
.. _Contributing Guidelines: https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md
.. _forum: https://forum.zenko.io
@ -0,0 +1,53 @@
=================
Add A New Backend
=================
Supporting all possible public cloud storage APIs is Cloudserver's
ultimate goal. As an open source project, contributions are welcome.
The first step is to get familiar with building a custom Docker image
for Cloudserver.
Build a Custom Docker Image
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Clone Zenko's Cloudserver, install all dependencies and start the
service:
.. code-block:: shell
$ git clone https://github.com/scality/cloudserver
$ cd cloudserver
$ npm install
$ npm start
.. tip::
Some optional dependencies may fail, resulting in you seeing `NPM
WARN` messages; these can safely be ignored. Refer to the User
documentation for all available options.
Build the Docker image:
.. code-block:: shell
# docker build . -t \
{{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}
Push the newly created Docker image to your own hub:
.. code-block:: shell
# docker push \
{{YOUR_DOCKERHUB_ACCOUNT}}/cloudserver:{{OPTIONAL_VERSION_TAG}}
.. note::
To perform this last operation, you need to be authenticated with DockerHub
There are two main types of backend you could want Zenko to support:
- :doc:`S3 compatible data backends <s3-compatible-backend>`
- :doc:`Data backends using another protocol than the S3 protocol <non-s3-compatible-backend>`
@ -0,0 +1,530 @@
==========================================================
Adding support for data backends not supporting the S3 API
==========================================================
These backends abstract the complexity of multiple APIs to let users
work on a single common namespace across multiple clouds.
This document aims at introducing you to the right files in
Cloudserver (the Zenko stack's subcomponent in charge of API
translation, among other things) to add support to your own backend of
choice.
General configuration
~~~~~~~~~~~~~~~~~~~~~
There are a number of constants and environment variables to define to support a
new data backend; here is a list and where to find them:
:file:`/constants.js`
---------------------
* give your backend type a name, as part of the `externalBackends` object;
* specify whether versioning is implemented, as part of the
`versioningNotImplemented` object;
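As a rough sketch (the property names below are assumptions for a hypothetical
``ztore`` backend, not the exact contents of the file), the corresponding
additions to :file:`/constants.js` could look like:

.. code-block:: js

   // /constants.js (sketch): register a hypothetical "ztore" backend
   const constants = {
       // ...
       externalBackends: { aws_s3: true, azure: true, gcp: true, ztore: true },
       // list it here as long as your backend does not implement versioning
       versioningNotImplemented: { ztore: true },
       // ...
   };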
:file:`/lib/Config.js`
----------------------
* this is where you should put common utility functions, like the ones to parse
the location object from `locationConfig.json`;
* make sure you define environment variables (like `GCP_SERVICE_EMAIL`), as we'll
use those internally for the CI to test against the real remote backend;
:file:`/lib/data/external/{{backendName}}Client.js`
---------------------------------------------------
* this file is where you'll instantiate your backend client; this should be a
class with a constructor taking the config object built in `/lib/Config.js` as
parameter;
* over time, you may need some utility functions which we've defined in the
folder `/api/apiUtils`, and in the file `/lib/data/external/utils`;
:file:`/lib/data/external/utils.js`
-----------------------------------
* make sure to add options for `sourceLocationConstraintType` to be equal to
the name you gave your backend in :file:`/constants.js`;
:file:`/lib/data/external/{{BackendName}}_lib/`
-----------------------------------------------
* this folder is where you'll put the functions needed for supporting your
backend; keep your files as atomic as possible;
:file:`/tests/locationConfig/locationConfigTests.json`
------------------------------------------------------
* this file is where you'll create location profiles to be used by your
functional tests;
:file:`/lib/data/locationConstraintParser.js`
---------------------------------------------
* this is where you'll instantiate your client if the operation the end user
sent effectively writes to your backend; everything happens inside the
function `parseLC()`; you should add a condition that executes if
`locationObj.type` is the name of your backend (that you defined in
`constants.js`), and instantiates a client of yours. See pseudocode below,
assuming location type name is `ztore`:
.. code-block:: js
:linenos:
:emphasize-lines: 12
(...) //<1>
const ZtoreClient = require('./external/ZtoreClient');
const { config } = require('../Config'); //<1>
function parseLC(){ //<1>
(...) //<1>
Object.keys(config.locationConstraints).forEach(location => { //<1>
const locationObj = config.locationConstraints[location]; //<1>
(...) //<1>
if (locationObj.type === 'ztore') {
const ztoreEndpoint = config.getZtoreEndpoint(location);
const ztoreCredentials = config.getZtoreCredentials(location); //<2>
clients[location] = new ZtoreClient({
ztoreEndpoint,
ztoreCredentials,
ztoreBucketname: locationObj.details.ztoreBucketName,
bucketMatch: locationObj.details.BucketMatch,
dataStoreName: location,
}); //<3>
clients[location].clientType = 'ztore';
}
(...) //<1>
});
}
1. Code that is already there
2. You may need more utility functions depending on your backend specs
3. You may have more fields required in your constructor object depending on
your backend specs
Operation of type PUT
~~~~~~~~~~~~~~~~~~~~~
PUT routes are usually where people get started, as it's the easiest to check!
Simply go on your remote backend console and you'll be able to see whether your
object actually went up in the cloud...
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `put()` function is also called
`put()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`put(stream, size, keyContext, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
- `stream`: the stream of data you want to put in the cloud; if you're
unfamiliar with node.js streams, we suggest you start training, as we use
them a lot !
- `size`: the size of the object you're trying to put;
- `keyContext`: an object with metadata about the operation; common entries are
`namespace`, `bucketName`, `owner`, `cipherBundle`, and `tagging`; if these
are not sufficient for your integration, contact us to get architecture
validation before adding new entries;
- `reqUids`: the request unique ID used for logging;
- `callback`: your function's callback (should handle errors);
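A bare skeleton for such a method (a sketch only; `ZtoreClient` and its naming
scheme are hypothetical, reusing the fields built in `parseLC()` above) might
look like:

.. code-block:: js

   // /lib/data/external/ZtoreClient.js (sketch)
   class ZtoreClient {
       constructor(config) {
           // fields mirror the object built in parseLC() above
           this._endpoint = config.ztoreEndpoint;
           this._credentials = config.ztoreCredentials;
           this._bucketName = config.ztoreBucketname;
           this._dataStoreName = config.dataStoreName;
       }

       put(stream, size, keyContext, reqUids, callback) {
           // choose a key under which the backend will store the object;
           // the naming scheme is entirely up to your integration
           const key = `${keyContext.bucketName}/${Date.now()}`; // placeholder
           // ... upload `size` bytes read from `stream` to this._endpoint
           // under `key`, logging with `reqUids` ...
           // on success, hand the backend key back to the gateway
           return callback(null, key);
       }
   }

   module.exports = ZtoreClient;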
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your PUT operation, and
then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/put/put{{BackendName}}js`
-----------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
function returning adjusted `keys[]` to your tests.
Operation of type GET
~~~~~~~~~~~~~~~~~~~~~
GET routes are easy to test after PUT routes are implemented, hence why we're
covering them second.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `get()` function is also called
`get()`, and it's defined in `/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`get(objectGetInfo, range, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
- `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
data store, and `client`, the data store name;
- `range`: the range of bytes you will get, for "get-by-range" operations (we
recommend you do simple GETs first, and then look at this);
- `reqUids`: the request unique ID used for logging;
- `callback`: your function's callback (should handle errors);
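Continuing the hypothetical `ZtoreClient` sketch from the PUT section, a
matching `get()` could be shaped like this (the download helper is assumed,
not part of any existing API):

.. code-block:: js

   // /lib/data/external/ZtoreClient.js (sketch, continued)
   get(objectGetInfo, range, reqUids, callback) {
       const { key } = objectGetInfo; // the key returned by put()
       // ... open a readable stream from the remote backend for `key`,
       // honouring `range` when it is provided, logging with `reqUids` ...
       const stream = this._createDownloadStream(key, range); // hypothetical helper
       return callback(null, stream);
   }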
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your GET operation, and
then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
-----------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
.. note:: You should not need this section if you have followed the tutorial in order
(that is, if you have covered the PUT operation already)
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
Operation of type DELETE
~~~~~~~~~~~~~~~~~~~~~~~~
DELETE routes are easy to test after PUT routes are implemented, and they are
similar to GET routes in our implementation, hence why we're covering them
third.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `delete()` function is also called
`delete()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`delete(objectGetInfo, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
//TODO: generate this from jsdoc
* `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
data store, and `client`, the data store name;
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your DELETE operation,
and then import them in `/lib/data/external/{{BackendName}}Client.js`, to keep
your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/delete/delete{{BackendName}}js`
-----------------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
.. note:: You should not need this section if you have followed the
tutorial in order (that is, if you have covered the PUT operation
already)
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
Operation of type HEAD
~~~~~~~~~~~~~~~~~~~~~~
HEAD routes are very similar to DELETE routes in our implementation, hence why
we're covering them fourth.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `head()` function is also called
`head()`, and it's defined in :file:`/lib/data/multipleBackendGateway.js`;
- define a function with signature like
`head(objectGetInfo, reqUids, callback)`; this is worth exploring a
bit more as these parameters are the same for all backends:
// TODO:: generate this from jsdoc
* `objectGetInfo`: a dictionary with two entries: `key`, the object key in the
data store, and `client`, the data store name;
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your HEAD operation,
and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
your code clean;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/get/get{{BackendName}}js`
-----------------------------------------------------------------------------------
- every contribution should come with thorough functional tests, showing
nominal context gives expected behaviour, and error cases are handled in a way
that is standard with the backend (including error messages and code);
- the ideal setup is if you simulate your backend locally, so as not to be
subjected to network flakiness in the CI; however, we know there might not be
mockups available for every client; if that is the case of your backend, you
may test against the "real" endpoint of your data backend;
:file:`tests/functional/aws-node-sdk/test/multipleBackend/utils.js`
-------------------------------------------------------------------
.. note:: You should not need this section if you have followed the tutorial in order
(that is, if you have covered the PUT operation already)
- where you'll define a constant for your backend location matching your
:file:`/tests/locationConfig/locationConfigTests.json`
- depending on your backend, the sample `keys[]` and associated made up objects
may not work for you (if your backend's key format is different, for example);
if that is the case, you should add a custom `utils.get{{BackendName}}keys()`
Healthcheck
~~~~~~~~~~~
Healthchecks are used to make sure that a failure to write to a remote cloud is
due to a problem on that remote cloud, and not on Zenko's side.
This is usually done by trying to create a bucket that already exists, and
making sure you get the expected answer.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
- the function that is going to call your `healthcheck()` function is called
`checkExternalBackend()` and it's defined in
:file:`/lib/data/multipleBackendGateway.js`; you will need to add your own;
- your healthcheck function should get `location` as a parameter, which is an
object comprising:
* `reqUids`: the request unique ID used for logging;
* `callback`: your function's callback (should handle errors);
:file:`/lib/data/external/{{backendName}}_lib/{{backendName}}_create_bucket.js`
-------------------------------------------------------------------------------
- this is where you should write the function performing the actual bucket
creation;
:file:`/lib/data/external/{{backendName}}_lib/utils.js`
-------------------------------------------------------
- add an object named after your backend to the `backendHealth` dictionary,
with proper `response` and `time` entries; a sketch follows;
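As a sketch (the exact shape is an assumption and should be adapted to the
existing helpers in that file), the new entry could look like:

.. code-block:: js

   // /lib/data/external/ztore_lib/utils.js (sketch): extend the existing
   // backendHealth dictionary with an entry named after your backend
   const backendHealth = {
       // ...entries for the other backends...
       ztore: {
           response: undefined, // last healthcheck response
           time: 0,             // timestamp of the last healthcheck
       },
   };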
:file:`lib/data/multipleBackendGateway.js`
------------------------------------------
- edit the `healthcheck` function to add your location's array, and call your
healthcheck; see pseudocode below for a sample implementation, provided your
backend name is `ztore`
.. code-block:: js
:linenos:
(...) //<1>
healthcheck: (flightCheckOnStartUp, log, callback) => { //<1>
(...) //<1>
const ztoreArray = []; //<2>
async.each(Object.keys(clients), (location, cb) => { //<1>
(...) //<1>
} else if (client.clientType === 'ztore') {
ztoreArray.push(location); //<3>
return cb();
}
(...) //<1>
multBackendResp[location] = { code: 200, message: 'OK' }; //<1>
return cb();
}, () => { //<1>
async.parallel([
(...) //<1>
next => checkExternalBackend( //<4>
clients, ztoreArray, 'ztore', flightCheckOnStartUp,
externalBackendHealthCheckInterval, next),
] (...) //<1>
});
(...) //<1>
});
}
1. Code that is already there
2. The array that will store all locations of type 'ztore'
3. Where you add locations of type 'ztore' to the array
4. Where you actually call the healthcheck function on all 'ztore' locations
Multipart upload (MPU)
~~~~~~~~~~~~~~~~~~~~~~
This is the final part to supporting a new backend! MPU is far from
the easiest subject, but you've come so far it shouldn't be a problem.
These are the files you'll need to edit:
:file:`/lib/data/external/{{BackendName}}Client.js`
---------------------------------------------------
You'll be creating four functions with template signatures:
- `createMPU(Key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
cacheControl, contentDisposition, contentEncoding, log, callback)` will
initiate the multi part upload process; now, here, all parameters are
metadata headers except for:
* `Key`, the key id for the final object (collection of all parts);
* `bucketName`, the name of the bucket to which we will do an MPU;
* `log`, the logger;
- `uploadPart(request, streamingV4Params, stream, size, key, uploadId, partNumber, bucketName, log, callback)`
will be called for each part; the parameters can be described as follows:
* `request`, the request object for putting the part;
* `streamingV4Params`, parameters for auth V4 parameters against S3;
* `stream`, the node.js readable stream used to put the part;
* `size`, the size of the part;
* `key`, the key of the object;
* `uploadId`, multipart upload id string;
* `partNumber`, the number of the part in this MPU (ordered);
* `bucketName`, the name of the bucket to which we will do an MPU;
* `log`, the logger;
- `completeMPU(jsonList, mdInfo, key, uploadId, bucketName, log, callback)` will
end the MPU process once all parts are uploaded; parameters can be described
as follows:
* `jsonList`, user-sent list of parts to include in final mpu object;
* `mdInfo`, object containing 3 keys: storedParts, mpuOverviewKey, and
splitter;
* `key`, the key of the object;
* `uploadId`, multipart upload id string;
* `bucketName`, name of bucket;
* `log`, logger instance:
- `abortMPU(key, uploadId, bucketName, log, callback)` will handle errors, and
make sure that all parts that may have been uploaded will be deleted if the
MPU ultimately fails; the parameters are:
* `key`, the key of the object;
* `uploadId`, multipart upload id string;
* `bucketName`, name of bucket;
* `log`, logger instance.
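Put together, the MPU surface of the hypothetical `ZtoreClient` is four stubs
with the signatures above (a sketch only; the bodies and the values handed to
the callbacks depend entirely on your backend's API and your gateway
integration):

.. code-block:: js

   // /lib/data/external/ZtoreClient.js (sketch, continued)
   createMPU(key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
       cacheControl, contentDisposition, contentEncoding, log, callback) {
       // ask the backend for a new multipart upload and return its id
       return callback(null, { UploadId: 'backend-upload-id' }); // placeholder value
   }

   uploadPart(request, streamingV4Params, stream, size, key, uploadId,
       partNumber, bucketName, log, callback) {
       // send one part to the backend; the value handed to the callback is
       // whatever your gateway integration expects (an ETag, typically)
       return callback(null, { ETag: 'part-etag' }); // placeholder value
   }

   completeMPU(jsonList, mdInfo, key, uploadId, bucketName, log, callback) {
       // tell the backend to assemble the listed parts into the final object
       return callback(null);
   }

   abortMPU(key, uploadId, bucketName, log, callback) {
       // clean up every part already uploaded for this uploadId
       return callback(null);
   }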
:file:`/lib/api/objectPutPart.js`
---------------------------------
- you'll need to add your backend type in appropriate sections (simply look for
other backends already implemented).
:file:`/lib/data/external/{{backendName}}_lib/`
-----------------------------------------------
- this is where you should put all utility functions for your MPU operations,
and then import them in :file:`/lib/data/external/{{BackendName}}Client.js`, to keep
your code clean;
:file:`lib/data/multipleBackendGateway.js`
------------------------------------------
- edit the `createMPU` function to add your location type, and call your
`createMPU()`; see pseudocode below for a sample implementation, provided your
backend name is `ztore`
.. code-block:: javascript
:linenos:
(...) //<1>
createMPU:(key, metaHeaders, bucketName, websiteRedirectHeader, //<1>
location, contentType, cacheControl, contentDisposition,
contentEncoding, log, cb) => {
const client = clients[location]; //<1>
if (client.clientType === 'aws_s3') { //<1>
return client.createMPU(key, metaHeaders, bucketName,
websiteRedirectHeader, contentType, cacheControl,
contentDisposition, contentEncoding, log, cb);
} else if (client.clientType === 'ztore') { //<2>
return client.createMPU(key, metaHeaders, bucketName,
websiteRedirectHeader, contentType, cacheControl,
contentDisposition, contentEncoding, log, cb);
}
return cb();
};
(...) //<1>
1. Code that is already there
2. Where the `createMPU()` of your client is actually called
Add functional tests
~~~~~~~~~~~~~~~~~~~~
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/initMPU/{{BackendName}}InitMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/listParts/{{BackendName}}ListPart.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuAbort/{{BackendName}}AbortMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuComplete/{{BackendName}}CompleteMPU.js`
* :file:`tests/functional/aws-node-sdk/test/multipleBackend/mpuParts/{{BackendName}}UploadPart.js`
Adding support in Orbit, Zenko's UI for simplified Multi Cloud Management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This can only be done by our core developers' team. Once your backend
integration is merged, you may open a feature request on the
`Zenko repository`_, and we will
get back to you after we evaluate feasibility and maintainability.
.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new
@ -0,0 +1,43 @@
======================
S3 compatible backends
======================
Adding support in Zenko's Cloudserver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is the easiest case for backend support integration: there is nothing to do
but configuration! Follow the steps described in our
:ref:`use-public-cloud` and make sure you:
- set ``details.awsEndpoint`` to your storage provider endpoint;
- use ``details.credentials`` and *not* ``details.credentialsProfile`` to set your
credentials for that S3-compatible backend.
For example, if you're using a Wasabi bucket as a backend, then your region
definition for that backend will look something like:
::
"wasabi-bucket-zenkobucket": {
"type": "aws_s3",
"legacyAwsBehavior": true,
"details": {
"awsEndpoint": "s3.wasabisys.com",
"bucketName": "zenkobucket",
"bucketMatch": true,
"credentials": {
"accessKey": "\\{YOUR_WASABI_ACCESS_KEY}",
"secretKey": "\\{YOUR_WASABI_SECRET_KEY}"
}
}
},
Adding support in Zenko Orbit
#############################
This can only be done by our core developers' team. If that's what you're
after, open a feature request on the `Zenko repository`_, and we will
get back to you after we evaluate feasibility and maintainability.
.. _Zenko repository: https://www.github.com/scality/Zenko/issues/new
@ -1,11 +1,12 @@
Scality Zenko CloudServer
=========================
.. _user-docs:
.. toctree::
:maxdepth: 2
:caption: Documentation
:glob:
CONTRIBUTING
GETTING_STARTED
@ -14,3 +15,4 @@ Scality Zenko CloudServer
DOCKER
INTEGRATIONS
ARCHITECTURE
developers/*
docs/requirements.in Normal file
@ -0,0 +1,2 @@
Sphinx >= 1.7.5
recommonmark >= 0.4.0
docs/requirements.txt Normal file
@ -0,0 +1,119 @@
#
# This file is autogenerated by pip-compile
# To update, run:
#
# tox -e pip-compile
#
alabaster==0.7.12 \
--hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \
--hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 \
# via sphinx
babel==2.6.0 \
--hash=sha256:6778d85147d5d85345c14a26aada5e478ab04e39b078b0745ee6870c2b5cf669 \
--hash=sha256:8cba50f48c529ca3fa18cf81fa9403be176d374ac4d60738b839122dfaaa3d23 \
# via sphinx
certifi==2018.10.15 \
--hash=sha256:339dc09518b07e2fa7eda5450740925974815557727d6bd35d319c1524a04a4c \
--hash=sha256:6d58c986d22b038c8c0df30d639f23a3e6d172a05c3583e766f4c0b785c0986a \
# via requests
chardet==3.0.4 \
--hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
--hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
# via requests
commonmark==0.5.4 \
--hash=sha256:34d73ec8085923c023930dfc0bcd1c4286e28a2a82de094bb72fabcc0281cbe5 \
# via recommonmark
docutils==0.14 \
--hash=sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6 \
--hash=sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274 \
--hash=sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6 \
# via recommonmark, sphinx
idna==2.7 \
--hash=sha256:156a6814fb5ac1fc6850fb002e0852d56c0c8d2531923a51032d1b70760e186e \
--hash=sha256:684a38a6f903c1d71d6d5fac066b58d7768af4de2b832e426ec79c30daa94a16 \
# via requests
imagesize==1.1.0 \
--hash=sha256:3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8 \
--hash=sha256:f3832918bc3c66617f92e35f5d70729187676313caa60c187eb0f28b8fe5e3b5 \
# via sphinx
jinja2==2.10 \
--hash=sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd \
--hash=sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4 \
# via sphinx
markupsafe==1.1.0 \
--hash=sha256:048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432 \
--hash=sha256:130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b \
--hash=sha256:19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9 \
--hash=sha256:1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af \
--hash=sha256:1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834 \
--hash=sha256:1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd \
--hash=sha256:1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d \
--hash=sha256:31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7 \
--hash=sha256:3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b \
--hash=sha256:4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3 \
--hash=sha256:525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c \
--hash=sha256:52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2 \
--hash=sha256:52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7 \
--hash=sha256:5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36 \
--hash=sha256:5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1 \
--hash=sha256:5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e \
--hash=sha256:7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1 \
--hash=sha256:83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c \
--hash=sha256:857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856 \
--hash=sha256:98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550 \
--hash=sha256:bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492 \
--hash=sha256:d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672 \
--hash=sha256:e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401 \
--hash=sha256:edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6 \
--hash=sha256:efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6 \
--hash=sha256:f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c \
--hash=sha256:f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd \
--hash=sha256:fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1 \
# via jinja2
packaging==18.0 \
--hash=sha256:0886227f54515e592aaa2e5a553332c73962917f2831f1b0f9b9f4380a4b9807 \
--hash=sha256:f95a1e147590f204328170981833854229bb2912ac3d5f89e2a8ccd2834800c9 \
# via sphinx
pygments==2.2.0 \
--hash=sha256:78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d \
--hash=sha256:dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc \
# via sphinx
pyparsing==2.3.0 \
--hash=sha256:40856e74d4987de5d01761a22d1621ae1c7f8774585acae358aa5c5936c6c90b \
--hash=sha256:f353aab21fd474459d97b709e527b5571314ee5f067441dc9f88e33eecd96592 \
# via packaging
pytz==2018.7 \
--hash=sha256:31cb35c89bd7d333cd32c5f278fca91b523b0834369e757f4c5641ea252236ca \
--hash=sha256:8e0f8568c118d3077b46be7d654cc8167fa916092e28320cde048e54bfc9f1e6 \
# via babel
recommonmark==0.4.0 \
--hash=sha256:6e29c723abcf5533842376d87c4589e62923ecb6002a8e059eb608345ddaff9d \
--hash=sha256:cd8bf902e469dae94d00367a8197fb7b81fcabc9cfb79d520e0d22d0fbeaa8b7
requests==2.20.1 \
--hash=sha256:65b3a120e4329e33c9889db89c80976c5272f56ea92d3e74da8a463992e3ff54 \
--hash=sha256:ea881206e59f41dbd0bd445437d792e43906703fff75ca8ff43ccdb11f33f263 \
# via sphinx
six==1.11.0 \
--hash=sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9 \
--hash=sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb \
# via packaging, sphinx
snowballstemmer==1.2.1 \
--hash=sha256:919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128 \
--hash=sha256:9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89 \
# via sphinx
sphinx==1.8.2 \
--hash=sha256:120732cbddb1b2364471c3d9f8bfd4b0c5b550862f99a65736c77f970b142aea \
--hash=sha256:b348790776490894e0424101af9c8413f2a86831524bd55c5f379d3e3e12ca64
sphinxcontrib-websupport==1.1.0 \
--hash=sha256:68ca7ff70785cbe1e7bccc71a48b5b6d965d79ca50629606c7861a21b206d9dd \
--hash=sha256:9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9 \
# via sphinx
typing==3.6.6 \
--hash=sha256:4027c5f6127a6267a435201981ba156de91ad0d1d98e9ddc2aa173453453492d \
--hash=sha256:57dcf675a99b74d64dacf6fba08fb17cf7e3d5fdff53d4a30ea2a5e7e52543d4 \
--hash=sha256:a4c8473ce11a65999c8f59cb093e70686b6c84c98df58c1dae9b3b196089858a \
# via sphinx
urllib3==1.24.1 \
--hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
--hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
# via requests
@ -96,6 +96,14 @@ models:
s3:
filename: /artifacts/s3.log
follow: true
- ShellCommand: &add-hostname
name: add hostname
command: |
echo "127.0.0.1 testrequestbucket.localhost" >> /etc/hosts
echo \
"127.0.0.1 bucketwebsitetester.s3-website-us-east-1.amazonaws.com" \
>> /etc/hosts
haltOnFailure: True
- ShellCommand: &setup-junit-upload
name: preparing junit files for upload
command: |
@ -133,6 +141,7 @@ stages:
steps:
- Git: *clone
- ShellCommand: *npm-install
- ShellCommand: *add-hostname
- ShellCommand: *credentials
- ShellCommand:
name: Linting
@ -146,6 +155,8 @@ stages:
name: Unit Coverage
command: |
set -ex
unset HTTP_PROXY HTTPS_PROXY NO_PROXY
unset http_proxy https_proxy no_proxy
mkdir -p $CIRCLE_TEST_REPORTS/unit
npm test
npm run test_legacy_location
@ -10,6 +10,7 @@ spec:
- ip: "127.0.0.1"
hostnames:
- "bucketwebsitetester.s3-website-us-east-1.amazonaws.com"
- "testrequestbucket.localhost"
- "pykmip.local" - "pykmip.local"
{% if vars.pykmip is defined and vars.pykmip == 'enabled' -%} {% if vars.pykmip is defined and vars.pykmip == 'enabled' -%}
initContainers: initContainers:
examples/go-md-search.go Normal file
@ -0,0 +1,46 @@
package main
import (
"fmt"
"time"
"bytes"
"net/http"
"net/url"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/signer/v4"
)
func main() {
// Input AWS access key, secret key
aws_access_key_id := "accessKey1"
aws_secret_access_key := "verySecretKey1"
endpoint := "http://localhost:8000"
bucket_name := "bucketname"
searchQuery := url.QueryEscape("x-amz-meta-color=blue")
buf := bytes.NewBuffer([]byte{})
requestUrl := fmt.Sprintf("%s/%s?search=%s",
endpoint, bucket_name, searchQuery)
request, err := http.NewRequest("GET", requestUrl, buf)
if err != nil {
panic(err)
}
reader := bytes.NewReader(buf.Bytes())
credentials := credentials.NewStaticCredentials(aws_access_key_id,
aws_secret_access_key, "")
signer := v4.NewSigner(credentials)
signer.Sign(request, reader, "s3", "us-east-1", time.Now())
client := &http.Client{}
resp, err := client.Do(request)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
fmt.Println(string(body))
}
@ -0,0 +1,28 @@
const { S3 } = require('aws-sdk');
const config = {
sslEnabled: false,
endpoint: 'http://127.0.0.1:8000',
signatureCache: false,
signatureVersion: 'v4',
region: 'us-east-1',
s3ForcePathStyle: true,
accessKeyId: 'accessKey1',
secretAccessKey: 'verySecretKey1',
};
const s3Client = new S3(config);
const encodedSearch =
encodeURIComponent('x-amz-meta-color="blue"');
const req = s3Client.listObjects({ Bucket: 'bucketname' });
// the build event
req.on('build', () => {
req.httpRequest.path = `${req.httpRequest.path}?search=${encodedSearch}`;
});
req.on('success', res => {
process.stdout.write(`Result ${res.data}`);
});
req.on('error', err => {
process.stdout.write(`Error ${err}`);
});
req.send();
@ -0,0 +1,79 @@
import datetime
import hashlib
import hmac
import urllib
# pip install requests
import requests
access_key = 'accessKey1'
secret_key = 'verySecretKey1'
method = 'GET'
service = 's3'
host = 'localhost:8000'
region = 'us-east-1'
canonical_uri = '/bucketname'
query = 'x-amz-meta-color=blue'
canonical_querystring = 'search=%s' % (urllib.quote(query))
algorithm = 'AWS4-HMAC-SHA256'
t = datetime.datetime.utcnow()
amz_date = t.strftime('%Y%m%dT%H%M%SZ')
date_stamp = t.strftime('%Y%m%d')
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def getSignatureKey(key, date_stamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
payload_hash = hashlib.sha256('').hexdigest()
canonical_headers = \
'host:{0}\nx-amz-content-sha256:{1}\nx-amz-date:{2}\n' \
.format(host, payload_hash, amz_date)
signed_headers = 'host;x-amz-content-sha256;x-amz-date'
canonical_request = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}' \
.format(method, canonical_uri, canonical_querystring, canonical_headers,
signed_headers, payload_hash)
print canonical_request
credential_scope = '{0}/{1}/{2}/aws4_request' \
.format(date_stamp, region, service)
string_to_sign = '{0}\n{1}\n{2}\n{3}' \
.format(algorithm, amz_date, credential_scope,
hashlib.sha256(canonical_request).hexdigest())
signing_key = getSignatureKey(secret_key, date_stamp, region, service)
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'),
hashlib.sha256).hexdigest()
authorization_header = \
'{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}' \
.format(algorithm, access_key, credential_scope, signed_headers, signature)
# The 'host' header is added automatically by the Python 'requests' library.
headers = {
'X-Amz-Content-Sha256': payload_hash,
'X-Amz-Date': amz_date,
'Authorization': authorization_header
}
endpoint = 'http://' + host + canonical_uri + '?' + canonical_querystring
r = requests.get(endpoint, headers=headers)
print (r.text)
@ -10,15 +10,64 @@ const cronParser = require('cron-parser');
const { isValidBucketName } = require('arsenal').s3routes.routesUtils;
const validateAuthConfig = require('arsenal').auth.inMemory.validateAuthConfig;
const { buildAuthDataAccount } = require('./auth/in_memory/builder');
const validExternalBackends = require('../constants').externalBackends;
const { azureAccountNameRegex, base64Regex } = require('../constants');
// whitelist IP, CIDR for health checks
const defaultHealthChecks = { allowFrom: ['127.0.0.1/8', '::1'] };
const defaultLocalCache = { host: '127.0.0.1', port: 6379 };
const defaultExternalBackendsConfig = {
// eslint-disable-next-line camelcase
aws_s3: {
httpAgent: {
keepAlive: false,
keepAliveMsecs: 1000,
maxFreeSockets: 256,
maxSockets: null,
},
},
gcp: {
httpAgent: {
keepAlive: true,
keepAliveMsecs: 1000,
maxFreeSockets: 256,
maxSockets: null,
},
},
};
function assertCertPaths(key, cert, ca, basePath) {
const certObj = {};
certObj.paths = {};
certObj.certs = {};
if (key) {
const keypath = key.startsWith('/') ? key : `${basePath}/${key}`;
assert.doesNotThrow(() =>
fs.accessSync(keypath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${keypath}`);
certObj.paths.key = keypath;
certObj.certs.key = fs.readFileSync(keypath, 'ascii');
}
if (cert) {
const certpath = cert.startsWith('/') ? cert : `${basePath}/${cert}`;
assert.doesNotThrow(() =>
fs.accessSync(certpath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${certpath}`);
certObj.paths.cert = certpath;
certObj.certs.cert = fs.readFileSync(certpath, 'ascii');
}
if (ca) {
const capath = ca.startsWith('/') ? ca : `${basePath}/${ca}`;
assert.doesNotThrow(() =>
fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`);
certObj.paths.ca = capath;
certObj.certs.ca = fs.readFileSync(capath, 'ascii');
}
return certObj;
}
function sproxydAssert(configSproxyd) {
const sproxydFields = [];
@ -62,57 +111,18 @@ function gcpLocationConstraintAssert(location, locationObj) {
gcpEndpoint,
bucketName,
mpuBucketName,
overflowBucketName,
serviceCredentials,
} = locationObj.details;
const serviceKeyFileFromEnv =
process.env[`${location}_GCP_SERVICE_KEYFILE`];
const serviceEmailFromEnv =
process.env[`${location}_GCP_SERVICE_EMAIL`];
const serviceKeyFromEnv =
process.env[`${location}_GCP_SERVICE_KEY`];
const serviceScopeFromEnv =
process.env[`${location}_GCP_SERVICE_SCOPE`];
const scopes = serviceScopeFromEnv || serviceCredentials &&
serviceCredentials.scopes || gcpScope;
const keyFilename = serviceKeyFileFromEnv || serviceCredentials &&
serviceCredentials.keyFilename;
const serviceEmail = serviceEmailFromEnv || serviceCredentials &&
serviceCredentials.serviceEmail;
const serviceKey = serviceKeyFromEnv || serviceCredentials &&
serviceCredentials.serviceKey;
const stringFields = [
gcpEndpoint,
bucketName,
mpuBucketName,
overflowBucketName,
];
assert(typeof scopes === 'string', `bad config: ${location} ` +
'serviceCredentials scopes must be a string');
stringFields.forEach(field => {
if (field !== undefined) {
assert(typeof field === 'string',
`bad config: ${field} must be a string`);
}
});
assert.strictEqual(
[keyFilename, (serviceEmail && serviceKey)].some(param => param),
true, `bad location constriant: "${location}" ` +
'serviceCredentials keyFilename and/or both serviceEmail and ' +
'serviceKey must be set in locationConfig or environment variable');
if (keyFilename) {
assert.strictEqual(typeof keyFilename, 'string',
`bad location constriant: "${location}" serviceCredentials ` +
`keyFilename "${keyFilename}" must be a string`);
} else {
assert.strictEqual(typeof serviceEmail, 'string',
`bad location constriant: "${location}" serviceCredentials ` +
`serviceEmail "${serviceEmail}" must be a string`);
assert.strictEqual(typeof serviceKey, 'string',
`bad location constriant: "${location}"" serviceCredentials ` +
`serviceKey "${serviceKey}" must be a string`);
}
}
function azureLocationConstraintAssert(location, locationObj) {
@ -158,7 +168,7 @@ function azureLocationConstraintAssert(location, locationObj) {
function locationConstraintAssert(locationConstraints) {
const supportedBackends =
['mem', 'file', 'scality',
'mongodb'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => {
@ -170,25 +180,45 @@ function locationConstraintAssert(locationConstraints) {
assert(supportedBackends.indexOf(locationConstraints[l].type) > -1,
'bad config: locationConstraints[region].type must ' +
`be one of ${supportedBackends}`);
assert(typeof locationConstraints[l].objectId === 'string',
'bad config: locationConstraints[region].objectId is ' +
'mandatory and must be a unique string across locations');
assert(Object.keys(locationConstraints)
.filter(loc => (locationConstraints[loc].objectId ===
locationConstraints[l].objectId))
.length === 1,
'bad config: location constraint objectId ' +
`"${locationConstraints[l].objectId}" is not unique across ` +
'configured locations');
assert(typeof locationConstraints[l].legacyAwsBehavior
=== 'boolean',
'bad config: locationConstraints[region]' +
'.legacyAwsBehavior is mandatory and must be a boolean');
assert(['undefined', 'boolean'].includes(
typeof locationConstraints[l].isTransient),
'bad config: locationConstraints[region]' +
'.isTransient must be a boolean');
if (locationConstraints[l].sizeLimitGB !== undefined) {
assert(typeof locationConstraints[l].sizeLimitGB === 'number' ||
locationConstraints[l].sizeLimitGB === null,
'bad config: locationConstraints[region].sizeLimitGB ' +
'must be a number (in gigabytes)');
}
const details = locationConstraints[l].details;
assert(typeof details === 'object',
'bad config: locationConstraints[region].details is ' +
'mandatory and must be an object');
if (details.serverSideEncryption !== undefined) {
assert(typeof details.serverSideEncryption === 'boolean',
'bad config: locationConstraints[region]' +
'.details.serverSideEncryption must be a boolean');
}
assert(typeof locationConstraints[l].details
=== 'object',
'bad config: locationConstraints[region].details is ' +
'mandatory and must be an object');
const details = locationConstraints[l].details;
const stringFields = [ const stringFields = [
'awsEndpoint', 'awsEndpoint',
'bucketName', 'bucketName',
'credentialsProfile', 'credentialsProfile',
'region',
]; ];
stringFields.forEach(field => { stringFields.forEach(field => {
if (details[field] !== undefined) { if (details[field] !== undefined) {
@ -208,28 +238,6 @@ function locationConstraintAssert(locationConstraints) {
assert(typeof details.credentials.secretKey === 'string', assert(typeof details.credentials.secretKey === 'string',
'bad config: credentials must include secretKey as string'); 'bad config: credentials must include secretKey as string');
} }
if (details.proxy !== undefined) {
const { protocol, hostname, port, auth } = url.parse(details.proxy);
assert(protocol === 'http:' || protocol === 'https:',
'bad config: protocol must be http or https in ' +
'locationConstraints[region].details');
assert(typeof hostname === 'string' && hostname !== '',
'bad config: hostname must be a non-empty string');
if (port) {
const portInt = Number.parseInt(port, 10);
assert(!Number.isNaN(portInt) && portInt > 0, 'bad config: ' +
'locationConstraints[region].details port must be a ' +
'number greater than 0');
}
if (auth) {
assert(typeof auth === 'string',
'bad config: proxy auth must be string');
const authArray = auth.split(':');
assert(authArray.length === 2 && authArray[0].length > 0
&& authArray[1].length > 0, 'bad config: proxy auth ' +
'must be of format username:password');
}
}
if (details.https !== undefined) { if (details.https !== undefined) {
assert(typeof details.https === 'boolean', 'bad config: ' + assert(typeof details.https === 'boolean', 'bad config: ' +
'locationConstraints[region].details https must be a boolean'); 'locationConstraints[region].details https must be a boolean');
@ -237,6 +245,24 @@ function locationConstraintAssert(locationConstraints) {
// eslint-disable-next-line no-param-reassign // eslint-disable-next-line no-param-reassign
locationConstraints[l].details.https = true; locationConstraints[l].details.https = true;
} }
if (details.pathStyle !== undefined) {
assert(typeof details.pathStyle === 'boolean', 'bad config: ' +
'locationConstraints[region].pathStyle must be a boolean');
} else {
// eslint-disable-next-line no-param-reassign
locationConstraints[l].details.pathStyle = false;
}
if (details.supportsVersioning !== undefined) {
assert(typeof details.supportsVersioning === 'boolean',
'bad config: locationConstraints[region].supportsVersioning ' +
'must be a boolean');
} else {
// default to true
// eslint-disable-next-line no-param-reassign
locationConstraints[l].details.supportsVersioning = true;
}
if (locationConstraints[l].type === 'azure') { if (locationConstraints[l].type === 'azure') {
azureLocationConstraintAssert(l, locationConstraints[l]); azureLocationConstraintAssert(l, locationConstraints[l]);
} }
@ -305,6 +331,9 @@ class Config extends EventEmitter {
if (process.env.S3_LOCATION_FILE !== undefined) { if (process.env.S3_LOCATION_FILE !== undefined) {
this.locationConfigPath = process.env.S3_LOCATION_FILE; this.locationConfigPath = process.env.S3_LOCATION_FILE;
} }
if (process.env.S3_REPLICATION_FILE !== undefined) {
this.replicationConfigPath = process.env.S3_REPLICATION_FILE;
}
// Read config automatically // Read config automatically
this._getLocationConfig(); this._getLocationConfig();
@ -397,6 +426,17 @@ class Config extends EventEmitter {
} catch (err) { } catch (err) {
throw new Error(`could not parse config file: ${err.message}`); throw new Error(`could not parse config file: ${err.message}`);
} }
if (this.replicationConfigPath) {
try {
const repData = fs.readFileSync(this.replicationConfigPath,
{ encoding: 'utf-8' });
const replicationEndpoints = JSON.parse(repData);
config.replicationEndpoints.push(...replicationEndpoints);
} catch (err) {
throw new Error(
`could not parse replication file: ${err.message}`);
}
}
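For illustration, the file referenced by S3_REPLICATION_FILE is expected to hold a JSON array of endpoint objects that pass the `replicationEndpoints` validation further down; the site names and server address below are hypothetical:
// contents of the file pointed to by S3_REPLICATION_FILE (illustrative values)
[
    { "site": "aws-us-east-1", "type": "aws_s3" },
    { "site": "zenko-local", "servers": ["127.0.0.1:8001"] }
]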
this.port = 8000; this.port = 8000;
if (config.port !== undefined) { if (config.port !== undefined) {
@ -458,10 +498,10 @@ class Config extends EventEmitter {
assert.notStrictEqual(site, '', 'bad config: `site` property ' + assert.notStrictEqual(site, '', 'bad config: `site` property ' +
"of object in `replicationEndpoints` must not be ''"); "of object in `replicationEndpoints` must not be ''");
if (type !== undefined) { if (type !== undefined) {
assert(externalBackends[type], 'bad config: `type` ' + assert(validExternalBackends[type], 'bad config: `type` ' +
'property of `replicationEndpoints` object must be ' + 'property of `replicationEndpoints` object must be ' +
'a valid external backend (one of: "' + 'a valid external backend (one of: "' +
`${Object.keys(externalBackends).join('", "')}")`); `${Object.keys(validExternalBackends).join('", "')}")`);
} else { } else {
assert.notStrictEqual(servers, undefined, 'bad config: ' + assert.notStrictEqual(servers, undefined, 'bad config: ' +
'each object of `replicationEndpoints` array that is ' + 'each object of `replicationEndpoints` array that is ' +
@ -480,6 +520,15 @@ class Config extends EventEmitter {
this.replicationEndpoints = replicationEndpoints; this.replicationEndpoints = replicationEndpoints;
} }
if (config.backbeat) {
const { backbeat } = config;
assert.strictEqual(typeof backbeat.host, 'string',
'bad config: backbeat host must be a string');
assert(Number.isInteger(backbeat.port) && backbeat.port > 0,
'bad config: backbeat port must be a positive integer');
this.backbeat = backbeat;
}
// legacy // legacy
if (config.regions !== undefined) { if (config.regions !== undefined) {
throw new Error('bad config: regions key is deprecated. ' + throw new Error('bad config: regions key is deprecated. ' +
@ -626,6 +675,8 @@ class Config extends EventEmitter {
this.dataDaemon.dataPath = this.dataDaemon.dataPath =
process.env.S3DATAPATH ? process.env.S3DATAPATH ?
process.env.S3DATAPATH : `${__dirname}/../localData`; process.env.S3DATAPATH : `${__dirname}/../localData`;
this.dataDaemon.noSync = process.env.S3DATA_NOSYNC === 'true';
this.dataDaemon.noCache = process.env.S3DATA_NOCACHE === 'true';
} }
if (config.metadataDaemon) { if (config.metadataDaemon) {
@ -669,21 +720,53 @@ class Config extends EventEmitter {
assert(typeof config.localCache === 'object', assert(typeof config.localCache === 'object',
'config: invalid local cache configuration. localCache must ' + 'config: invalid local cache configuration. localCache must ' +
'be an object'); 'be an object');
assert(typeof config.localCache.host === 'string', if (config.localCache.sentinels) {
'config: invalid host for localCache. host must be a string'); this.localCache = { sentinels: [], name: null };
assert(typeof config.localCache.port === 'number',
'config: invalid port for localCache. port must be a number'); assert(typeof config.localCache.name === 'string',
if (config.localCache.password !== undefined) { 'bad config: localCache sentinel name must be a string');
assert( this.localCache.name = config.localCache.name;
this._verifyRedisPassword(config.localCache.password),
'config: invalid password for localCache. password must' + assert(Array.isArray(config.localCache.sentinels) ||
typeof config.localCache.sentinels === 'string',
'bad config: localCache sentinels ' +
'must be an array or string');
if (typeof config.localCache.sentinels === 'string') {
config.localCache.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.localCache.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.localCache.sentinels)) {
config.localCache.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: localCache ' +
'sentinel host must be a string');
assert(typeof port === 'number',
'bad config: localCache ' +
'sentinel port must be a number');
this.localCache.sentinels.push({ host, port });
});
}
} else {
assert(typeof config.localCache.host === 'string',
'config: bad host for localCache. host must be a string');
assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) {
assert(
this._verifyRedisPassword(config.localCache.password),
'config: vad password for localCache. password must' +
' be a string'); ' be a string');
}
this.localCache = {
host: config.localCache.host,
port: config.localCache.port,
password: config.localCache.password,
};
} }
this.localCache = {
host: config.localCache.host,
port: config.localCache.port,
password: config.localCache.password,
};
} }
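For illustration, both shapes accepted by the new localCache branch are sketched below; the host addresses, ports and sentinel group name are hypothetical:
// sentinel mode, as an array of { host, port } objects
"localCache": {
    "name": "mymaster",
    "sentinels": [{ "host": "10.0.0.1", "port": 16379 },
                  { "host": "10.0.0.2", "port": 16379 }]
}
// sentinel mode, as a comma-separated "host:port" string
"localCache": { "name": "mymaster", "sentinels": "10.0.0.1:16379,10.0.0.2:16379" }
// standalone mode (previous behaviour)
"localCache": { "host": "localhost", "port": 6379 }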
if (config.mongodb) { if (config.mongodb) {
@ -762,49 +845,52 @@ class Config extends EventEmitter {
assert(config.localCache, 'missing required property of utapi ' + assert(config.localCache, 'missing required property of utapi ' +
'configuration: localCache'); 'configuration: localCache');
this.utapi.localCache = config.localCache; this.utapi.localCache = config.localCache;
assert(config.utapi.redis, 'missing required property of utapi ' + assert(config.redis, 'missing required property of utapi ' +
'configuration: redis'); 'configuration: redis');
if (config.utapi.redis.sentinels) { if (config.utapi.redis) {
this.utapi.redis = { sentinels: [], name: null }; if (config.utapi.redis.sentinels) {
this.utapi.redis = { sentinels: [], name: null };
assert(typeof config.utapi.redis.name === 'string', assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string'); 'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name; this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels), assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array'); 'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => { config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item; const { host, port } = item;
assert(typeof host === 'string', assert(typeof host === 'string',
'bad config: redis sentinel host must be a string'); 'bad config: redis sentinel host must be a string');
assert(typeof port === 'number', assert(typeof port === 'number',
'bad config: redis sentinel port must be a number'); 'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port }); this.utapi.redis.sentinels.push({ host, port });
}); });
} else { } else {
// check for standalone configuration // check for standalone configuration
this.utapi.redis = {}; this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string', assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string'); 'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number', assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number'); 'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host; this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port; this.utapi.redis.port = config.utapi.redis.port;
} }
if (config.utapi.redis.password !== undefined) { if (config.utapi.redis.password !== undefined) {
assert( assert(
this._verifyRedisPassword(config.utapi.redis.password), this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' + 'config: invalid password for utapi redis. password' +
' must be a string'); ' must be a string');
this.utapi.redis.password = config.utapi.redis.password; this.utapi.redis.password = config.utapi.redis.password;
} }
if (config.utapi.redis.sentinelPassword !== undefined) { if (config.utapi.redis.sentinelPassword !== undefined) {
assert( assert(
this._verifyRedisPassword(config.utapi.redis.sentinelPassword), this._verifyRedisPassword(
'config: invalid password for utapi redis. password' + config.utapi.redis.sentinelPassword),
' must be a string'); 'config: invalid password for utapi redis. password' +
this.utapi.redis.sentinelPassword = ' must be a string');
config.utapi.redis.sentinelPassword; this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
} }
if (config.utapi.metrics) { if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics; this.utapi.metrics = config.utapi.metrics;
@ -878,6 +964,13 @@ class Config extends EventEmitter {
this.utapi.reindex = config.utapi.reindex; this.utapi.reindex = config.utapi.reindex;
} }
} }
if (Object.keys(this.locationConstraints).some(
loc => this.locationConstraints[loc].sizeLimitGB)) {
assert(this.utapi && this.utapi.metrics &&
this.utapi.metrics.includes('location'),
'bad config: if storage size limit set on a location ' +
'constraint, Utapi must also be configured correctly');
}
this.log = { logLevel: 'debug', dumpLevel: 'error' }; this.log = { logLevel: 'debug', dumpLevel: 'error' };
if (config.log !== undefined) { if (config.log !== undefined) {
@ -1040,38 +1133,77 @@ class Config extends EventEmitter {
!config.certFilePaths.ca) !config.certFilePaths.ca)
); );
} }
const { key, cert, ca } = config.certFilePaths ? const { key, cert, ca } = config.certFilePaths ?
config.certFilePaths : {}; config.certFilePaths : {};
let certObj = undefined;
if (key && cert) { if (key && cert) {
const keypath = (key[0] === '/') ? key : `${this._basePath}/${key}`; certObj = assertCertPaths(key, cert, ca, this._basePath);
const certpath = (cert[0] === '/') ?
cert : `${this._basePath}/${cert}`;
let capath;
if (ca) {
capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`;
assert.doesNotThrow(() =>
fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`);
}
assert.doesNotThrow(() =>
fs.accessSync(keypath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${keypath}`);
assert.doesNotThrow(() =>
fs.accessSync(certpath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${certpath}`);
this.https = {
cert: fs.readFileSync(certpath, 'ascii'),
key: fs.readFileSync(keypath, 'ascii'),
ca: ca ? fs.readFileSync(capath, 'ascii') : undefined,
};
this.httpsPath = {
ca: capath,
cert: certpath,
};
} else if (key || cert) { } else if (key || cert) {
throw new Error('bad config: both certFilePaths.key and ' + throw new Error('bad config: both certFilePaths.key and ' +
'certFilePaths.cert must be defined'); 'certFilePaths.cert must be defined');
} }
if (certObj) {
if (Object.keys(certObj.certs).length > 0) {
this.https = certObj.certs;
}
if (Object.keys(certObj.paths).length > 0) {
this.httpsPath = certObj.paths;
}
}
this.outboundProxy = {};
const envProxy = process.env.HTTP_PROXY || process.env.HTTPS_PROXY
|| process.env.http_proxy || process.env.https_proxy;
const p = config.outboundProxy;
const proxyUrl = envProxy || (p ? p.url : '');
if (proxyUrl) {
assert(typeof proxyUrl === 'string',
'bad proxy config: url must be a string');
const { protocol, hostname, port, auth } = url.parse(proxyUrl);
assert(protocol === 'http:' || protocol === 'https:',
'bad proxy config: protocol must be http or https');
assert(typeof hostname === 'string' && hostname !== '',
'bad proxy config: hostname must be a non-empty string');
if (port) {
const portInt = Number.parseInt(port, 10);
assert(!Number.isNaN(portInt) && portInt > 0,
'bad proxy config: port must be a number greater than 0');
}
if (auth) {
assert(typeof auth === 'string',
'bad proxy config: auth must be string');
const authArray = auth.split(':');
assert(authArray.length === 2 && authArray[0].length > 0
&& authArray[1].length > 0, 'bad proxy config: ' +
'auth must be of format username:password');
}
this.outboundProxy.url = proxyUrl;
this.outboundProxy.certs = {};
const envCert = process.env.HTTPS_PROXY_CERTIFICATE;
const key = p ? p.key : '';
const cert = p ? p.cert : '';
const caBundle = envCert || (p ? p.caBundle : '');
if (p) {
assert(typeof p === 'object',
'bad config: "proxy" should be an object');
}
if (key) {
assert(typeof key === 'string',
'bad config: proxy.key should be a string');
}
if (cert) {
assert(typeof cert === 'string',
'bad config: proxy.cert should be a string');
}
if (caBundle) {
assert(typeof caBundle === 'string',
'bad config: proxy.caBundle should be a string');
}
const certObj =
assertCertPaths(key, cert, caBundle, this._basePath);
this.outboundProxy.certs = certObj.certs;
}
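For illustration, the outbound proxy can come either from the standard proxy environment variables or from an outboundProxy section like the hypothetical one below; assertCertPaths then loads the optional client key, certificate and CA bundle:
// via environment: HTTP_PROXY / HTTPS_PROXY / http_proxy / https_proxy,
// plus HTTPS_PROXY_CERTIFICATE for the CA bundle
// via config (illustrative values):
"outboundProxy": {
    "url": "http://proxyuser:proxypass@proxy.example.com:3128",
    "key": "ssl/client.key",
    "cert": "ssl/client.crt",
    "caBundle": "ssl/proxy-ca.crt"
}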
// Ephemeral token to protect the reporting endpoint: // Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file, // try inherited from parent first, then hardcoded in conf file,
@ -1080,6 +1212,41 @@ class Config extends EventEmitter {
process.env.REPORT_TOKEN || process.env.REPORT_TOKEN ||
config.reportToken || config.reportToken ||
uuid.v4().toString(); uuid.v4().toString();
this.reportEndpoint = process.env.REPORT_ENDPOINT;
// External backends
// Currently supports configuring httpAgent(s) for keepAlive
this.externalBackends = defaultExternalBackendsConfig;
if (config.externalBackends) {
const extBackendsConfig = Object.keys(config.externalBackends);
extBackendsConfig.forEach(b => {
// assert that it's a valid backend
assert(validExternalBackends[b] !== undefined,
`bad config: ${b} is not one of valid external backends: ` +
`${Object.keys(validExternalBackends).join(', ')}`);
const { httpAgent } = config.externalBackends[b];
assert(typeof httpAgent === 'object',
`bad config: ${b} must have httpAgent object defined`);
const { keepAlive, keepAliveMsecs, maxFreeSockets, maxSockets }
= httpAgent;
assert(typeof keepAlive === 'boolean',
`bad config: ${b}.httpAgent.keepAlive must be a boolean`);
assert(typeof keepAliveMsecs === 'number' &&
httpAgent.keepAliveMsecs > 0,
`bad config: ${b}.httpAgent.keepAliveMsecs must be` +
' a number > 0');
assert(typeof maxFreeSockets === 'number' &&
httpAgent.maxFreeSockets >= 0,
`bad config: ${b}.httpAgent.maxFreeSockets must be ` +
'a number >= 0');
assert((typeof maxSockets === 'number' && maxSockets >= 0) ||
maxSockets === null,
`bad config: ${b}.httpAgent.maxSockets must be ` +
'null or a number >= 0');
Object.assign(this.externalBackends[b].httpAgent, httpAgent);
});
}
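For illustration, an externalBackends section that satisfies these assertions could look like the following (values are only an example; each top-level key must be one of the valid external backends):
"externalBackends": {
    "aws_s3": {
        "httpAgent": {
            "keepAlive": true,        // boolean, required
            "keepAliveMsecs": 1000,   // number > 0
            "maxFreeSockets": 256,    // number >= 0
            "maxSockets": null        // null or number >= 0
        }
    }
}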
} }
_configureBackends() { _configureBackends() {
@ -1087,7 +1254,7 @@ class Config extends EventEmitter {
* Configure the backends for Authentication, Data and Metadata. * Configure the backends for Authentication, Data and Metadata.
*/ */
let auth = 'mem'; let auth = 'mem';
let data = 'file'; let data = 'multiple';
let metadata = 'file'; let metadata = 'file';
let kms = 'file'; let kms = 'file';
if (process.env.S3BACKEND) { if (process.env.S3BACKEND) {
@ -1172,14 +1339,17 @@ class Config extends EventEmitter {
const { const {
bucketName, bucketName,
mpuBucketName, mpuBucketName,
overflowBucketName,
} = this.locationConstraints[locationConstraint].details; } = this.locationConstraints[locationConstraint].details;
return { bucketName, mpuBucketName, overflowBucketName }; return { bucketName, mpuBucketName };
} }
getLocationConstraintType(locationConstraint) { getLocationConstraintType(locationConstraint) {
return this.locationConstraints[locationConstraint] && const dataStoreName = this.locationConstraints[locationConstraint];
this.locationConstraints[locationConstraint].type; return dataStoreName && dataStoreName.type;
}
getLocationConstraint(locationConstraintName) {
return this.locationConstraints[locationConstraintName];
} }
setRestEndpoints(restEndpoints) { setRestEndpoints(restEndpoints) {
@ -1194,6 +1364,12 @@ class Config extends EventEmitter {
this.emit('location-constraints-update'); this.emit('location-constraints-update');
} }
setReplicationEndpoints(locationConstraints) {
this.replicationEndpoints =
Object.keys(locationConstraints)
.map(key => ({ site: key, type: locationConstraints[key].type }));
}
getAzureEndpoint(locationConstraint) { getAzureEndpoint(locationConstraint) {
let azureStorageEndpoint = let azureStorageEndpoint =
process.env[`${locationConstraint}_AZURE_STORAGE_ENDPOINT`] || process.env[`${locationConstraint}_AZURE_STORAGE_ENDPOINT`] ||
@ -1241,33 +1417,6 @@ class Config extends EventEmitter {
return this.locationConstraints[locationConstraint].details return this.locationConstraints[locationConstraint].details
.serverSideEncryption === true; .serverSideEncryption === true;
} }
getGcpServiceParams(locationConstraint) {
const { serviceCredentials } =
this.locationConstraints[locationConstraint].details;
const serviceKeyFileFromEnv =
process.env[`${locationConstraint}_GCP_SERVICE_KEYFILE`];
const serviceEmailFromEnv =
process.env[`${locationConstraint}_GCP_SERVICE_EMAIL`];
const serviceKeyFromEnv =
process.env[`${locationConstraint}_GCP_SERVICE_KEY`];
const serviceScopeFromEnv =
process.env[`${locationConstraint}_GCP_SERVICE_SCOPE`];
return {
scopes: serviceScopeFromEnv || serviceCredentials &&
serviceCredentials.scopes || gcpScope,
keyFilename: serviceKeyFileFromEnv || serviceCredentials &&
serviceCredentials.keyFilename,
/* eslint-disable camelcase */
credentials: {
client_email: serviceEmailFromEnv || serviceCredentials &&
serviceCredentials.serviceEmail,
private_key: serviceKeyFromEnv || serviceCredentials &&
serviceCredentials.serviceKey,
},
/* eslint-enable camelcase */
};
}
} }
module.exports = { module.exports = {

View File

@ -9,6 +9,7 @@ const createKeyForUserBucket = require('./createKeyForUserBucket');
const metadata = require('../../../metadata/wrapper'); const metadata = require('../../../metadata/wrapper');
const kms = require('../../../kms/wrapper'); const kms = require('../../../kms/wrapper');
const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior'); const isLegacyAWSBehavior = require('../../../utilities/legacyAWSBehavior');
const { isBackbeatUser } = require('../authorization/aclChecks');
const usersBucket = constants.usersBucket; const usersBucket = constants.usersBucket;
const oldUsersBucket = constants.oldUsersBucket; const oldUsersBucket = constants.oldUsersBucket;
@ -165,9 +166,12 @@ function createBucket(authInfo, bucketName, headers,
const ownerDisplayName = const ownerDisplayName =
authInfo.getAccountDisplayName(); authInfo.getAccountDisplayName();
const creationDate = new Date().toJSON(); const creationDate = new Date().toJSON();
const isNFSEnabled = headers['x-scal-nfs-enabled'] === 'true';
const bucket = new BucketInfo(bucketName, const bucket = new BucketInfo(bucketName,
canonicalID, ownerDisplayName, creationDate, canonicalID, ownerDisplayName, creationDate,
BucketInfo.currentModelVersion()); BucketInfo.currentModelVersion(), null, null, null,
null, null, null, null, null, null, null, null,
null, isNFSEnabled);
if (locationConstraint !== undefined) { if (locationConstraint !== undefined) {
bucket.setLocationConstraint(locationConstraint); bucket.setLocationConstraint(locationConstraint);
@ -210,7 +214,8 @@ function createBucket(authInfo, bucketName, headers,
} }
const existingBucketMD = results.getAnyExistingBucketInfo; const existingBucketMD = results.getAnyExistingBucketInfo;
if (existingBucketMD instanceof BucketInfo && if (existingBucketMD instanceof BucketInfo &&
existingBucketMD.getOwner() !== canonicalID) { existingBucketMD.getOwner() !== canonicalID &&
!isBackbeatUser(canonicalID)) {
// return existingBucketMD to collect cors headers // return existingBucketMD to collect cors headers
return cb(errors.BucketAlreadyExists, existingBucketMD); return cb(errors.BucketAlreadyExists, existingBucketMD);
} }

View File

@ -0,0 +1,26 @@
const { errors } = require('arsenal');
function checkPreferredLocations(location, locationConstraints, log) {
const retError = loc => {
const errMsg = 'value of the location you are attempting to set - ' +
`${loc} - is not listed in the locationConstraint config`;
log.trace(`locationConstraint is invalid - ${errMsg}`,
{ locationConstraint: loc });
return errors.InvalidLocationConstraint.customizeDescription(errMsg);
};
if (typeof location === 'string' && !locationConstraints[location]) {
return retError(location);
}
if (typeof location === 'object') {
const { read, write } = location;
if (!locationConstraints[read]) {
return retError(read);
}
if (!locationConstraints[write]) {
return retError(write);
}
}
return null;
}
module.exports = checkPreferredLocations;
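A usage sketch (caller names and location names are assumptions, not taken from this changeset): the helper returns null when the preferred locations exist, and an arsenal InvalidLocationConstraint error otherwise.
// single preferred location given as a string
const err1 = checkPreferredLocations('us-east-1', config.locationConstraints, log);
// split read/write preference given as an object
const err2 = checkPreferredLocations({ read: 'aws-backend', write: 'azure-backend' },
    config.locationConstraints, log);
if (err1 || err2) {
    // propagate the InvalidLocationConstraint error to the API callback
}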

View File

@ -0,0 +1,19 @@
/**
* parse LIKE expressions
* @param {string} regex - regex pattern
* @return {object} MongoDB search object
*/
function parseLikeExpression(regex) {
if (typeof regex !== 'string') {
return null;
}
const split = regex.split('/');
if (split.length < 3 || split[0] !== '') {
return { $regex: regex };
}
const pattern = split.slice(1, split.length - 1).join('/');
const regexOpt = split[split.length - 1];
return { $regex: new RegExp(pattern), $options: regexOpt };
}
module.exports = parseLikeExpression;
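For example (return values shown in the comments):
// plain pattern: kept as a string regex
parseLikeExpression('blue');       // => { $regex: 'blue' }
// /pattern/options form: converted to a RegExp plus $options
parseLikeExpression('/^blue$/i');  // => { $regex: /^blue$/, $options: 'i' }
// non-string input
parseLikeExpression(42);           // => null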

View File

@ -0,0 +1,85 @@
const parseLikeExpression = require('./parseLikeExpression');
/*
This code is based on code from https://github.com/olehch/sqltomongo
with the following license:
The MIT License (MIT)
Copyright (c) 2016 Oleh
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/**
* A helper object to map SQL-like naming to MongoDB query syntax
*/
const exprMapper = {
'=': '$eq',
'!=': '$ne',
'<>': '$ne',
'>': '$gt',
'<': '$lt',
'>=': '$gte',
'<=': '$lte',
'LIKE': '$regex',
};
/*
* Parses object with WHERE clause recursively
* and generates MongoDB `find` query object
*/
function parseWhere(root) {
const operator = Object.keys(root)[0];
// extract leaf binary expressions
if (operator === 'AND') {
const e1 = parseWhere(root[operator][0]);
const e2 = parseWhere(root[operator][1]);
// eslint-disable-next-line
return { '$and' : [
e1,
e2,
] };
} else if (operator === 'OR') {
const e1 = parseWhere(root[operator][0]);
const e2 = parseWhere(root[operator][1]);
// eslint-disable-next-line
return { '$or' : [
e1,
e2,
] };
}
const field = root[operator][0];
const value = root[operator][1];
const expr = exprMapper[operator];
const obj = {};
if (operator === 'LIKE') {
obj[`value.${field}`] = parseLikeExpression(value);
} else {
obj[`value.${field}`] = { [expr]: value };
}
return obj;
}
module.exports = parseWhere;
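For example, given the AST shape produced by the sql-where-parser based validator (each binary operator holding a [field, value] pair), the generated MongoDB filter prefixes every field with `value.`:
const ast = {
    AND: [
        { '=': ['x-amz-meta-color', 'blue'] },
        { '>': ['content-length', 10] },
    ],
};
parseWhere(ast);
// => { $and: [
//        { 'value.x-amz-meta-color': { $eq: 'blue' } },
//        { 'value.content-length': { $gt: 10 } },
//    ] }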

View File

@ -0,0 +1,32 @@
const config = require('../../../Config').config;
/**
* Validates that the replication configuration contains a preferred
* read location if the bucket location is a transient source
*
* @param {object} repConfig - replication configuration
* @param {object} bucket - bucket metadata
*
* @return {boolean} validity of replication configuration with
* transient source
*/
function validateReplicationConfig(repConfig, bucket) {
const bucketLocationName = bucket.getLocationConstraint();
if (!repConfig || !repConfig.rules) {
return false;
}
const bucketLocation = config.locationConstraints[bucketLocationName];
if (!bucketLocation.isTransient) {
return true;
}
return repConfig.rules.every(rule => {
if (!rule.storageClass) {
return true;
}
const storageClasses = rule.storageClass.split(',');
return storageClasses.some(
site => site.endsWith(':preferred_read'));
});
}
module.exports = validateReplicationConfig;
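A usage sketch (rule values are illustrative): when the bucket sits on a transient source location, every rule that sets a storage class must name at least one ':preferred_read' site; non-transient locations always pass.
const repConfig = {
    rules: [{
        prefix: '',
        enabled: true,
        storageClass: 'aws-backend,azure-backend:preferred_read',
    }],
};
const ok = validateReplicationConfig(repConfig, bucketMD);
// ok === true here; without the ':preferred_read' suffix it would be false
// for a transient-source bucket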

View File

@ -0,0 +1,96 @@
const Parser = require('sql-where-parser');
const { errors } = require('arsenal');
const objModel = require('arsenal').models.ObjectMD;
const BINARY_OP = 2;
const sqlConfig = {
operators: [
{
'=': BINARY_OP,
'<': BINARY_OP,
'>': BINARY_OP,
'<>': BINARY_OP,
'<=': BINARY_OP,
'>=': BINARY_OP,
'!=': BINARY_OP,
},
{ LIKE: BINARY_OP },
{ AND: BINARY_OP },
{ OR: BINARY_OP },
],
tokenizer: {
shouldTokenize: ['(', ')', '=', '!=', '<', '>', '<=', '>=', '<>'],
shouldMatch: ['"', '\'', '`'],
shouldDelimitBy: [' ', '\n', '\r', '\t'],
},
};
const parser = new Parser(sqlConfig);
function _validateTree(whereClause, possibleAttributes) {
let invalidAttribute;
function _searchTree(node) {
if (typeof node !== 'object') {
invalidAttribute = node;
} else {
const operator = Object.keys(node)[0];
if (operator === 'AND' || operator === 'OR') {
_searchTree(node[operator][0]);
_searchTree(node[operator][1]);
} else {
const field = node[operator][0];
if (!field.startsWith('tags.') &&
!possibleAttributes[field] &&
!field.startsWith('replicationInfo.') &&
!field.startsWith('x-amz-meta-')) {
invalidAttribute = field;
}
}
}
}
_searchTree(whereClause);
return invalidAttribute;
}
/**
* validateSearchParams - validate value of ?search= in request
* @param {string} searchParams - value of search params in request
which should be a valid sql where clause
* For metadata: x-amz-meta-color=\"blue\"
* For tags: tags.x-amz-meta-color=\"blue\"
* For replication status : replication-status=\"PENDING\"
* For any other attribute: `content-length`=5
* @return {undefined | error} undefined if validates or arsenal error if not
*/
function validateSearchParams(searchParams) {
let ast;
try {
// allow using 'replicationStatus' as search param to increase
// ease of use, pending metadata search rework
// eslint-disable-next-line no-param-reassign
searchParams = searchParams.replace(
'replication-status', 'replicationInfo.status');
ast = parser.parse(searchParams);
} catch (e) {
if (e) {
return {
error: errors.InvalidArgument
.customizeDescription('Invalid sql where clause ' +
'sent as search query'),
};
}
}
const possibleAttributes = objModel.getAttributes();
const invalidAttribute = _validateTree(ast, possibleAttributes);
if (invalidAttribute) {
return {
error: errors.InvalidArgument
.customizeDescription('Search param ' +
`contains unknown attribute: ${invalidAttribute}`) };
}
return {
ast,
};
}
module.exports = validateSearchParams;
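A usage sketch mirroring the call in bucketGet below (the query string is an example of the documented forms):
const { ast, error } = validateSearchParams(
    'x-amz-meta-color="blue" AND `content-length`>10');
if (error) {
    // InvalidArgument: bad where clause or unknown attribute
} else {
    // ast can be handed to parseWhere() to build listParams.mongifiedSearch
}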

View File

@ -0,0 +1,27 @@
/**
* checkReadLocation - verify that a bucket's default read location exists
* for a specified read data locator
* @param {Config} config - Config object
* @param {string} locationName - location constraint
* @param {string} objectKey - object key
* @param {string} bucketName - bucket name
* @return {Object | null} return object containing location information
* if location exists; otherwise, null
*/
function checkReadLocation(config, locationName, objectKey, bucketName) {
const readLocation = config.getLocationConstraint(locationName);
if (readLocation) {
const bucketMatch = readLocation.details &&
readLocation.details.bucketMatch;
const backendKey = bucketMatch ? objectKey :
`${bucketName}/${objectKey}`;
return {
location: locationName,
key: backendKey,
locationType: readLocation.type,
};
}
return null;
}
module.exports = checkReadLocation;
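A usage sketch (location, object and bucket names are illustrative):
const readLoc = checkReadLocation(config, 'aws-backend', 'photos/cat.jpg', 'mybucket');
// if 'aws-backend' is configured with details.bucketMatch === false:
// => { location: 'aws-backend', key: 'mybucket/photos/cat.jpg', locationType: 'aws_s3' }
// if the location is not configured:
// => null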

View File

@ -19,7 +19,7 @@ const {
} = constants; } = constants;
const externalVersioningErrorMessage = 'We do not currently support putting ' + const externalVersioningErrorMessage = 'We do not currently support putting ' +
'a versioned object to a location-constraint of type Azure.'; 'a versioned object to a location-constraint of type Azure or GCP.';
/** /**
* Retro-propagation is where S3C ingestion will re-ingest an object whose * Retro-propagation is where S3C ingestion will re-ingest an object whose
@ -136,7 +136,8 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
size, size,
headers, headers,
isDeleteMarker, isDeleteMarker,
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size), replicationInfo: getReplicationInfo(
objectKey, bucketMD, false, size, null, null, authInfo),
log, log,
}; };
if (!isDeleteMarker) { if (!isDeleteMarker) {

View File

@ -0,0 +1,51 @@
const { errors } = require('arsenal');
/**
* getReplicationBackendDataLocator - compares given location constraint to
* replication backends
* @param {object} locationObj - object containing location information
* @param {string} locationObj.location - name of location constraint
* @param {string} locationObj.key - keyname of object in location constraint
* @param {string} locationObj.locationType - type of location constraint
* @param {object} replicationInfo - information about object replication
* @param {array} replicationInfo.backends - array containing information about
* each replication location
* @param {string} replicationInfo.backends[].site - name of replication
* location
* @param {string} replicationInfo.backends[].status - status of replication
* @param {string} replicationInfo.backends[].dataStoreVersionId - version id
* of object at replication location
* @return {object} res - response object
* {array} [res.dataLocator] - if COMPLETED status: array
* containing the cloud location,
* undefined otherwise
* {string} [res.status] - replication status if no error
* {string} [res.reason] - reason message if PENDING/FAILED
* {Error} [res.error] - defined if object is not replicated to
* location passed in locationObj
*/
function getReplicationBackendDataLocator(locationObj, replicationInfo) {
const repBackendResult = {};
const locMatch = replicationInfo.backends.find(
backend => backend.site === locationObj.location);
if (!locMatch) {
repBackendResult.error = errors.InvalidLocationConstraint.
customizeDescription('Object is not replicated to location ' +
'passed in location header');
return repBackendResult;
}
repBackendResult.status = locMatch.status;
if (['PENDING', 'FAILED'].includes(locMatch.status)) {
repBackendResult.reason =
`Object replication to specified backend is ${locMatch.status}`;
return repBackendResult;
}
repBackendResult.dataLocator = [{
key: locationObj.key,
dataStoreName: locationObj.location,
dataStoreType: locationObj.locationType,
dataStoreVersionId: locMatch.dataStoreVersionId }];
return repBackendResult;
}
module.exports = getReplicationBackendDataLocator;
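A usage sketch (values are illustrative):
const locationObj = {
    location: 'aws-backend',
    key: 'mybucket/photos/cat.jpg',
    locationType: 'aws_s3',
};
const replicationInfo = {
    backends: [
        { site: 'aws-backend', status: 'COMPLETED', dataStoreVersionId: 'v1' },
    ],
};
const res = getReplicationBackendDataLocator(locationObj, replicationInfo);
// => res.status === 'COMPLETED' and res.dataLocator is a one-element array
//    pointing at the replica; a PENDING/FAILED status sets res.reason instead,
//    and a missing site sets res.error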

View File

@ -1,4 +1,6 @@
const s3config = require('../../../Config').config; const s3config = require('../../../Config').config;
const constants = require('../../../../constants');
const { isBackbeatUser } = require('../authorization/aclChecks');
function _getBackend(objectMD, site) { function _getBackend(objectMD, site) {
const backends = objectMD ? objectMD.replicationInfo.backends : []; const backends = objectMD ? objectMD.replicationInfo.backends : [];
@ -29,16 +31,19 @@ function _getStorageClasses(rule) {
} }
function _getReplicationInfo(rule, replicationConfig, content, operationType, function _getReplicationInfo(rule, replicationConfig, content, operationType,
objectMD) { objectMD, bucketMD) {
const storageTypes = []; const storageTypes = [];
const backends = []; const backends = [];
const storageClasses = _getStorageClasses(rule); const storageClasses = _getStorageClasses(rule);
storageClasses.forEach(storageClass => { storageClasses.forEach(storageClass => {
const location = s3config.locationConstraints[storageClass]; const storageClassName =
if (location && ['aws_s3', 'azure'].includes(location.type)) { storageClass.endsWith(':preferred_read') ?
storageClass.split(':')[0] : storageClass;
const location = s3config.locationConstraints[storageClassName];
if (location && constants.replicationBackends[location.type]) {
storageTypes.push(location.type); storageTypes.push(location.type);
} }
backends.push(_getBackend(objectMD, storageClass)); backends.push(_getBackend(objectMD, storageClassName));
}); });
if (storageTypes.length > 0 && operationType) { if (storageTypes.length > 0 && operationType) {
content.push(operationType); content.push(operationType);
@ -51,6 +56,7 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
storageClass: storageClasses.join(','), storageClass: storageClasses.join(','),
role: replicationConfig.role, role: replicationConfig.role,
storageType: storageTypes.join(','), storageType: storageTypes.join(','),
isNFS: bucketMD.isNFS(),
}; };
} }
@ -63,19 +69,34 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
* @param {number} objSize - The size, in bytes, of the object being PUT * @param {number} objSize - The size, in bytes, of the object being PUT
* @param {string} operationType - The type of operation to replicate * @param {string} operationType - The type of operation to replicate
* @param {object} objectMD - The object metadata * @param {object} objectMD - The object metadata
* @param {AuthInfo} [authInfo] - authentication info of object owner
* @return {undefined} * @return {undefined}
*/ */
function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType, function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType,
objectMD) { objectMD, authInfo) {
const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA']; const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA'];
const config = bucketMD.getReplicationConfiguration(); const config = bucketMD.getReplicationConfiguration();
// If bucket does not have a replication configuration, do not replicate.
if (config) { // Do not replicate object in the following cases:
//
// - bucket does not have a replication configuration
//
// - replication configuration does not apply to the object
// (i.e. no rule matches object prefix)
//
// - replication configuration applies to the object (i.e. a rule matches
// object prefix) but the status is disabled
//
// - object owner is an internal service account like Lifecycle
// (because we do not want to replicate objects created from
// actions triggered by internal services, by design)
if (config && (!authInfo || !isBackbeatUser(authInfo.getCanonicalID()))) {
const rule = config.rules.find(rule => const rule = config.rules.find(rule =>
(objKey.startsWith(rule.prefix) && rule.enabled)); (objKey.startsWith(rule.prefix) && rule.enabled));
if (rule) { if (rule) {
return _getReplicationInfo(rule, config, content, operationType, return _getReplicationInfo(rule, config, content, operationType,
objectMD); objectMD, bucketMD);
} }
} }
return undefined; return undefined;

View File

@ -0,0 +1,37 @@
const { errors } = require('arsenal');
const { config } = require('../../../Config');
/**
* locationHeaderCheck - compares 'x-amz-location-constraint' header
* to location constraints in config
* @param {object} headers - request headers
* @param {string} objectKey - key name of object
* @param {string} bucketName - name of bucket
* @return {undefined|Object} returns error, object, or undefined
* @return {string} return.location - name of location constraint
* @return {string} return.key - name of object at location constraint
* @return {string} - return.locationType - type of location constraint
*/
function locationHeaderCheck(headers, objectKey, bucketName) {
const location = headers['x-amz-location-constraint'];
if (location) {
const validLocation = config.locationConstraints[location];
if (!validLocation) {
return errors.InvalidLocationConstraint.customizeDescription(
'Invalid location constraint specified in header');
}
const bucketMatch = validLocation.details.bucketMatch;
const backendKey = bucketMatch ? objectKey :
`${bucketName}/${objectKey}`;
return {
location,
key: backendKey,
locationType: validLocation.type,
};
}
// no location header was passed
return undefined;
}
module.exports = locationHeaderCheck;
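A usage sketch (header value and names are illustrative):
const result = locationHeaderCheck(
    { 'x-amz-location-constraint': 'aws-backend' }, 'photos/cat.jpg', 'mybucket');
// unknown location   => arsenal InvalidLocationConstraint error
// known location     => { location, key, locationType }
// header not present => undefined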

View File

@ -0,0 +1,48 @@
const { errors } = require('arsenal');
const { config } = require('../../../Config');
const { getLocationMetric, pushLocationMetric } =
require('../../../utapi/utilities');
function _gbToBytes(gb) {
return gb * 1024 * 1024 * 1024;
}
/**
* locationStorageCheck - will ensure there is enough space left for object on
* PUT operations, or will update metric on DELETE
* NOTE: storage limit may not be exactly enforced in the case of concurrent
* requests when near limit
* @param {string} location - name of location to check quota
* @param {number} updateSize - new size to check against quota in bytes
* @param {object} log - werelogs logger
* @param {function} cb - callback function
* @return {undefined}
*/
function locationStorageCheck(location, updateSize, log, cb) {
const lc = config.locationConstraints;
const sizeLimitGB = lc[location] ? lc[location].sizeLimitGB : undefined;
if (updateSize === 0 || sizeLimitGB === undefined || sizeLimitGB === null) {
return cb();
}
// no need to list location metric, since it should be decreased
if (updateSize < 0) {
return pushLocationMetric(location, updateSize, log, cb);
}
return getLocationMetric(location, log, (err, bytesStored) => {
if (err) {
log.error(`Error listing metrics from Utapi: ${err.message}`);
return cb(err);
}
const newStorageSize = parseInt(bytesStored, 10) + updateSize;
const sizeLimitBytes = _gbToBytes(sizeLimitGB);
if (sizeLimitBytes < newStorageSize) {
return cb(errors.AccessDenied.customizeDescription(
`The assigned storage space limit for location ${location} ` +
'will be exceeded'));
}
return pushLocationMetric(location, updateSize, log, cb);
});
}
module.exports = locationStorageCheck;
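A usage sketch (the location name and the continuation callback are illustrative):
// check quota before accepting a 5 MiB PUT on location 'aws-backend'
locationStorageCheck('aws-backend', 5 * 1024 * 1024, log, err => {
    if (err) {
        // AccessDenied: the configured sizeLimitGB would be exceeded
        return callback(err);
    }
    return next(); // hypothetical continuation
});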

View File

@ -238,4 +238,5 @@ splitter, log) {
module.exports = { module.exports = {
generateMpuPartStorageInfo, generateMpuPartStorageInfo,
validateAndFilterMpuParts, validateAndFilterMpuParts,
createAggregateETag,
}; };

View File

@ -4,6 +4,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const deleteBucket = require('./apiUtils/bucket/bucketDeletion'); const deleteBucket = require('./apiUtils/bucket/bucketDeletion');
const { metadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
/** /**
* bucketDelete - DELETE bucket (currently supports only non-versioned buckets) * bucketDelete - DELETE bucket (currently supports only non-versioned buckets)
@ -20,6 +21,8 @@ function bucketDelete(authInfo, request, log, cb) {
if (authInfo.isRequesterPublicUser()) { if (authInfo.isRequesterPublicUser()) {
log.debug('operation not available for public user'); log.debug('operation not available for public user');
monitoring.promMetrics(
'DELETE', request.bucketName, 403, 'deleteBucket');
return cb(errors.AccessDenied); return cb(errors.AccessDenied);
} }
const bucketName = request.bucketName; const bucketName = request.bucketName;
@ -37,6 +40,8 @@ function bucketDelete(authInfo, request, log, cb) {
if (err) { if (err) {
log.debug('error processing request', log.debug('error processing request',
{ method: 'metadataValidateBucket', error: err }); { method: 'metadataValidateBucket', error: err });
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucket');
return cb(err, corsHeaders); return cb(err, corsHeaders);
} }
log.trace('passed checks', log.trace('passed checks',
@ -44,12 +49,16 @@ function bucketDelete(authInfo, request, log, cb) {
return deleteBucket(authInfo, bucketMD, bucketName, return deleteBucket(authInfo, bucketMD, bucketName,
authInfo.getCanonicalID(), log, err => { authInfo.getCanonicalID(), log, err => {
if (err) { if (err) {
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucket');
return cb(err, corsHeaders); return cb(err, corsHeaders);
} }
pushMetric('deleteBucket', log, { pushMetric('deleteBucket', log, {
authInfo, authInfo,
bucket: bucketName, bucket: bucketName,
}); });
monitoring.promMetrics(
'DELETE', bucketName, '204', 'deleteBucket');
return cb(null, corsHeaders); return cb(null, corsHeaders);
}); });
}); });

View File

@ -6,6 +6,7 @@ const { isBucketAuthorized } =
require('./apiUtils/authorization/permissionChecks'); require('./apiUtils/authorization/permissionChecks');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteCors'; const requestType = 'bucketDeleteCors';
@ -27,9 +28,13 @@ function bucketDeleteCors(authInfo, request, log, callback) {
request.method, bucket); request.method, bucket);
if (err) { if (err) {
log.debug('metadata getbucket failed', { error: err }); log.debug('metadata getbucket failed', { error: err });
monitoring.promMetrics('DELETE', bucketName, 400,
'deleteBucketCors');
return callback(err); return callback(err);
} }
if (bucketShield(bucket, requestType)) { if (bucketShield(bucket, requestType)) {
monitoring.promMetrics('DELETE', bucketName, 400,
'deleteBucketCors');
return callback(errors.NoSuchBucket); return callback(errors.NoSuchBucket);
} }
log.trace('found bucket in metadata'); log.trace('found bucket in metadata');
@ -39,6 +44,8 @@ function bucketDeleteCors(authInfo, request, log, callback) {
requestType, requestType,
method: 'bucketDeleteCors', method: 'bucketDeleteCors',
}); });
monitoring.promMetrics('DELETE', bucketName, 403,
'deleteBucketCors');
return callback(errors.AccessDenied, corsHeaders); return callback(errors.AccessDenied, corsHeaders);
} }
@ -58,12 +65,16 @@ function bucketDeleteCors(authInfo, request, log, callback) {
bucket.setCors(null); bucket.setCors(null);
return metadata.updateBucket(bucketName, bucket, log, err => { return metadata.updateBucket(bucketName, bucket, log, err => {
if (err) { if (err) {
monitoring.promMetrics('DELETE', bucketName, 400,
'deleteBucketCors');
return callback(err, corsHeaders); return callback(err, corsHeaders);
} }
pushMetric('deleteBucketCors', log, { pushMetric('deleteBucketCors', log, {
authInfo, authInfo,
bucket: bucketName, bucket: bucketName,
}); });
monitoring.promMetrics(
'DELETE', bucketName, '204', 'deleteBucketCors');
return callback(err, corsHeaders); return callback(err, corsHeaders);
}); });
}); });

View File

@ -2,6 +2,7 @@ const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
/** /**
* bucketDeleteLifecycle - Delete the bucket Lifecycle configuration * bucketDeleteLifecycle - Delete the bucket Lifecycle configuration
@ -26,6 +27,8 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
error: err, error: err,
method: 'bucketDeleteLifecycle', method: 'bucketDeleteLifecycle',
}); });
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucketLifecycle');
return callback(err, corsHeaders); return callback(err, corsHeaders);
} }
if (!bucket.getLifecycleConfiguration()) { if (!bucket.getLifecycleConfiguration()) {
@ -42,12 +45,16 @@ function bucketDeleteLifecycle(authInfo, request, log, callback) {
bucket.setLifecycleConfiguration(null); bucket.setLifecycleConfiguration(null);
return metadata.updateBucket(bucketName, bucket, log, err => { return metadata.updateBucket(bucketName, bucket, log, err => {
if (err) { if (err) {
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucketLifecycle');
return callback(err, corsHeaders); return callback(err, corsHeaders);
} }
pushMetric('deleteBucketLifecycle', log, { pushMetric('deleteBucketLifecycle', log, {
authInfo, authInfo,
bucket: bucketName, bucket: bucketName,
}); });
monitoring.promMetrics(
'DELETE', bucketName, '200', 'deleteBucketLifecycle');
return callback(null, corsHeaders); return callback(null, corsHeaders);
}); });
}); });

View File

@ -2,6 +2,7 @@ const metadata = require('../metadata/wrapper');
const { metadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const monitoring = require('../utilities/monitoringHandler');
/** /**
* bucketDeleteReplication - Delete the bucket replication configuration * bucketDeleteReplication - Delete the bucket replication configuration
@ -26,6 +27,8 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
error: err, error: err,
method: 'bucketDeleteReplication', method: 'bucketDeleteReplication',
}); });
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucketReplication');
return callback(err, corsHeaders); return callback(err, corsHeaders);
} }
if (!bucket.getReplicationConfiguration()) { if (!bucket.getReplicationConfiguration()) {
@ -42,12 +45,16 @@ function bucketDeleteReplication(authInfo, request, log, callback) {
bucket.setReplicationConfiguration(null); bucket.setReplicationConfiguration(null);
return metadata.updateBucket(bucketName, bucket, log, err => { return metadata.updateBucket(bucketName, bucket, log, err => {
if (err) { if (err) {
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucketReplication');
return callback(err, corsHeaders); return callback(err, corsHeaders);
} }
pushMetric('deleteBucketReplication', log, { pushMetric('deleteBucketReplication', log, {
authInfo, authInfo,
bucket: bucketName, bucket: bucketName,
}); });
monitoring.promMetrics(
'DELETE', bucketName, '200', 'deleteBucketReplication');
return callback(null, corsHeaders); return callback(null, corsHeaders);
}); });
}); });

View File

@ -6,6 +6,7 @@ const { isBucketAuthorized } =
require('./apiUtils/authorization/permissionChecks'); require('./apiUtils/authorization/permissionChecks');
const metadata = require('../metadata/wrapper'); const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities'); const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteWebsite'; const requestType = 'bucketDeleteWebsite';
@ -19,9 +20,13 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
request.method, bucket); request.method, bucket);
if (err) { if (err) {
log.debug('metadata getbucket failed', { error: err }); log.debug('metadata getbucket failed', { error: err });
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucketWebsite');
return callback(err); return callback(err);
} }
if (bucketShield(bucket, requestType)) { if (bucketShield(bucket, requestType)) {
monitoring.promMetrics(
'DELETE', bucketName, 404, 'deleteBucketWebsite');
return callback(errors.NoSuchBucket); return callback(errors.NoSuchBucket);
} }
log.trace('found bucket in metadata'); log.trace('found bucket in metadata');
@ -31,6 +36,8 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
requestType, requestType,
method: 'bucketDeleteWebsite', method: 'bucketDeleteWebsite',
}); });
monitoring.promMetrics(
'DELETE', bucketName, 403, 'deleteBucketWebsite');
return callback(errors.AccessDenied, corsHeaders); return callback(errors.AccessDenied, corsHeaders);
} }
@ -50,12 +57,16 @@ function bucketDeleteWebsite(authInfo, request, log, callback) {
bucket.setWebsiteConfiguration(null); bucket.setWebsiteConfiguration(null);
return metadata.updateBucket(bucketName, bucket, log, err => { return metadata.updateBucket(bucketName, bucket, log, err => {
if (err) { if (err) {
monitoring.promMetrics(
'DELETE', bucketName, err.code, 'deleteBucketWebsite');
return callback(err, corsHeaders); return callback(err, corsHeaders);
} }
pushMetric('deleteBucketWebsite', log, { pushMetric('deleteBucketWebsite', log, {
authInfo, authInfo,
bucket: bucketName, bucket: bucketName,
}); });
monitoring.promMetrics(
'DELETE', bucketName, '200', 'deleteBucketWebsite');
return callback(null, corsHeaders); return callback(null, corsHeaders);
}); });
}); });

View File

@ -1,14 +1,15 @@
const querystring = require('querystring'); const querystring = require('querystring');
const { errors, versioning, s3middleware } = require('arsenal'); const { errors, versioning, s3middleware } = require('arsenal');
const constants = require('../../constants'); const constants = require('../../constants');
const services = require('../services'); const services = require('../services');
const { metadataValidateBucket } = require('../metadata/metadataUtils'); const { metadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const escapeForXml = s3middleware.escapeForXml;
 const { pushMetric } = require('../utapi/utilities');
+const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
+const parseWhere = require('../api/apiUtils/bucket/parseWhere');
 const versionIdUtils = versioning.VersionID;
+const monitoring = require('../utilities/monitoringHandler');
 const { generateToken, decryptToken }
     = require('../api/apiUtils/object/continueToken');
@@ -29,8 +30,7 @@ const { generateToken, decryptToken }
 </ListBucketResult>
 */
-// Sample XML response for GET bucket objects:
-/*
+/* Sample XML response for GET bucket objects:
 <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
     <Name>example-bucket</Name>
     <Prefix></Prefix>
@@ -60,7 +60,6 @@ const { generateToken, decryptToken }
 // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETVersion.html#RESTBucketGET_Examples
 /*
 <?xml version="1.0" encoding="UTF-8"?>
 <ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
     <Name>bucket</Name>
     <Prefix>my</Prefix>
@@ -259,6 +258,7 @@ function handleResult(listParams, requestMaxKeys, encoding, authInfo,
         res = processMasterVersions(bucketName, listParams, list);
     }
     pushMetric('listBucket', log, { authInfo, bucket: bucketName });
+    monitoring.promMetrics('GET', bucketName, '200', 'listBucket');
     return callback(null, res, corsHeaders);
 }
@@ -283,14 +283,26 @@ function bucketGet(authInfo, request, log, callback) {
     }
     const encoding = params['encoding-type'];
     if (encoding !== undefined && encoding !== 'url') {
+        monitoring.promMetrics(
+            'GET', bucketName, 400, 'listBucket');
         return callback(errors.InvalidArgument.customizeDescription('Invalid ' +
             'Encoding Method specified in Request'));
     }
     const requestMaxKeys = params['max-keys'] ?
         Number.parseInt(params['max-keys'], 10) : 1000;
     if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
+        monitoring.promMetrics(
+            'GET', bucketName, 400, 'listBucket');
         return callback(errors.InvalidArgument);
     }
+    let validatedAst;
+    if (params.search !== undefined) {
+        const astOrError = validateSearchParams(params.search);
+        if (astOrError.error) {
+            return callback(astOrError.error);
+        }
+        validatedAst = astOrError.ast;
+    }
     // AWS only returns 1000 keys even if max keys are greater.
     // Max keys stated in response xml can be greater than actual
     // keys returned.
@@ -322,6 +334,8 @@ function bucketGet(authInfo, request, log, callback) {
             request.method, bucket);
         if (err) {
             log.debug('error processing request', { error: err });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'listBucket');
             return callback(err, null, corsHeaders);
         }
         if (params.versions !== undefined) {
@@ -341,10 +355,27 @@ function bucketGet(authInfo, request, log, callback) {
             return handleResult(listParams, requestMaxKeys, encoding, authInfo,
                 bucketName, emptyList, corsHeaders, log, callback);
         }
+        if (params.search !== undefined) {
+            log.info('performing search listing', { search: params.search });
+            try {
+                listParams.mongifiedSearch = parseWhere(validatedAst);
+            } catch (err) {
+                log.debug(err.message, {
+                    stack: err.stack,
+                });
+                monitoring.promMetrics(
+                    'GET', bucketName, 400, 'listBucket');
+                return callback(errors.InvalidArgument
+                    .customizeDescription('Invalid sql where clause ' +
                    'sent as search query'));
+            }
+        }
         return services.getObjectListing(bucketName, listParams, log,
             (err, list) => {
                 if (err) {
                     log.debug('error processing request', { error: err });
+                    monitoring.promMetrics(
+                        'GET', bucketName, err.code, 'listBucket');
                     return callback(err, null, corsHeaders);
                 }
                 return handleResult(listParams, requestMaxKeys, encoding, authInfo,
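Note on the search listing added in this file: the handler validates params.search up front with validateSearchParams and only later turns the validated AST into a MongoDB-style filter via parseWhere. A minimal standalone sketch of that two-step flow, assuming only what the hunk shows (validateSearchParams returns { error } or { ast }; parseWhere returns a filter object or throws) and using the same module paths as the diff:

const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
const parseWhere = require('../api/apiUtils/bucket/parseWhere');

// Hypothetical helper mirroring the flow in bucketGet above.
function buildMongifiedSearch(search) {
    const astOrError = validateSearchParams(search);
    if (astOrError.error) {
        // surfaced to the client as-is by the handler
        return { error: astOrError.error };
    }
    try {
        // parseWhere may throw on clauses it cannot translate; the handler
        // maps that to InvalidArgument ('Invalid sql where clause...')
        return { mongifiedSearch: parseWhere(astOrError.ast) };
    } catch (err) {
        return { error: err };
    }
}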
View File
@@ -4,6 +4,7 @@ const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const vault = require('../auth/vault');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 // Sample XML response:
 /*
@@ -65,6 +66,8 @@ function bucketGetACL(authInfo, request, log, callback) {
         if (err) {
             log.debug('error processing request',
                 { method: 'bucketGetACL', error: err });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'getBucketAcl');
             return callback(err, null, corsHeaders);
         }
         const bucketACL = bucket.getAcl();
@@ -136,6 +139,8 @@ function bucketGetACL(authInfo, request, log, callback) {
                 if (err) {
                     log.debug('error processing request',
                         { method: 'vault.getEmailAddresses', error: err });
+                    monitoring.promMetrics(
+                        'GET', bucketName, err.code, 'getBucketAcl');
                     return callback(err, null, corsHeaders);
                 }
                 const individualGrants = canonicalIDs.map(canonicalID => {
@@ -166,6 +171,7 @@ function bucketGetACL(authInfo, request, log, callback) {
                 authInfo,
                 bucket: bucketName,
             });
+            monitoring.promMetrics('GET', bucketName, '200', 'getBucketAcl');
             return callback(null, xml, corsHeaders);
         });
     });
View File
@@ -7,6 +7,7 @@ const { isBucketAuthorized } =
     require('./apiUtils/authorization/permissionChecks');
 const metadata = require('../metadata/wrapper');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 const requestType = 'bucketGetCors';
@@ -26,9 +27,13 @@ function bucketGetCors(authInfo, request, log, callback) {
     metadata.getBucket(bucketName, log, (err, bucket) => {
         if (err) {
             log.debug('metadata getbucket failed', { error: err });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'getBucketCors');
             return callback(err);
         }
         if (bucketShield(bucket, requestType)) {
+            monitoring.promMetrics(
+                'GET', bucketName, 404, 'getBucketCors');
             return callback(errors.NoSuchBucket);
         }
         log.trace('found bucket in metadata');
@@ -40,6 +45,8 @@ function bucketGetCors(authInfo, request, log, callback) {
                 requestType,
                 method: 'bucketGetCors',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, 403, 'getBucketCors');
             return callback(errors.AccessDenied, null, corsHeaders);
         }
@@ -48,6 +55,8 @@ function bucketGetCors(authInfo, request, log, callback) {
             log.debug('cors configuration does not exist', {
                 method: 'bucketGetCors',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, 404, 'getBucketCors');
             return callback(errors.NoSuchCORSConfiguration, null, corsHeaders);
         }
         log.trace('converting cors configuration to xml');
@@ -57,6 +66,7 @@ function bucketGetCors(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics('GET', bucketName, '200', 'getBucketCors');
         return callback(null, xml, corsHeaders);
     });
 }
View File
@@ -5,6 +5,7 @@ const LifecycleConfiguration =
 const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const monitoring = require('../utilities/monitoringHandler');
 /**
  * bucketGetLifecycle - Get the bucket lifecycle configuration
@@ -29,6 +30,8 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
                 error: err,
                 method: 'bucketGetLifecycle',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'getBucketLifecycle');
             return callback(err, null, corsHeaders);
         }
         const lifecycleConfig = bucket.getLifecycleConfiguration();
@@ -37,6 +40,8 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
                 error: errors.NoSuchLifecycleConfiguration,
                 method: 'bucketGetLifecycle',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, 404, 'getBucketLifecycle');
             return callback(errors.NoSuchLifecycleConfiguration, null,
                 corsHeaders);
         }
@@ -45,6 +50,7 @@ function bucketGetLifecycle(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics('GET', bucketName, '200', 'getBucketLifecycle');
         return callback(null, xml, corsHeaders);
     });
 }
View File
@@ -7,6 +7,7 @@ const metadata = require('../metadata/wrapper');
 const { pushMetric } = require('../utapi/utilities');
 const escapeForXml = s3middleware.escapeForXml;
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const monitoring = require('../utilities/monitoringHandler');
 const requestType = 'bucketGetLocation';
@@ -27,9 +28,13 @@ function bucketGetLocation(authInfo, request, log, callback) {
     return metadata.getBucket(bucketName, log, (err, bucket) => {
         if (err) {
             log.debug('metadata getbucket failed', { error: err });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'getBucketLocation');
             return callback(err);
         }
         if (bucketShield(bucket, requestType)) {
+            monitoring.promMetrics(
+                'GET', bucketName, 404, 'getBucketLocation');
             return callback(errors.NoSuchBucket);
         }
         log.trace('found bucket in metadata');
@@ -42,6 +47,8 @@ function bucketGetLocation(authInfo, request, log, callback) {
                 requestType,
                 method: 'bucketGetLocation',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, 403, 'getBucketLocation');
             return callback(errors.AccessDenied, null, corsHeaders);
         }
@@ -60,7 +67,8 @@ function bucketGetLocation(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics(
+            'GET', bucketName, '200', 'getBucketLocation');
         return callback(null, xml, corsHeaders);
     });
 }
View File
@@ -5,6 +5,7 @@ const { pushMetric } = require('../utapi/utilities');
 const { getReplicationConfigurationXML } =
     require('./apiUtils/bucket/getReplicationConfiguration');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const monitoring = require('../utilities/monitoringHandler');
 /**
  * bucketGetReplication - Get the bucket replication configuration
@@ -29,6 +30,8 @@ function bucketGetReplication(authInfo, request, log, callback) {
                 error: err,
                 method: 'bucketGetReplication',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'getBucketReplication');
             return callback(err, null, corsHeaders);
         }
         const replicationConfig = bucket.getReplicationConfiguration();
@@ -37,6 +40,8 @@ function bucketGetReplication(authInfo, request, log, callback) {
                 error: errors.ReplicationConfigurationNotFoundError,
                 method: 'bucketGetReplication',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, 404, 'getBucketReplication');
             return callback(errors.ReplicationConfigurationNotFoundError, null,
                 corsHeaders);
         }
@@ -45,6 +50,8 @@ function bucketGetReplication(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics(
+            'GET', bucketName, '200', 'getBucketReplication');
         return callback(null, xml, corsHeaders);
     });
 }
View File
@@ -1,6 +1,7 @@
 const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 // Sample XML response:
 /*
@@ -62,6 +63,8 @@ function bucketGetVersioning(authInfo, request, log, callback) {
         if (err) {
             log.debug('error processing request',
                 { method: 'bucketGetVersioning', error: err });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'getBucketVersioning');
             return callback(err, null, corsHeaders);
         }
         const versioningConfiguration = bucket.getVersioningConfiguration();
@@ -70,6 +73,8 @@ function bucketGetVersioning(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics(
+            'GET', bucketName, '200', 'getBucketVersioning');
         return callback(null, xml, corsHeaders);
     });
 }
View File
@@ -7,6 +7,7 @@ const { isBucketAuthorized } =
     require('./apiUtils/authorization/permissionChecks');
 const metadata = require('../metadata/wrapper');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 const requestType = 'bucketGetWebsite';
@@ -26,9 +27,13 @@ function bucketGetWebsite(authInfo, request, log, callback) {
     metadata.getBucket(bucketName, log, (err, bucket) => {
         if (err) {
             log.debug('metadata getbucket failed', { error: err });
+            monitoring.promMetrics(
+                'GET', bucketName, err.code, 'getBucketWebsite');
             return callback(err);
         }
         if (bucketShield(bucket, requestType)) {
+            monitoring.promMetrics(
+                'GET', bucketName, 404, 'getBucketWebsite');
             return callback(errors.NoSuchBucket);
         }
         log.trace('found bucket in metadata');
@@ -40,6 +45,8 @@ function bucketGetWebsite(authInfo, request, log, callback) {
                 requestType,
                 method: 'bucketGetWebsite',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, 403, 'getBucketWebsite');
             return callback(errors.AccessDenied, null, corsHeaders);
         }
@@ -48,6 +55,8 @@ function bucketGetWebsite(authInfo, request, log, callback) {
             log.debug('bucket website configuration does not exist', {
                 method: 'bucketGetWebsite',
             });
+            monitoring.promMetrics(
+                'GET', bucketName, 404, 'getBucketWebsite');
             return callback(errors.NoSuchWebsiteConfiguration, null,
                 corsHeaders);
         }
@@ -58,6 +67,8 @@ function bucketGetWebsite(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics(
+            'GET', bucketName, '200', 'getBucketWebsite');
         return callback(null, xml, corsHeaders);
     });
 }
View File
@@ -2,6 +2,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 /**
  * Determine if bucket exists and if user has permission to access it
@@ -24,12 +25,16 @@ function bucketHead(authInfo, request, log, callback) {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
         if (err) {
+            monitoring.promMetrics(
+                'HEAD', bucketName, err.code, 'headBucket');
             return callback(err, corsHeaders);
         }
         pushMetric('headBucket', log, {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics(
+            'HEAD', bucketName, '200', 'headBucket');
         return callback(null, corsHeaders);
     });
 }
View File
@@ -8,6 +8,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const { config } = require('../Config');
 const aclUtils = require('../utilities/aclUtils');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 let { restEndpoints, locationConstraints } = config;
 config.on('rest-endpoints-update', () => {
@@ -46,12 +47,12 @@ function checkLocationConstraint(request, locationConstraint, log) {
     if (!locationConstraints[locationConstraintChecked]) {
         const errMsg = 'value of the location you are attempting to set - ' +
             `${locationConstraintChecked} - is not listed in the ` +
             'locationConstraint config';
         log.trace(`locationConstraint is invalid - ${errMsg}`,
             { locationConstraint: locationConstraintChecked });
         return { error: errors.InvalidLocationConstraint.
             customizeDescription(errMsg) };
     }
     return { error: null, locationConstraint: locationConstraintChecked };
 }
@@ -109,15 +110,25 @@ function bucketPut(authInfo, request, log, callback) {
     if (authInfo.isRequesterPublicUser()) {
         log.debug('operation not available for public user');
+        monitoring.promMetrics(
+            'PUT', request.bucketName, 403, 'createBucket');
         return callback(errors.AccessDenied);
     }
     if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
         log.trace('invalid acl header');
+        monitoring.promMetrics(
+            'PUT', request.bucketName, 400, 'createBucket');
         return callback(errors.InvalidArgument);
     }
     const { bucketName } = request;
-    if (request.bucketName === 'METADATA') {
+    if (request.bucketName === 'METADATA'
+    // Note: for this to work with Vault, would need way to set
+    // canonical ID to http://acs.zenko.io/accounts/service/clueso
+        && !authInfo.isRequesterThisServiceAccount('clueso')) {
+        monitoring.promMetrics(
+            'PUT', bucketName, 403, 'createBucket');
         return callback(errors.AccessDenied
             .customizeDescription('The bucket METADATA is used ' +
             'for internal purposes'));
@@ -179,6 +190,7 @@ function bucketPut(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics('PUT', bucketName, '200', 'createBucket');
         return next(null, corsHeaders);
     }),
     ], callback);
View File
@@ -9,6 +9,7 @@ const constants = require('../../constants');
 const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const vault = require('../auth/vault');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 /*
    Format of xml request:
@@ -58,10 +59,12 @@ function bucketPutACL(authInfo, request, log, callback) {
             acl: newCannedACL,
             method: 'bucketPutACL',
         });
+        monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
         return callback(errors.InvalidArgument);
     }
     if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
         log.trace('invalid acl header');
+        monitoring.promMetrics('PUT', bucketName, 400, 'bucketPutACL');
         return callback(errors.InvalidArgument);
     }
     const possibleGroups = [constants.allAuthedUsersId,
@@ -229,6 +232,8 @@ function bucketPutACL(authInfo, request, log, callback) {
                             id,
                             method: 'bucketPutACL',
                         });
+                        monitoring.promMetrics('PUT', bucketName, 400,
+                            'bucketPutACL');
                         return callback(errors.InvalidArgument, bucket);
                     }
                 }
@@ -284,11 +289,13 @@ function bucketPutACL(authInfo, request, log, callback) {
         if (err) {
             log.trace('error processing request', { error: err,
                 method: 'bucketPutACL' });
+            monitoring.promMetrics('PUT', bucketName, err.code, 'bucketPutACL');
         } else {
             pushMetric('putBucketAcl', log, {
                 authInfo,
                 bucket: bucketName,
             });
+            monitoring.promMetrics('PUT', bucketName, '200', 'bucketPutACL');
         }
         return callback(err, corsHeaders);
     });
View File
@@ -9,6 +9,7 @@ const { isBucketAuthorized } =
 const metadata = require('../metadata/wrapper');
 const { parseCorsXml } = require('./apiUtils/bucket/bucketCors');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 const requestType = 'bucketPutCors';
@@ -29,6 +30,7 @@ function bucketPutCors(authInfo, request, log, callback) {
     if (!request.post) {
         log.debug('CORS xml body is missing',
             { error: errors.MissingRequestBodyError });
+        monitoring.promMetrics('PUT', bucketName, 400, 'putBucketCors');
         return callback(errors.MissingRequestBodyError);
     }
@@ -36,12 +38,14 @@ function bucketPutCors(authInfo, request, log, callback) {
         .update(request.post, 'utf8').digest('base64');
     if (md5 !== request.headers['content-md5']) {
         log.debug('bad md5 digest', { error: errors.BadDigest });
+        monitoring.promMetrics('PUT', bucketName, 400, 'putBucketCors');
         return callback(errors.BadDigest);
     }
     if (parseInt(request.headers['content-length'], 10) > 65536) {
         const errMsg = 'The CORS XML document is limited to 64 KB in size.';
         log.debug(errMsg, { error: errors.MalformedXML });
+        monitoring.promMetrics('PUT', bucketName, 400, 'putBucketCors');
         return callback(errors.MalformedXML.customizeDescription(errMsg));
     }
@@ -86,11 +90,14 @@ function bucketPutCors(authInfo, request, log, callback) {
         if (err) {
             log.trace('error processing request', { error: err,
                 method: 'bucketPutCors' });
+            monitoring.promMetrics('PUT', bucketName, err.code,
+                'putBucketCors');
         }
         pushMetric('putBucketCors', log, {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics('PUT', bucketName, '200', 'putBucketCors');
         return callback(err, corsHeaders);
     });
 }
View File
@@ -7,6 +7,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const metadata = require('../metadata/wrapper');
 const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 /**
  * Bucket Put Lifecycle - Create or update bucket lifecycle configuration
@@ -55,12 +56,15 @@ function bucketPutLifecycle(authInfo, request, log, callback) {
         if (err) {
             log.trace('error processing request', { error: err,
                 method: 'bucketPutLifecycle' });
+            monitoring.promMetrics(
+                'PUT', bucketName, err.code, 'putBucketLifecycle');
             return callback(err, corsHeaders);
         }
         pushMetric('putBucketLifecycle', log, {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics('PUT', bucketName, '200', 'putBucketLifecycle');
         return callback(null, corsHeaders);
     });
 }
View File
@@ -6,7 +6,10 @@ const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
 const { getReplicationConfiguration } =
     require('./apiUtils/bucket/getReplicationConfiguration');
+const validateConfiguration =
+    require('./apiUtils/bucket/validateReplicationConfig');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
+const monitoring = require('../utilities/monitoringHandler');
 // The error response when a bucket does not have versioning 'Enabled'.
 const versioningNotEnabledError = errors.InvalidRequest.customizeDescription(
@@ -39,16 +42,26 @@ function bucketPutReplication(authInfo, request, log, callback) {
             if (err) {
                 return next(err);
             }
-            // Replication requires that versioning is 'Enabled'.
-            if (!bucket.isVersioningEnabled(bucket)) {
+            // Replication requires that versioning is 'Enabled' unless it
+            // is an NFS bucket.
+            if (!bucket.isNFS() && !bucket.isVersioningEnabled(bucket)) {
                 return next(versioningNotEnabledError);
             }
             return next(null, config, bucket);
         }),
         // Set the replication configuration and update the bucket metadata.
         (config, bucket, next) => {
+            // validate there's a preferred read location in case the
+            // bucket location is a transient source
+            if (!validateConfiguration(config, bucket)) {
+                const msg = 'Replication configuration lacks a preferred ' +
+                    'read location';
+                log.error(msg, { bucketName: bucket.getName() });
+                return next(errors.ValidationError
+                    .customizeDescription(msg));
+            }
             bucket.setReplicationConfiguration(config);
-            metadata.updateBucket(bucket.getName(), bucket, log, err =>
+            return metadata.updateBucket(bucket.getName(), bucket, log, err =>
                 next(err, bucket));
         },
     ], (err, bucket) => {
@@ -58,12 +71,16 @@ function bucketPutReplication(authInfo, request, log, callback) {
                 error: err,
                 method: 'bucketPutReplication',
             });
+            monitoring.promMetrics(
+                'PUT', bucketName, err.code, 'putBucketReplication');
             return callback(err, corsHeaders);
         }
         pushMetric('putBucketReplication', log, {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics(
+            'PUT', bucketName, '200', 'putBucketReplication');
         return callback(null, corsHeaders);
     });
 }
View File
@@ -9,9 +9,10 @@ const { pushMetric } = require('../utapi/utilities');
 const versioningNotImplBackends =
     require('../../constants').versioningNotImplBackends;
 const { config } = require('../Config');
+const monitoring = require('../utilities/monitoringHandler');
 const externalVersioningErrorMessage = 'We do not currently support putting ' +
-    'a versioned object to a location-constraint of type Azure.';
+    'a versioned object to a location-constraint of type Azure or GCP.';
 /**
  * Format of xml request:
@@ -60,9 +61,17 @@ function _checkBackendVersioningImplemented(bucket) {
     const bucketLocation = bucket.getLocationConstraint();
     const bucketLocationType = config.getLocationConstraintType(bucketLocation);
+    // backend types known not to support versioning
     if (versioningNotImplBackends[bucketLocationType]) {
         return false;
     }
+    // versioning disabled per-location constraint
+    const lc = config.getLocationConstraint(bucketLocation);
+    if (lc.details && !lc.details.supportsVersioning) {
+        return false;
+    }
     return true;
 }
@@ -83,7 +92,6 @@ function bucketPutVersioning(authInfo, request, log, callback) {
         bucketName,
         requestType: 'bucketPutVersioning',
     };
     return waterfall([
         next => _parseXML(request, log, next),
         next => metadataValidateBucket(metadataValParams, log,
@@ -93,6 +101,12 @@ function bucketPutVersioning(authInfo, request, log, callback) {
                 if (err) {
                     return next(err, bucket);
                 }
+                // prevent enabling versioning on an nfs exported bucket
+                if (bucket.isNFS()) {
+                    const error = new Error();
+                    error.code = 'NFSBUCKET';
+                    return next(error);
+                }
                 // _checkBackendVersioningImplemented returns false if versioning
                 // is not implemented on the bucket backend
                 if (!_checkBackendVersioningImplemented(bucket)) {
@@ -124,14 +138,22 @@ function bucketPutVersioning(authInfo, request, log, callback) {
     ], (err, bucket) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
+        if (err && err.code === 'NFSBUCKET') {
+            log.trace('skipping versioning for nfs exported bucket');
+            return callback(null, corsHeaders);
+        }
         if (err) {
             log.trace('error processing request', { error: err,
                 method: 'bucketPutVersioning' });
+            monitoring.promMetrics(
+                'PUT', bucketName, err.code, 'putBucketVersioning');
         } else {
             pushMetric('putBucketVersioning', log, {
                 authInfo,
                 bucket: bucketName,
            });
+            monitoring.promMetrics(
+                'PUT', bucketName, '200', 'putBucketVersioning');
         }
         return callback(err, corsHeaders);
     });
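The new per-location guard in _checkBackendVersioningImplemented reads a supportsVersioning flag from the location's details object. A hypothetical location-constraint entry that would make that check return false; only details.supportsVersioning is actually read by the hunk above, the other keys are illustrative:

// Hypothetical locationConfig entry (illustrative shape):
const exampleLocationConstraint = {
    type: 'file',               // illustrative
    legacyAwsBehavior: false,   // illustrative
    details: {
        // the check added above returns false for this location,
        // so versioning cannot be enabled on buckets placed here
        supportsVersioning: false,
    },
};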
View File
@@ -8,6 +8,7 @@ const { isBucketAuthorized } =
 const metadata = require('../metadata/wrapper');
 const { parseWebsiteConfigXml } = require('./apiUtils/bucket/bucketWebsite');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 const requestType = 'bucketPutWebsite';
@@ -26,6 +27,8 @@ function bucketPutWebsite(authInfo, request, log, callback) {
     const requestArn = authInfo.getArn();
     if (!request.post) {
+        monitoring.promMetrics(
+            'PUT', bucketName, 400, 'putBucketWebsite');
         return callback(errors.MissingRequestBodyError);
     }
     return async.waterfall([
@@ -70,11 +73,15 @@ function bucketPutWebsite(authInfo, request, log, callback) {
         if (err) {
             log.trace('error processing request', { error: err,
                 method: 'bucketPutWebsite' });
+            monitoring.promMetrics(
+                'PUT', bucketName, err.code, 'putBucketWebsite');
         } else {
             pushMetric('putBucketWebsite', log, {
                 authInfo,
                 bucket: bucketName,
             });
+            monitoring.promMetrics(
+                'PUT', bucketName, '200', 'putBucketWebsite');
         }
         return callback(err, corsHeaders);
     });
View File
@@ -14,9 +14,10 @@ const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
     .validateWebsiteHeader;
 const { config } = require('../Config');
 const multipleBackendGateway = require('../data/multipleBackendGateway');
+const monitoring = require('../utilities/monitoringHandler');
 const externalVersioningErrorMessage = 'We do not currently support putting ' +
-    'a versioned object to a location-constraint of type Azure.';
+    'a versioned object to a location-constraint of type Azure or GCP.';
 /*
 Sample xml response:
@@ -142,6 +143,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
             log.trace('error storing multipart object', {
                 error: err,
             });
+            monitoring.promMetrics('PUT', bucketName, err.code,
+                'initiateMultipartUpload');
             return callback(err, null, corsHeaders);
         }
         log.addDefaultFields({ uploadId });
@@ -151,6 +154,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
             bucket: bucketName,
             keys: [objectKey],
         });
+        monitoring.promMetrics('PUT', bucketName, '200',
+            'initiateMultipartUpload');
         return callback(null, xml, corsHeaders);
     });
 });
@@ -184,6 +189,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
             undefined, undefined, undefined, log,
             (err, dataBackendResObj) => {
                 if (err) {
+                    monitoring.promMetrics('PUT', bucketName, err.code,
+                        'initiateMultipartUpload');
                     return callback(err);
                 }
                 if (locConstraint &&
@@ -198,6 +205,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
                     log.debug(externalVersioningErrorMessage,
                         { method: 'initiateMultipartUpload',
                             error: errors.NotImplemented });
+                    monitoring.promMetrics('PUT', bucketName, 501,
+                        'initiateMultipartUpload');
                     return callback(errors.NotImplemented
                         .customizeDescription(externalVersioningErrorMessage));
                 }
@@ -229,12 +238,16 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
                 error: err,
                 method: 'metadataValidateBucketAndObj',
             });
+            monitoring.promMetrics('PUT', bucketName, err.code,
+                'initiateMultipartUpload');
             return callback(err, null, corsHeaders);
         }
         if (destinationBucket.hasDeletedFlag() &&
             accountCanonicalID !== destinationBucket.getOwner()) {
             log.trace('deleted flag on bucket and request ' +
                 'from non-owner account');
+            monitoring.promMetrics('PUT', bucketName, 404,
+                'initiateMultipartUpload');
             return callback(errors.NoSuchBucket);
         }
         if (destinationBucket.hasTransientFlag() ||
@@ -255,6 +268,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
                     // To avoid confusing user with error
                     // from cleaning up
                     // bucket return InternalError
+                    monitoring.promMetrics('PUT', bucketName, 500,
+                        'initiateMultipartUpload');
                     return callback(errors.InternalError,
                         null, corsHeaders);
                 }
View File
@@ -8,6 +8,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const services = require('../services');
 const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 // Sample XML response:
 /*
@@ -118,6 +119,8 @@ function listMultipartUploads(authInfo, request, log, callback) {
             let maxUploads = query['max-uploads'] !== undefined ?
                 Number.parseInt(query['max-uploads'], 10) : 1000;
             if (maxUploads < 0) {
+                monitoring.promMetrics('GET', bucketName, 400,
+                    'listMultipartUploads');
                 return callback(errors.InvalidArgument, bucket);
             }
             if (maxUploads > constants.listingHardLimit) {
@@ -141,6 +144,8 @@ function listMultipartUploads(authInfo, request, log, callback) {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, bucket);
         if (err) {
+            monitoring.promMetrics('GET', bucketName, err.code,
+                'listMultipartUploads');
             return callback(err, null, corsHeaders);
         }
         const xmlParams = {
@@ -156,6 +161,8 @@ function listMultipartUploads(authInfo, request, log, callback) {
             authInfo,
             bucket: bucketName,
         });
+        monitoring.promMetrics(
+            'GET', bucketName, '200', 'listMultipartUploads');
         return callback(null, xml, corsHeaders);
     });
 }
View File
@@ -11,6 +11,7 @@ const services = require('../services');
 const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const escapeForXml = s3middleware.escapeForXml;
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 const { config } = require('../../lib/Config');
 const multipleBackendGateway = require('../data/multipleBackendGateway');
@@ -82,6 +83,8 @@ function listParts(authInfo, request, log, callback) {
     let maxParts = Number.parseInt(request.query['max-parts'], 10) ?
         Number.parseInt(request.query['max-parts'], 10) : 1000;
     if (maxParts < 0) {
+        monitoring.promMetrics('GET', bucketName, 400,
+            'listMultipartUploadParts');
         return callback(errors.InvalidArgument);
     }
     if (maxParts > constants.listingHardLimit) {
@@ -280,11 +283,15 @@ function listParts(authInfo, request, log, callback) {
                 authInfo,
                 bucket: bucketName,
             });
+            monitoring.promMetrics(
+                'GET', bucketName, '200', 'listMultipartUploadParts');
             next(null, destBucket, xml.join(''));
         },
     ], (err, destinationBucket, xml) => {
         const corsHeaders = collectCorsHeaders(request.headers.origin,
             request.method, destinationBucket);
+        monitoring.promMetrics('GET', bucketName, 400,
+            'listMultipartUploadParts');
         return callback(err, xml, corsHeaders);
     });
     return undefined;
View File
@@ -17,6 +17,7 @@ const { preprocessingVersioningDelete }
     = require('./apiUtils/object/versioning');
 const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
 const { metadataGetObject } = require('../metadata/metadataUtils');
+const monitoring = require('../utilities/monitoringHandler');
 const versionIdUtils = versioning.VersionID;
@@ -196,6 +197,8 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
                     'null' : versionIdUtils.decode(entry.versionId);
             }
             if (decodedVersionId instanceof Error) {
+                monitoring.promMetrics('DELETE', bucketName, 404,
+                    'multiObjectDelete');
                 return callback(errors.NoSuchVersion);
             }
             return callback(null, decodedVersionId);
@@ -206,6 +209,8 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
                 versionId, log, (err, objMD) => {
                     // if general error from metadata return error
                     if (err && !err.NoSuchKey) {
+                        monitoring.promMetrics('DELETE', bucketName, err.code,
+                            'multiObjectDelete');
                         return callback(err);
                     }
                     if (err && err.NoSuchKey) {
@@ -307,11 +312,15 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
 function multiObjectDelete(authInfo, request, log, callback) {
     log.debug('processing request', { method: 'multiObjectDelete' });
     if (!request.post) {
+        monitoring.promMetrics('DELETE', request.bucketName, 400,
+            'multiObjectDelete');
         return callback(errors.MissingRequestBodyError);
     }
     const md5 = crypto.createHash('md5')
         .update(request.post, 'utf8').digest('base64');
     if (md5 !== request.headers['content-md5']) {
+        monitoring.promMetrics('DELETE', request.bucketName, 400,
+            'multiObjectDelete');
         return callback(errors.BadDigest);
     }
@@ -480,6 +489,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
             const corsHeaders = collectCorsHeaders(request.headers.origin,
                 request.method, bucket);
             if (err) {
+                monitoring.promMetrics('DELETE', bucketName, err.code,
+                    'multiObjectDelete');
                 return callback(err, null, corsHeaders);
             }
             const xml = _formatXML(quietSetting, errorResults,
@@ -492,6 +503,10 @@ function multiObjectDelete(authInfo, request, log, callback) {
                 byteLength: Number.parseInt(totalContentLengthDeleted, 10),
                 numberOfObjects: numOfObjectsRemoved,
             });
+            monitoring.promMetrics('DELETE', bucketName, '200',
+                'multiObjectDelete',
+                Number.parseInt(totalContentLengthDeleted, 10), null, null,
+                numOfObjectsRemoved);
             return callback(null, xml, corsHeaders);
         });
 }
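Both multiObjectDelete above and bucketPutCors earlier reject a request whose Content-MD5 header does not match the body. A small sketch of that guard in isolation, using only Node's crypto module the same way the handlers do:

const crypto = require('crypto');

// Returns true when the request body matches the Content-MD5 header,
// mirroring the BadDigest check in the handlers above.
function contentMd5Matches(body, contentMd5Header) {
    const md5 = crypto.createHash('md5')
        .update(body, 'utf8').digest('base64');
    return md5 === contentMd5Header;
}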
View File
@@ -3,6 +3,7 @@ const { errors } = require('arsenal');
 const abortMultipartUpload = require('./apiUtils/object/abortMultipartUpload');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');
+const monitoring = require('../utilities/monitoringHandler');
 const { pushMetric } = require('../utapi/utilities');
 /**
@@ -37,13 +38,13 @@ function multipartDelete(authInfo, request, log, callback) {
                 method: 'multipartDelete',
                 uploadId,
             });
-            // if legacy behavior is enabled for 'us-east-1' and
-            // request is from 'us-east-1', return 404 instead of
-            // 204
+            monitoring.promMetrics('DELETE', bucketName, 400,
+                'abortMultipartUpload');
             return callback(err, corsHeaders);
         }
+        monitoring.promMetrics('DELETE', bucketName, 400,
+            'abortMultipartUpload');
         if (!err) {
-            // NoSuchUpload should not be recorded by Utapi
             pushMetric('abortMultipartUpload', log, {
                 authInfo,
                 bucket: bucketName,
View File
@@ -20,12 +20,13 @@ const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const validateWebsiteHeader = require('./apiUtils/object/websiteServing')
     .validateWebsiteHeader;
 const { config } = require('../Config');
+const monitoring = require('../utilities/monitoringHandler');
 const versionIdUtils = versioning.VersionID;
 const locationHeader = constants.objectLocationConstraintHeader;
 const versioningNotImplBackends = constants.versioningNotImplBackends;
 const externalVersioningErrorMessage = 'We do not currently support putting ' +
-    'a versioned object to a location-constraint of type AWS or Azure.';
+    'a versioned object to a location-constraint of type AWS or Azure or GCP.';
 /**
  * Preps metadata to be saved (based on copy or replace request header)
@@ -218,6 +219,8 @@ function objectCopy(authInfo, request, sourceBucket,
         const err = errors.InvalidRedirectLocation;
         log.debug('invalid x-amz-website-redirect-location' +
             `value ${websiteRedirectHeader}`, { error: err });
+        monitoring.promMetrics(
+            'PUT', destBucketName, err.code, 'copyObject');
         return callback(err);
     }
     const queryContainsVersionId = checkQueryVersionId(request.query);
@@ -467,6 +470,8 @@ function objectCopy(authInfo, request, sourceBucket,
             request.method, destBucketMD);
         if (err) {
+            monitoring.promMetrics(
+                'PUT', destBucketName, err.code, 'copyObject');
             return callback(err, null, corsHeaders);
         }
         const xml = [
@@ -503,6 +508,8 @@ function objectCopy(authInfo, request, sourceBucket,
             newByteLength: sourceObjSize,
             oldByteLength: isVersioned ? null : destObjPrevSize,
         });
+        monitoring.promMetrics('PUT', destBucketName, '200',
+            'copyObject', sourceObjSize, destObjPrevSize, isVersioned);
         // Add expiration header if lifecycle enabled
         return callback(null, xml, additionalHeaders);
     });
View File
@@ -1,6 +1,6 @@
 const async = require('async');
-const { errors, versioning } = require('arsenal');
+const { errors, versioning, s3middleware } = require('arsenal');
+const checkDateModifiedHeaders = s3middleware.checkDateModifiedHeaders;
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const services = require('../services');
 const { pushMetric } = require('../utapi/utilities');
@@ -8,6 +8,7 @@ const createAndStoreObject = require('./apiUtils/object/createAndStoreObject');
 const { decodeVersionId, preprocessingVersioningDelete }
     = require('./apiUtils/object/versioning');
 const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
+const monitoring = require('../utilities/monitoringHandler');
 const versionIdUtils = versioning.VersionID;
@@ -24,6 +25,8 @@ function objectDelete(authInfo, request, log, cb) {
     log.debug('processing request', { method: 'objectDelete' });
     if (authInfo.isRequesterPublicUser()) {
         log.debug('operation not available for public user');
+        monitoring.promMetrics(
+            'DELETE', request.bucketName, 403, 'deleteObject');
         return cb(errors.AccessDenied);
     }
     const bucketName = request.bucketName;
@@ -99,11 +102,46 @@ function objectDelete(authInfo, request, log, cb) {
                 return next(null, bucketMD, objectMD, options);
             });
         },
+        function validateHeaders(bucketMD, objectMD, options, next) {
+            if (objectMD) {
+                const lastModified = objectMD['last-modified'];
+                const { modifiedSinceRes, unmodifiedSinceRes } =
+                    checkDateModifiedHeaders(request.headers, lastModified);
+                const err = modifiedSinceRes.error || unmodifiedSinceRes.error;
+                if (err) {
+                    return process.nextTick(() => next(err, bucketMD));
+                }
+            }
+            return process.nextTick(() =>
+                next(null, bucketMD, objectMD, options));
+        },
         function deleteOperation(bucketMD, objectMD, delOptions, next) {
             const deleteInfo = {
                 removeDeleteMarker: false,
                 newDeleteMarker: false,
             };
+            if (delOptions && delOptions.deleteData && bucketMD.isNFS() &&
+                bucketMD.getReplicationConfiguration()) {
+                // If an NFS bucket that has replication configured, we want
+                // to put a delete marker on the destination even though the
+                // source does not have versioning.
+                return createAndStoreObject(bucketName, bucketMD, objectKey,
+                    objectMD, authInfo, canonicalID, null, request, true, null,
+                    log, err => {
+                        if (err) {
+                            return next(err);
+                        }
+                        if (objectMD.isDeleteMarker) {
+                            // record that we deleted a delete marker to set
+                            // response headers accordingly
+                            deleteInfo.removeDeleteMarker = true;
+                        }
+                        return services.deleteObject(bucketName, objectMD,
+                            objectKey, delOptions, log, (err, delResult) =>
+                                next(err, bucketMD, objectMD, delResult,
+                                    deleteInfo));
+                    });
+            }
             if (delOptions && delOptions.deleteData) {
                 if (objectMD.isDeleteMarker) {
                     // record that we deleted a delete marker to set
@@ -136,6 +174,8 @@ function objectDelete(authInfo, request, log, cb) {
         if (err) {
             log.debug('error processing request', { error: err,
                 method: 'objectDelete' });
+            monitoring.promMetrics(
+                'DELETE', bucketName, err.code, 'deleteObject');
             return cb(err, resHeaders);
         }
         if (deleteInfo.newDeleteMarker) {
@@ -158,6 +198,8 @@ function objectDelete(authInfo, request, log, cb) {
                 keys: [objectKey],
                 byteLength: Number.parseInt(objectMD['content-length'], 10),
                 numberOfObjects: 1 });
+            monitoring.promMetrics('DELETE', bucketName, '200', 'deleteObject',
+                Number.parseInt(objectMD['content-length'], 10));
         }
         return cb(err, resHeaders);
     });
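objectDelete now honours conditional date headers before deleting. A sketch of the same check outside the waterfall, assuming only the checkDateModifiedHeaders signature visible in this hunk (it returns { modifiedSinceRes, unmodifiedSinceRes }, each possibly carrying an error):

const { s3middleware } = require('arsenal');
const checkDateModifiedHeaders = s3middleware.checkDateModifiedHeaders;

// Returns the conditional-header error to surface (if any) for a delete,
// given the request headers and the object's stored last-modified date.
function conditionalDeleteError(requestHeaders, lastModified) {
    const { modifiedSinceRes, unmodifiedSinceRes } =
        checkDateModifiedHeaders(requestHeaders, lastModified);
    return modifiedSinceRes.error || unmodifiedSinceRes.error || null;
}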
View File
@@ -6,6 +6,7 @@ const { decodeVersionId, getVersionIdResHeader }
 const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const { pushMetric } = require('../utapi/utilities');
+const monitoring = require('../utilities/monitoringHandler');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const metadata = require('../metadata/wrapper');
 const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
@@ -96,12 +97,16 @@ function objectDeleteTagging(authInfo, request, log, callback) {
         if (err) {
             log.trace('error processing request', { error: err,
                 method: 'objectDeleteTagging' });
+            monitoring.promMetrics(
+                'DELETE', bucketName, err.code, 'deleteObjectTagging');
         } else {
             pushMetric('deleteObjectTagging', log, {
                 authInfo,
                 bucket: bucketName,
                 keys: [objectKey],
             });
+            monitoring.promMetrics(
+                'DELETE', bucketName, '200', 'deleteObjectTagging');
             const verCfg = bucket.getVersioningConfiguration();
             additionalResHeaders['x-amz-version-id'] =
                 getVersionIdResHeader(verCfg, objectMD);

View File

@@ -9,7 +9,15 @@ const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const { pushMetric } = require('../utapi/utilities');
const { getVersionIdResHeader } = require('./apiUtils/object/versioning');
const setPartRanges = require('./apiUtils/object/setPartRanges');
const locationHeaderCheck =
require('./apiUtils/object/locationHeaderCheck');
const getReplicationBackendDataLocator =
require('./apiUtils/object/getReplicationBackendDataLocator');
const checkReadLocation = require('./apiUtils/object/checkReadLocation');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { config } = require('../Config');
const monitoring = require('../utilities/monitoringHandler');
const validateHeaders = s3middleware.validateConditionalHeaders;
@@ -27,6 +35,17 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
const bucketName = request.bucketName;
const objectKey = request.objectKey;
// returns name of location to get from and key if successful
const locCheckResult =
locationHeaderCheck(request.headers, objectKey, bucketName);
if (locCheckResult instanceof Error) {
log.trace('invalid location constraint to get from', {
location: request.headers['x-amz-location-constraint'],
error: locCheckResult,
});
return callback(locCheckResult);
}
const decodedVidResult = decodeVersionId(request.query);
if (decodedVidResult instanceof Error) {
log.trace('invalid versionId query', {
@@ -54,10 +73,14 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
error: err,
method: 'metadataValidateBucketAndObj',
});
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, null, corsHeaders);
}
if (!objMD) {
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, null, corsHeaders);
}
const verCfg = bucket.getVersioningConfiguration();
@@ -65,12 +88,16 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
const responseMetaHeaders = Object.assign({},
{ 'x-amz-delete-marker': true }, corsHeaders);
if (!versionId) {
monitoring.promMetrics(
'GET', bucketName, 404, 'getObject');
return callback(errors.NoSuchKey, null, responseMetaHeaders);
}
// return MethodNotAllowed if requesting a specific
// version that has a delete marker
responseMetaHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objMD);
monitoring.promMetrics(
'GET', bucketName, 405, 'getObject');
return callback(errors.MethodNotAllowed, null,
responseMetaHeaders);
}
@@ -90,6 +117,8 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
const { range, error } = parseRange(request.headers.range,
objLength);
if (error) {
monitoring.promMetrics(
'GET', bucketName, 400, 'getObject');
return callback(error, null, corsHeaders);
}
responseMetaHeaders['Accept-Ranges'] = 'bytes';
@@ -113,6 +142,37 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
// objMD.location is just a string
dataLocator = Array.isArray(objMD.location) ?
objMD.location : [{ key: objMD.location }];
const repConf = bucket.getReplicationConfiguration();
const prefReadLocation = repConf && repConf.preferredReadLocation;
const prefReadDataLocator = checkReadLocation(config,
prefReadLocation, objectKey, bucketName);
const targetLocation = locCheckResult || prefReadDataLocator ||
null;
if (targetLocation &&
targetLocation.location !== objMD.dataStoreName) {
const repBackendResult = getReplicationBackendDataLocator(
targetLocation, objMD.replicationInfo);
if (repBackendResult.error) {
log.error('Error with location constraint header', {
bucketName, objectKey, versionId,
error: repBackendResult.error,
status: repBackendResult.status,
});
return callback(repBackendResult.error, null, corsHeaders);
}
const targetDataLocator = repBackendResult.dataLocator;
if (targetDataLocator) {
dataLocator = targetDataLocator;
} else {
log.debug('using source location as preferred read ' +
'is unavailable', {
bucketName, objectKey, versionId,
reason: repBackendResult.reason,
});
}
}
// if the data backend is azure, there will only ever be at
// most one item in the dataLocator array
if (dataLocator[0] && dataLocator[0].dataStoreType === 'azure') {
@@ -125,18 +185,24 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
const error = errors.InvalidRequest
.customizeDescription('Cannot specify both Range ' +
'header and partNumber query parameter.');
monitoring.promMetrics(
'GET', bucketName, 400, 'getObject');
return callback(error, null, corsHeaders);
}
partNumber = Number.parseInt(request.query.partNumber, 10);
if (Number.isNaN(partNumber)) {
const error = errors.InvalidArgument
.customizeDescription('Part number must be a number.');
monitoring.promMetrics(
'GET', bucketName, 400, 'getObject');
return callback(error, null, corsHeaders);
}
if (partNumber < 1 || partNumber > 10000) {
const error = errors.InvalidArgument
.customizeDescription('Part number must be an ' +
'integer between 1 and 10000, inclusive.');
monitoring.promMetrics(
'GET', bucketName, 400, 'getObject');
return callback(error, null, corsHeaders);
}
}
@@ -144,6 +210,8 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
// get range for objects with multiple parts
if (byteRange && dataLocator.length > 1 &&
dataLocator[0].start === undefined) {
monitoring.promMetrics(
'GET', bucketName, 501, 'getObject');
return callback(errors.NotImplemented, null, corsHeaders);
}
if (objMD['x-amz-server-side-encryption']) {
@@ -161,6 +229,8 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
// Location objects prior to GA7.1 do not include the
// dataStoreETag field so we cannot find the part range
if (dataStoreETag === undefined) {
monitoring.promMetrics(
'GET', bucketName, 400, 'getObject');
return callback(errors.NotImplemented
.customizeDescription('PartNumber parameter for ' +
'this object is not supported'));
@@ -175,6 +245,8 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
}
}
if (locations.length === 0) {
monitoring.promMetrics(
'GET', bucketName, 400, 'getObject');
return callback(errors.InvalidPartNumber, null,
corsHeaders);
}
@@ -192,6 +264,8 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
if (err) {
log.error('error from external backend checking for ' +
'object existence', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err);
}
pushMetric('getObject', log, {
@@ -200,6 +274,8 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
newByteLength:
Number.parseInt(responseMetaHeaders['Content-Length'], 10),
});
monitoring.promMetrics('GET', bucketName, '200', 'getObject',
Number.parseInt(responseMetaHeaders['Content-Length'], 10));
return callback(null, dataLocator, responseMetaHeaders,
byteRange);
});
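The objectGet changes above let a GET be served from a replication target: locationHeaderCheck honours an explicit x-amz-location-constraint header, checkReadLocation resolves the replication configuration's preferredReadLocation, and getReplicationBackendDataLocator swaps the data locator when the replica is readable, falling back to the source otherwise. A hedged usage sketch of the header path; endpoint, credentials, bucket and location names are placeholders:

const AWS = require('aws-sdk');
const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    s3ForcePathStyle: true,
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
});
const req = s3.getObject({ Bucket: 'source-bucket', Key: 'photo.jpg' });
// inject the header that locationHeaderCheck parses
req.on('build', () => {
    req.httpRequest.headers['x-amz-location-constraint'] = 'aws-us-east-1';
});
req.send((err, data) => {
    if (err) {
        // an unknown location or an unreadable replica surfaces as an error here
        return console.error(err);
    }
    return console.log('read %d bytes from the requested location', data.Body.length);
});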

View File

@@ -9,6 +9,7 @@ const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const vault = require('../auth/vault');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
// Sample XML response:
/*
@@ -210,12 +211,15 @@ function objectGetACL(authInfo, request, log, callback) {
const resHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
if (err) {
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObjectAcl');
return callback(err, null, resHeaders);
}
pushMetric('getObjectAcl', log, {
authInfo,
bucket: bucketName,
});
monitoring.promMetrics('GET', bucketName, '200', 'getObjectAcl');
resHeaders['x-amz-version-id'] = resVersionId;
return callback(null, xml, resHeaders);
});

View File

@@ -8,6 +8,7 @@ const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { convertToXml } = s3middleware.tagging;
const monitoring = require('../utilities/monitoringHandler');
/**
* Object Get Tagging - Return tag for object
@@ -79,11 +80,15 @@ function objectGetTagging(authInfo, request, log, callback) {
if (err) {
log.trace('error processing request', { error: err,
method: 'objectGetTagging' });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObjectTagging');
} else {
pushMetric('getObjectTagging', log, {
authInfo,
bucket: bucketName,
});
monitoring.promMetrics(
'GET', bucketName, '200', 'getObjectTagging');
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objectMD);

View File

@@ -6,6 +6,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const collectResponseHeaders = require('../utilities/collectResponseHeaders');
const { pushMetric } = require('../utapi/utilities');
const { getVersionIdResHeader } = require('./apiUtils/object/versioning');
const monitoring = require('../utilities/monitoringHandler');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
@@ -51,10 +52,14 @@ function objectHead(authInfo, request, log, callback) {
error: err,
method: 'objectHead',
});
monitoring.promMetrics(
'HEAD', bucketName, err.code, 'headObject');
return callback(err, corsHeaders);
}
if (!objMD) {
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
monitoring.promMetrics(
'HEAD', bucketName, err.code, 'headObject');
return callback(err, corsHeaders);
}
const verCfg = bucket.getVersioningConfiguration();
@@ -62,12 +67,16 @@ function objectHead(authInfo, request, log, callback) {
const responseHeaders = Object.assign({},
{ 'x-amz-delete-marker': true }, corsHeaders);
if (!versionId) {
monitoring.promMetrics(
'HEAD', bucketName, 404, 'headObject');
return callback(errors.NoSuchKey, responseHeaders);
}
// return MethodNotAllowed if requesting a specific
// version that has a delete marker
responseHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objMD);
monitoring.promMetrics(
'HEAD', bucketName, 405, 'headObject');
return callback(errors.MethodNotAllowed, responseHeaders);
}
const headerValResult = validateHeaders(request.headers,
@@ -78,6 +87,7 @@ function objectHead(authInfo, request, log, callback) {
const responseHeaders =
collectResponseHeaders(objMD, corsHeaders, verCfg);
pushMetric('headObject', log, { authInfo, bucket: bucketName });
monitoring.promMetrics('HEAD', bucketName, '200', 'headObject');
return callback(null, responseHeaders);
});
}
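The delete-marker branches above map to the usual S3 semantics: a HEAD on a key whose latest version is a delete marker answers 404, while a HEAD that names the delete-marker version explicitly answers 405. A hedged client-side sketch; endpoint, credentials and names are placeholders:

const AWS = require('aws-sdk');
const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    s3ForcePathStyle: true,
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
});
// no versionId and the latest version is a delete marker -> expect 404
s3.headObject({ Bucket: 'versioned-bucket', Key: 'removed-key' }, err => {
    console.log('without versionId:', err && err.statusCode);
});
// addressing the delete marker version directly -> expect 405
s3.headObject({
    Bucket: 'versioned-bucket',
    Key: 'removed-key',
    VersionId: 'delete-marker-version-id',
}, err => {
    console.log('with versionId:', err && err.statusCode);
});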

View File

@@ -1,6 +1,7 @@
const async = require('async');
const { errors, versioning } = require('arsenal');
const constants = require('../../constants');
const aclUtils = require('../utilities/aclUtils');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -9,6 +10,7 @@ const { checkQueryVersionId } = require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const kms = require('../kms/wrapper');
const monitoring = require('../utilities/monitoringHandler');
const versionIdUtils = versioning.VersionID;
@@ -35,12 +37,21 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
log.debug('processing request', { method: 'objectPut' });
if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
log.trace('invalid acl header');
monitoring.promMetrics('PUT', request.bucketName, 400,
'putObject');
return callback(errors.InvalidArgument);
}
const queryContainsVersionId = checkQueryVersionId(request.query);
if (queryContainsVersionId instanceof Error) {
return callback(queryContainsVersionId);
}
const size = request.parsedContentLength;
if (Number.parseInt(size, 10) > constants.maximumAllowedUploadSize) {
log.debug('Upload size exceeds maximum allowed for a single PUT',
{ size });
return callback(errors.EntityTooLarge);
}
const bucketName = request.bucketName;
const objectKey = request.objectKey;
const requestType = 'objectPut';
@@ -57,11 +68,13 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
error: err,
method: 'metadataValidateBucketAndObj',
});
monitoring.promMetrics('PUT', bucketName, err.code, 'putObject');
return callback(err, responseHeaders);
}
if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) {
log.trace('deleted flag on bucket and request ' +
'from non-owner account');
monitoring.promMetrics('PUT', bucketName, 404, 'putObject');
return callback(errors.NoSuchBucket);
}
return async.waterfall([
@@ -86,6 +99,8 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
},
], (err, storingResult) => {
if (err) {
monitoring.promMetrics('PUT', bucketName, err.code,
'putObject');
return callback(err, responseHeaders);
}
const newByteLength = request.parsedContentLength;
@@ -118,6 +133,8 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
newByteLength,
oldByteLength: isVersionedObj ? null : oldByteLength,
});
monitoring.promMetrics('PUT', bucketName, '200',
'putObject', newByteLength, oldByteLength, isVersionedObj);
return callback(null, responseHeaders);
});
});
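objectPut now rejects a single-request upload whose Content-Length exceeds constants.maximumAllowedUploadSize with EntityTooLarge, mirroring the existing per-part cap in objectPutPart. A minimal sketch of the guard, assuming a 5 GiB limit only because that is the AWS cap for a single PUT; the diff does not state the constant's value:

// assumed value, see constants.js for the real maximumAllowedUploadSize
const assumedMaximumAllowedUploadSize = 5 * 1024 * 1024 * 1024;

function exceedsSinglePutLimit(parsedContentLength) {
    // parsedContentLength comes from the Content-Length header and may be a string
    return Number.parseInt(parsedContentLength, 10) > assumedMaximumAllowedUploadSize;
}

// exceedsSinglePutLimit('5368709121') === true  -> answered with EntityTooLarge
// exceedsSinglePutLimit('1048576') === false    -> request continues to the waterfall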

View File

@@ -10,6 +10,7 @@ const vault = require('../auth/vault');
const { decodeVersionId, getVersionIdResHeader }
= require('./apiUtils/object/versioning');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
/*
Format of xml request:
@@ -56,10 +57,12 @@ function objectPutACL(authInfo, request, log, cb) {
];
if (newCannedACL && possibleCannedACL.indexOf(newCannedACL) === -1) {
log.trace('invalid canned acl argument', { cannedAcl: newCannedACL });
monitoring.promMetrics('PUT', bucketName, 400, 'putObjectAcl');
return cb(errors.InvalidArgument);
}
if (!aclUtils.checkGrantHeaderValidity(request.headers)) {
log.trace('invalid acl header');
monitoring.promMetrics('PUT', bucketName, 400, 'putObjectAcl');
return cb(errors.InvalidArgument);
}
const possibleGroups = [
@@ -288,6 +291,8 @@ function objectPutACL(authInfo, request, log, cb) {
error: err,
method: 'objectPutACL',
});
monitoring.promMetrics(
'PUT', bucketName, err.code, 'putObjectAcl');
return cb(err, resHeaders);
}
@@ -301,6 +306,7 @@ function objectPutACL(authInfo, request, log, cb) {
bucket: bucketName,
keys: [objectKey],
});
monitoring.promMetrics('PUT', bucketName, '200', 'putObjectAcl');
return cb(null, resHeaders);
});
}

View File

@@ -11,6 +11,7 @@ const logger = require('../utilities/logger');
const services = require('../services');
const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
const versionIdUtils = versioning.VersionID;
@@ -46,6 +47,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
const partNumber = Number.parseInt(request.query.partNumber, 10);
// AWS caps partNumbers at 10,000
if (partNumber > 10000 || !Number.isInteger(partNumber) || partNumber < 1) {
monitoring.promMetrics('PUT', destBucketName, 400,
'putObjectCopyPart');
return callback(errors.InvalidArgument);
}
// We pad the partNumbers so that the parts will be sorted
@@ -335,6 +338,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
if (err && err !== skipError) {
log.trace('error from copy part waterfall',
{ error: err });
monitoring.promMetrics('PUT', destBucketName, err.code,
'putObjectCopyPart');
return callback(err, null, corsHeaders);
}
const xml = [
@@ -363,6 +368,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
newByteLength: copyObjectSize,
oldByteLength: prevObjectSize,
});
monitoring.promMetrics(
'PUT', destBucketName, '200', 'putObjectCopyPart');
return callback(null, xml, additionalHeaders);
});
}

View File

@@ -18,6 +18,7 @@ const { config } = require('../Config');
const multipleBackendGateway = require('../data/multipleBackendGateway');
const locationConstraintCheck
= require('./apiUtils/object/locationConstraintCheck');
const monitoring = require('../utilities/monitoringHandler');
const skipError = new Error('skip');
@@ -59,6 +60,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
log.debug('put part size too large', { size });
monitoring.promMetrics('PUT', request.bucketName, 400,
'putObjectPart');
return cb(errors.EntityTooLarge);
}
@@ -71,9 +74,13 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
const partNumber = Number.parseInt(request.query.partNumber, 10);
// AWS caps partNumbers at 10,000
if (partNumber > 10000) {
monitoring.promMetrics('PUT', request.bucketName, 400,
'putObjectPart');
return cb(errors.TooManyParts);
}
if (!Number.isInteger(partNumber) || partNumber < 1) {
monitoring.promMetrics('PUT', request.bucketName, 400,
'putObjectPart');
return cb(errors.InvalidArgument);
}
const bucketName = request.bucketName;
@@ -218,7 +225,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
// if data backend handles MPU, skip to end of waterfall
return next(skipError, destinationBucket,
partInfo.dataStoreETag);
-} else if (partInfo && partInfo.dataStoreType === 'azure') {
} else if (partInfo &&
constants.s3HandledBackends[partInfo.dataStoreType]) {
return next(null, destinationBucket,
objectLocationConstraint, cipherBundle, splitter,
partInfo);
@@ -267,7 +275,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
(destinationBucket, objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations, partInfo, next) => {
// NOTE: set oldLocations to null so we do not batchDelete for now
-if (partInfo && partInfo.dataStoreType === 'azure') {
if (partInfo &&
constants.skipBatchDeleteBackends[partInfo.dataStoreType]) {
// skip to storing metadata
return next(null, destinationBucket, partInfo,
partInfo.dataStoreETag,
@@ -371,6 +380,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
error: err,
method: 'objectPutPart',
});
monitoring.promMetrics('PUT', bucketName, err.code,
'putObjectPart');
return cb(err, null, corsHeaders);
}
pushMetric('uploadPart', log, {
@@ -380,6 +391,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
newByteLength: size,
oldByteLength: prevObjectSize,
});
monitoring.promMetrics('PUT', bucketName,
'200', 'putObjectPart', size, prevObjectSize);
return cb(null, hexDigest, corsHeaders);
});
}
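The two azure-only comparisons above become lookups in constants maps, so other backends that handle multipart uploads natively can take the same branches. The membership of those maps is defined in constants.js and is not shown in this diff; the sketch below only illustrates the shape the lookups assume, with 'azure' and 'gcp' as plausible entries:

// hedged sketch of the assumed constants shape, not a copy of constants.js
const s3HandledBackends = { azure: true, gcp: true };
const skipBatchDeleteBackends = { azure: true };

function backendHandlesMpu(partInfo) {
    // truthy-map lookup replaces the old dataStoreType === 'azure' comparison
    return Boolean(partInfo && s3HandledBackends[partInfo.dataStoreType]);
}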

View File

@@ -6,6 +6,7 @@ const { decodeVersionId, getVersionIdResHeader } =
const { metadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const getReplicationInfo = require('./apiUtils/object/getReplicationInfo');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
@@ -102,12 +103,16 @@ function objectPutTagging(authInfo, request, log, callback) {
if (err) {
log.trace('error processing request', { error: err,
method: 'objectPutTagging' });
monitoring.promMetrics('PUT', bucketName, err.code,
'putObjectTagging');
} else {
pushMetric('putObjectTagging', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
});
monitoring.promMetrics(
'PUT', bucketName, '200', 'putObjectTagging');
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =
getVersionIdResHeader(verCfg, objectMD);

View File

@@ -2,6 +2,7 @@ const { errors } = require('arsenal');
const constants = require('../../constants');
const services = require('../services');
const monitoring = require('../utilities/monitoringHandler');
/*
* Format of xml response:
@@ -55,6 +56,8 @@ function serviceGet(authInfo, request, log, callback) {
if (authInfo.isRequesterPublicUser()) {
log.debug('operation not available for public user');
monitoring.promMetrics(
'GET', request.bucketName, 403, 'getService');
return callback(errors.AccessDenied);
}
const xml = [];
@@ -73,12 +76,15 @@ function serviceGet(authInfo, request, log, callback) {
return services.getService(authInfo, request, log, constants.splitter,
(err, userBuckets, splitter) => {
if (err) {
monitoring.promMetrics(
'GET', userBuckets, err.code, 'getService');
return callback(err);
}
// TODO push metric for serviceGet
// pushMetric('getService', log, {
// bucket: bucketName,
// });
monitoring.promMetrics('GET', userBuckets, '200', 'getService');
return callback(null, generateXml(xml, canonicalId, userBuckets,
splitter));
});

View File

@@ -32,6 +32,8 @@ function _errorActions(err, errorDocument, routingRules,
objectKey, err.code);
if (errRoutingRule) {
// route will redirect
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, null, corsHeaders, errRoutingRule,
objectKey);
}
@@ -42,6 +44,8 @@ function _errorActions(err, errorDocument, routingRules,
// error retrieving error document so return original error
// and set boolean of error retrieving user's error document
// to true
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, true, null, corsHeaders);
}
// return the default error message if the object is private
@@ -49,6 +53,8 @@ function _errorActions(err, errorDocument, routingRules,
if (!isObjAuthorized(bucket, errObjMD, 'objectGet',
constants.publicId)) {
log.trace('errorObj not authorized', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, true, null, corsHeaders);
}
const dataLocator = errObjMD.location;
@@ -67,9 +73,13 @@ function _errorActions(err, errorDocument, routingRules,
bucket: bucketName,
newByteLength: responseMetaHeaders['Content-Length'],
});
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, dataLocator, responseMetaHeaders);
});
}
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false, null, corsHeaders);
}
@@ -89,16 +99,22 @@ function websiteGet(request, log, callback) {
return metadata.getBucket(bucketName, log, (err, bucket) => {
if (err) {
log.trace('error retrieving bucket metadata', { error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
return callback(err, false);
}
if (bucketShield(bucket, 'objectGet')) {
log.trace('bucket in transient/deleted state so shielding');
monitoring.promMetrics(
'GET', bucketName, 404, 'getObject');
return callback(errors.NoSuchBucket, false);
}
const corsHeaders = collectCorsHeaders(request.headers.origin,
request.method, bucket);
const websiteConfig = bucket.getWebsiteConfiguration();
if (!websiteConfig) {
monitoring.promMetrics(
'GET', bucketName, 404, 'getObject');
return callback(errors.NoSuchWebsiteConfiguration, false, null,
corsHeaders);
}
@@ -142,6 +158,8 @@ function websiteGet(request, log, callback) {
if (err) {
log.trace('error retrieving object metadata',
{ error: err });
monitoring.promMetrics(
'GET', bucketName, err.code, 'getObject');
let returnErr = err;
const bucketAuthorized = isBucketAuthorized(bucket,
'bucketGet', constants.publicId);
@@ -205,6 +223,8 @@ function websiteGet(request, log, callback) {
bucket: bucketName,
newByteLength: responseMetaHeaders['Content-Length'],
});
monitoring.promMetrics('GET', bucketName, '200',
'getObject', responseMetaHeaders['Content-Length']);
return callback(null, false, dataLocator, responseMetaHeaders);
});
});

View File

@@ -12,7 +12,6 @@ const { pushMetric } = require('../utapi/utilities');
const { isBucketAuthorized, isObjAuthorized } =
require('./apiUtils/authorization/permissionChecks');
/**
* _errorActions - take a number of actions once have error getting obj
* @param {object} err - arsenal errors object
@@ -51,10 +50,14 @@ function websiteHead(request, log, callback) {
return metadata.getBucket(bucketName, log, (err, bucket) => {
if (err) {
log.trace('error retrieving bucket metadata', { error: err });
monitoring.promMetrics(
'HEAD', bucketName, err.code, 'headObject');
return callback(err);
}
if (bucketShield(bucket, 'objectHead')) {
log.trace('bucket in transient/deleted state so shielding');
monitoring.promMetrics(
'HEAD', bucketName, 404, 'headObject');
return callback(errors.NoSuchBucket);
}
const corsHeaders = collectCorsHeaders(request.headers.origin,
@@ -63,6 +66,8 @@ function websiteHead(request, log, callback) {
// head of an object. object ACL's are what matter
const websiteConfig = bucket.getWebsiteConfiguration();
if (!websiteConfig) {
monitoring.promMetrics(
'HEAD', bucketName, 404, 'headObject');
return callback(errors.NoSuchWebsiteConfiguration);
}
// any errors above would be generic header error response
@@ -151,6 +156,8 @@ function websiteHead(request, log, callback) {
pushMetric('headObject', log, {
bucket: bucketName,
});
monitoring.promMetrics(
'HEAD', bucketName, '200', 'headObject');
return callback(null, responseMetaHeaders);
});
});

View File

@@ -1,17 +1,29 @@
const serviceAccountPrefix =
require('arsenal').constants.zenkoServiceAccount;
/** build simple authdata with only one account
* @param {string} accessKey - account's accessKey
* @param {string} secretKey - account's secretKey
* @param {string} canonicalId - account's canonical id
* @param {string} [serviceName] - service name to use to generate can id
* @param {string} userName - account's user name
* @return {object} authdata - authdata with account's accessKey and secretKey
*/
-function buildAuthDataAccount(accessKey, secretKey) {
function buildAuthDataAccount(accessKey, secretKey, canonicalId, serviceName,
userName) {
// TODO: remove specific check for clueso and generate unique
// canonical id's for accounts
const finalCanonicalId = canonicalId ||
(serviceName ? `${serviceAccountPrefix}/${serviceName}` :
'12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47qwer');
const shortid = '123456789012';
return {
accounts: [{
-name: 'CustomAccount',
name: userName || 'CustomAccount',
email: 'customaccount1@setbyenv.com',
-arn: 'arn:aws:iam::123456789012:root',
arn: `arn:aws:iam::${shortid}:root`,
-canonicalID: '12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d52' +
-'18e7cd47qwer',
-shortid: '123456789012',
canonicalID: finalCanonicalId,
shortid,
keys: [{
access: accessKey,
secret: secretKey,
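buildAuthDataAccount gains an explicit canonical id, an optional service name (which produces a Zenko service-account canonical id of the form `${serviceAccountPrefix}/${serviceName}`), and a display name. A hedged usage sketch, assuming the builder module exports the function directly; the require path and key values are placeholders:

const buildAuthDataAccount = require('./lib/auth/in_memory/builder');

// same behaviour as before: default name and hard-coded canonical id
const basic = buildAuthDataAccount('accessKey1', 'verySecretKey1');

// service account: canonical id becomes `<serviceAccountPrefix>/clueso`
const service = buildAuthDataAccount('cluesoKey1', 'cluesoSecret1', null, 'clueso', 'Clueso');

console.log(basic.accounts[0].canonicalID, service.accounts[0].canonicalID);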

View File

@@ -1,5 +1,6 @@
-const { errors, s3middleware } = require('arsenal');
const AWS = require('aws-sdk');
const { errors, s3middleware } = require('arsenal');
const werelogs = require('werelogs');
const MD5Sum = s3middleware.MD5Sum;
const getMetaHeaders = s3middleware.userMetadata.getMetaHeaders;
const createLogger = require('../multipleBackendLogger');
@@ -15,12 +16,48 @@ const missingVerIdInternalError = errors.InternalError.customizeDescription(
class AwsClient {
constructor(config) {
this.clientType = 'aws_s3';
this.type = 'AWS';
this._s3Params = config.s3Params;
this._awsBucketName = config.bucketName;
this._bucketMatch = config.bucketMatch;
this._dataStoreName = config.dataStoreName;
this._serverSideEncryption = config.serverSideEncryption;
this._supportsVersioning = config.supportsVersioning;
this._client = new AWS.S3(this._s3Params);
this._logger = new werelogs.Logger('AwsClient');
}
setup(cb) {
// this request implicitly updates the endpoint for the location
// the following code explcitly sets it to avoid surprises
this._client.getBucketLocation({ Bucket: this._awsBucketName },
(err, res) => {
if (err && err.code !== 'AuthorizationHeaderMalformed') {
this._logger.error('error during setup', {
error: err,
method: 'AwsClient.setup',
});
return cb(err);
}
let region;
if (err && err.code === 'AuthorizationHeaderMalformed') {
// set regional endpoint
region = err.region;
} else if (res) {
region = res.LocationConstraint;
}
const isAWS = this._s3Params.endpoint.endsWith('amazonaws.com');
if (region && isAWS) {
const endpoint = `s3.${region}.amazonaws.com`;
this._logger.debug('setting regional endpoint', {
method: 'AwsClient.setup',
region,
endpoint,
});
this._client.endpoint = new AWS.Endpoint(endpoint);
}
return cb();
});
}
_createAwsKey(requestBucketName, requestObjectKey,
@@ -30,6 +67,14 @@ class AwsClient {
}
return `${requestBucketName}/${requestObjectKey}`;
}
toObjectGetInfo(objectKey, bucketName) {
return {
key: this._createAwsKey(bucketName, objectKey, this._bucketMatch),
dataStoreName: this._dataStoreName,
};
}
put(stream, size, keyContext, reqUids, callback) {
const awsKey = this._createAwsKey(keyContext.bucketName,
keyContext.objectKey, this._bucketMatch);
@@ -39,16 +84,16 @@ class AwsClient {
const putCb = (err, data) => {
if (err) {
logHelper(log, 'error', 'err from data backend',
-err, this._dataStoreName);
err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
-if (!data.VersionId) {
if (!data.VersionId && this._supportsVersioning) {
logHelper(log, 'error', 'missing version id for data ' +
'backend object', missingVerIdInternalError,
-this._dataStoreName);
this._dataStoreName, this.clientType);
return callback(missingVerIdInternalError);
}
const dataStoreVersionId = data.VersionId;
@@ -106,14 +151,15 @@ class AwsClient {
if (err.code === 'NotFound') {
const error = errors.ServiceUnavailable
.customizeDescription(
-'Unexpected error from AWS: "NotFound". Data on AWS ' +
`Unexpected error from ${this.type}: ` +
`"NotFound". Data on ${this.type} ` +
'may have been altered outside of CloudServer.'
);
return callback(error);
}
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
return callback();
@@ -128,12 +174,14 @@ class AwsClient {
VersionId: dataStoreVersionId,
Range: range ? `bytes=${range[0]}-${range[1]}` : null,
}).on('success', response => {
-log.trace('AWS GET request response headers',
log.trace(`${this.type} GET request response headers`,
-{ responseHeaders: response.httpResponse.headers });
{ responseHeaders: response.httpResponse.headers,
backendType: this.clientType });
});
const stream = request.createReadStream().on('error', err => {
-logHelper(log, 'error', 'error streaming data from AWS',
logHelper(log, 'error',
-err, this._dataStoreName);
`error streaming data from ${this.type}`,
err, this._dataStoreName, this.clientType);
return callback(err);
});
return callback(null, stream);
@@ -151,8 +199,8 @@ class AwsClient {
return this._client.deleteObject(params, err => {
if (err) {
logHelper(log, 'error', 'error deleting object from ' +
-'datastore', err, this._dataStoreName);
'datastore', err, this._dataStoreName, this.clientType);
-if (err.code === 'NoSuchVersion') {
if (err.code === 'NoSuchVersion' || err.code === 'NoSuchKey') {
// data may have been deleted directly from the AWS backend
// don't want to retry the delete and errors are not
// sent back to client anyway, so no need to return err
@@ -160,7 +208,7 @@ class AwsClient {
}
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
return callback();
@@ -176,6 +224,12 @@ class AwsClient {
awsResp[location] = { error: err, external: true };
return callback(null, awsResp);
}
if (!this._supportsVersioning) {
awsResp[location] = {
message: 'Congrats! You own the bucket',
};
return callback(null, awsResp);
}
return this._client.getBucketVersioning({
Bucket: this._awsBucketName },
(err, data) => {
@@ -208,14 +262,13 @@ class AwsClient {
metaHeadersTrimmed[headerKey] = metaHeaders[header];
}
});
-Object.assign(metaHeaders, metaHeadersTrimmed);
const awsBucket = this._awsBucketName;
const awsKey = this._createAwsKey(bucketName, key, this._bucketMatch);
const params = {
Bucket: awsBucket,
Key: awsKey,
WebsiteRedirectLocation: websiteRedirectHeader,
-Metadata: metaHeaders,
Metadata: metaHeadersTrimmed,
ContentType: contentType,
CacheControl: cacheControl,
ContentDisposition: contentDisposition,
@@ -224,10 +277,10 @@ class AwsClient {
return this._client.createMultipartUpload(params, (err, mpuResObj) => {
if (err) {
logHelper(log, 'error', 'err from data backend',
-err, this._dataStoreName);
err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
return callback(null, mpuResObj);
@@ -252,10 +305,10 @@ class AwsClient {
return this._client.uploadPart(params, (err, partResObj) => {
if (err) {
logHelper(log, 'error', 'err from data backend ' +
-'on uploadPart', err, this._dataStoreName);
'on uploadPart', err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
// Because we manually add quotes to ETag later, remove quotes here
@@ -280,10 +333,10 @@ class AwsClient {
return this._client.listParts(params, (err, partList) => {
if (err) {
logHelper(log, 'error', 'err from data backend on listPart',
-err, this._dataStoreName);
err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
// build storedParts object to mimic Scality S3 backend returns
@@ -348,20 +401,20 @@ class AwsClient {
if (err) {
if (mpuError[err.code]) {
logHelper(log, 'trace', 'err from data backend on ' +
-'completeMPU', err, this._dataStoreName);
'completeMPU', err, this._dataStoreName, this.clientType);
return callback(errors[err.code]);
}
logHelper(log, 'error', 'err from data backend on ' +
-'completeMPU', err, this._dataStoreName);
'completeMPU', err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
-if (!completeMpuRes.VersionId) {
if (!completeMpuRes.VersionId && this._supportsVersioning) {
logHelper(log, 'error', 'missing version id for data ' +
'backend object', missingVerIdInternalError,
-this._dataStoreName);
this._dataStoreName, this.clientType);
return callback(missingVerIdInternalError);
}
// need to get content length of new object to store
@@ -370,17 +423,18 @@ class AwsClient {
(err, objHeaders) => {
if (err) {
logHelper(log, 'trace', 'err from data backend on ' +
-'headObject', err, this._dataStoreName);
'headObject', err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
// remove quotes from eTag because they're added later
completeObjData.eTag = completeMpuRes.ETag
.substring(1, completeMpuRes.ETag.length - 1);
completeObjData.dataStoreVersionId = completeMpuRes.VersionId;
-completeObjData.contentLength = objHeaders.ContentLength;
completeObjData.contentLength =
Number.parseInt(objHeaders.ContentLength, 10);
return callback(null, completeObjData);
});
});
@@ -396,10 +450,11 @@ class AwsClient {
if (err) {
logHelper(log, 'error', 'There was an error aborting ' +
'the MPU on AWS S3. You should abort directly on AWS S3 ' +
-'using the same uploadId.', err, this._dataStoreName);
'using the same uploadId.', err, this._dataStoreName,
this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
return callback();
@@ -424,10 +479,11 @@ class AwsClient {
return this._client.putObjectTagging(tagParams, err => {
if (err) {
logHelper(log, 'error', 'error from data backend on ' +
-'putObjectTagging', err, this._dataStoreName);
'putObjectTagging', err,
this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
return callback();
@@ -446,10 +502,11 @@ class AwsClient {
return this._client.deleteObjectTagging(tagParams, err => {
if (err) {
logHelper(log, 'error', 'error from data backend on ' +
-'deleteObjectTagging', err, this._dataStoreName);
'deleteObjectTagging', err,
this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
return callback();
@@ -482,24 +539,24 @@ class AwsClient {
if (err) {
if (err.code === 'AccessDenied') {
logHelper(log, 'error', 'Unable to access ' +
-`${sourceAwsBucketName} AWS bucket`, err,
`${sourceAwsBucketName} ${this.type} bucket`, err,
-this._dataStoreName);
this._dataStoreName, this.clientType);
return callback(errors.AccessDenied
.customizeDescription('Error: Unable to access ' +
-`${sourceAwsBucketName} AWS bucket`)
`${sourceAwsBucketName} ${this.type} bucket`)
);
}
logHelper(log, 'error', 'error from data backend on ' +
-'copyObject', err, this._dataStoreName);
'copyObject', err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
`${this.type}: ${err.message}`)
);
}
-if (!copyResult.VersionId) {
if (!copyResult.VersionId && this._supportsVersioning) {
logHelper(log, 'error', 'missing version id for data ' +
'backend object', missingVerIdInternalError,
-this._dataStoreName);
this._dataStoreName, this.clientType);
return callback(missingVerIdInternalError);
}
return callback(null, destAwsKey, copyResult.VersionId);
@ -532,17 +589,17 @@ class AwsClient {
if (err.code === 'AccessDenied') { if (err.code === 'AccessDenied') {
logHelper(log, 'error', 'Unable to access ' + logHelper(log, 'error', 'Unable to access ' +
`${sourceAwsBucketName} AWS bucket`, err, `${sourceAwsBucketName} AWS bucket`, err,
this._dataStoreName); this._dataStoreName, this.clientType);
return callback(errors.AccessDenied return callback(errors.AccessDenied
.customizeDescription('Error: Unable to access ' + .customizeDescription('Error: Unable to access ' +
`${sourceAwsBucketName} AWS bucket`) `${sourceAwsBucketName} AWS bucket`)
); );
} }
logHelper(log, 'error', 'error from data backend on ' + logHelper(log, 'error', 'error from data backend on ' +
'uploadPartCopy', err, this._dataStoreName); 'uploadPartCopy', err, this._dataStoreName, this.clientType);
return callback(errors.ServiceUnavailable return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' + .customizeDescription('Error returned from ' +
`AWS: ${err.message}`) `${this.type}: ${err.message}`)
); );
} }
const eTag = removeQuotes(res.CopyPartResult.ETag); const eTag = removeQuotes(res.CopyPartResult.ETag);
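Across these AwsClient hunks the change is one pattern: backend failures are now logged with the backend's clientType and wrapped for the caller as ServiceUnavailable, with the configurable backend type (this.type) substituted into the description instead of a hard-coded 'AWS'. A minimal sketch of that pattern, not part of the diff, using arsenal's errors object as the hunks do; the helper name wrapBackendError is hypothetical:

const { errors } = require('arsenal');

// Hypothetical helper showing the wrapping used above; `type` would be a
// backend label such as 'AWS' or 'GCP', `err` the raw SDK error.
function wrapBackendError(type, err) {
    return errors.ServiceUnavailable
        .customizeDescription(`Error returned from ${type}: ${err.message}`);
}

// Usage inside a data-backend callback, mirroring the diff:
// if (err) {
//     logHelper(log, 'error', 'error from data backend on copyObject',
//         err, this._dataStoreName, this.clientType);
//     return callback(wrapBackendError(this.type, err));
// }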


@@ -9,8 +9,12 @@ const { validateAndFilterMpuParts } =
require('../../api/apiUtils/object/processMpuParts');
const constants = require('../../../constants');
const metadata = require('../../metadata/wrapper');
+ const packageVersion = require('../../../package.json').version;
const azureMpuUtils = s3middleware.azureHelper.mpuUtils;
+ azure.Constants.USER_AGENT_PRODUCT_NAME = constants.productName;
+ azure.Constants.USER_AGENT_PRODUCT_VERSION = packageVersion;
class AzureClient {
constructor(config) {
this._azureStorageEndpoint = config.azureStorageEndpoint;
@@ -22,12 +26,16 @@ class AzureClient {
this._azureStorageEndpoint);
this._dataStoreName = config.dataStoreName;
this._bucketMatch = config.bucketMatch;
- if (config.proxy) {
- const parsedUrl = url.parse(config.proxy);
+ if (config.proxy && config.proxy.url) {
+ const parsedUrl = url.parse(config.proxy.url);
if (!parsedUrl.port) {
parsedUrl.port = 80;
}
- this._client.setProxy(parsedUrl);
+ const proxyParams = parsedUrl;
+ if (config.proxy.certs) {
+ Object.assign(proxyParams, config.proxy.certs);
+ }
+ this._client.setProxy(proxyParams);
}
}
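For reference, a sketch (not part of the diff) of the config shape this constructor now expects when a proxy is used: only proxy.url and proxy.certs are read above, and the field names inside certs are illustrative assumptions.

// Hypothetical location configuration for an Azure backend behind a proxy.
const config = {
    azureStorageEndpoint: 'https://myaccount.blob.core.windows.net/',
    dataStoreName: 'azurebackend',
    bucketMatch: false,
    proxy: {
        url: 'http://proxy.example.com:3128',
        // merged into the proxy params via Object.assign; the key names
        // below are assumptions for illustration
        certs: {
            key: '<client key PEM>',
            cert: '<client certificate PEM>',
            ca: '<CA bundle PEM>',
        },
    },
};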
@@ -102,6 +110,13 @@ class AzureClient {
});
}
+ toObjectGetInfo(objectKey, bucketName) {
+ return {
+ key: this._createAzureKey(bucketName, objectKey, this._bucketMatch),
+ dataStoreName: this._dataStoreName,
+ };
+ }
put(stream, size, keyContext, reqUids, callback) {
const log = createLogger(reqUids);
// before blob is put, make sure there is no ongoing MPU with same key


@@ -0,0 +1,33 @@
const { errors } = require('arsenal');
const MpuHelper = require('./mpuHelper');
const { createMpuKey, logger } = require('../GcpUtils');
const { logHelper } = require('../../utils');
/**
* abortMPU - remove all objects of a GCP Multipart Upload
* @param {object} params - abortMPU params
* @param {string} params.Bucket - bucket name
* @param {string} params.MPU - mpu bucket name
* @param {string} params.Key - object key
* @param {number} params.UploadId - MPU upload id
* @param {function} callback - callback function to call
* @return {undefined}
*/
function abortMPU(params, callback) {
if (!params || !params.Key || !params.UploadId ||
!params.Bucket || !params.MPU) {
const error = errors.InvalidRequest
.customizeDescription('Missing required parameter');
logHelper(logger, 'error', 'error in abortMultipartUpload', error);
return callback(error);
}
const mpuHelper = new MpuHelper(this);
const delParams = {
Bucket: params.Bucket,
MPU: params.MPU,
Prefix: createMpuKey(params.Key, params.UploadId),
};
return mpuHelper.removeParts(delParams, callback);
}
module.exports = abortMPU;
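A minimal usage sketch, not part of the diff, assuming a client object that exposes these modules under the names listed in GcpApis/index.js further down; the bucket names, key and uploadId are placeholders.

// Hypothetical caller; `gcpClient` is assumed to expose abortMultipartUpload
// as wired in GcpApis/index.js.
function abortExample(gcpClient, callback) {
    return gcpClient.abortMultipartUpload({
        Bucket: 'main-bucket',        // destination bucket
        MPU: 'mpu-shadow-bucket',     // bucket holding the MPU part objects
        Key: 'photos/cat.jpg',
        UploadId: 'exampleuploadid',  // id returned by createMultipartUpload
    }, err => {
        // err is InvalidRequest when a required parameter is missing
        if (err) {
            return callback(err);
        }
        return callback();
    });
}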


@@ -0,0 +1,69 @@
const async = require('async');
const { errors } = require('arsenal');
const MpuHelper = require('./mpuHelper');
const { createMpuKey, logger } = require('../GcpUtils');
const { logHelper } = require('../../utils');
/**
* completeMPU - merges a list of parts into a single object
* @param {object} params - completeMPU params
* @param {string} params.Bucket - bucket name
* @param {string} params.MPU - mpu bucket name
* @param {string} params.Key - object key
* @param {number} params.UploadId - MPU upload id
* @param {Object} params.MultipartUpload - MPU upload object
* @param {Object[]} params.MultipartUpload.Parts - a list of parts to merge
* @param {function} callback - callback function to call with MPU result
* @return {undefined}
*/
function completeMPU(params, callback) {
if (!params || !params.MultipartUpload ||
!params.MultipartUpload.Parts || !params.UploadId ||
!params.Bucket || !params.Key) {
const error = errors.InvalidRequest
.customizeDescription('Missing required parameter');
logHelper(logger, 'error', 'error in completeMultipartUpload', error);
return callback(error);
}
const partList = params.MultipartUpload.Parts;
// verify that the part list is in order
if (params.MultipartUpload.Parts.length === 0) {
const error = errors.InvalidRequest
.customizeDescription('You must specify at least one part');
logHelper(logger, 'error', 'error in completeMultipartUpload', error);
return callback(error);
}
for (let ind = 1; ind < partList.length; ++ind) {
if (partList[ind - 1].PartNumber >= partList[ind].PartNumber) {
logHelper(logger, 'error', 'error in completeMultipartUpload',
errors.InvalidPartOrder);
return callback(errors.InvalidPartOrder);
}
}
const mpuHelper = new MpuHelper(this); // this === GcpClient
return async.waterfall([
next => {
// first compose, performed in the mpu bucket:
// GCP compose accepts at most 32 components per call, so up to
// 10,000 parts collapse into at most ceil(10000 / 32) = 313 objects
logger.trace('completeMultipartUpload: compose',
{ partCount: partList.length });
mpuHelper.splitMerge(params, partList, 'compose', next);
},
(numParts, next) => mpuHelper.composeFinal(numParts, params, next),
(result, next) => mpuHelper.generateMpuResult(result, partList, next),
(result, aggregateETag, next) =>
mpuHelper.copyToMain(result, aggregateETag, params, next),
(mpuResult, next) => {
const delParams = {
Bucket: params.Bucket,
MPU: params.MPU,
Prefix: createMpuKey(params.Key, params.UploadId),
};
return mpuHelper.removeParts(delParams,
err => next(err, mpuResult));
},
], callback);
}
module.exports = completeMPU;
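A usage sketch under the same assumptions; Parts must be in ascending PartNumber order or InvalidPartOrder is returned, and the PartName values (assumed here to be the part objects' keys in the MPU bucket) are what the compose phase consumes.

// Hypothetical caller; values are placeholders.
function completeExample(gcpClient, callback) {
    return gcpClient.completeMultipartUpload({
        Bucket: 'main-bucket',
        MPU: 'mpu-shadow-bucket',
        Key: 'photos/cat.jpg',
        UploadId: 'exampleuploadid',
        MultipartUpload: {
            Parts: [
                { PartNumber: 1, ETag: '"etagofpart1"', PartName: 'part-key-1' },
                { PartNumber: 2, ETag: '"etagofpart2"', PartName: 'part-key-2' },
            ],
        },
    }, (err, mpuResult) => {
        if (err) {
            return callback(err);
        }
        // mpuResult carries Bucket, Key, VersionId, the aggregate ETag and
        // ContentLength (see copyToMain in mpuHelper.js below)
        return callback(null, mpuResult);
    });
}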


@@ -0,0 +1,51 @@
const uuid = require('uuid/v4');
const { errors } = require('arsenal');
const { createMpuKey, logger, getPutTagsMetadata } = require('../GcpUtils');
const { logHelper } = require('../../utils');
/**
* createMPU - creates an MPU upload on GCP (sets a 0-byte object placeholder
* for the final composed object)
* @param {object} params - createMPU param
* @param {string} params.Bucket - bucket name
* @param {string} params.Key - object key
* @param {string} params.Metadata - object Metadata
* @param {string} params.ContentType - Content-Type header
* @param {string} params.CacheControl - Cache-Control header
* @param {string} params.ContentDisposition - Content-Disposition header
* @param {string} params.ContentEncoding - Content-Encoding header
* @param {function} callback - callback function to call with the generated
* upload-id for MPU operations
* @return {undefined}
*/
function createMPU(params, callback) {
// As Google Cloud does not have a create MPU function,
// create an empty 'init' object that will temporarily store the
// object metadata and return an upload ID to mimic an AWS MPU
if (!params || !params.Bucket || !params.Key) {
const error = errors.InvalidRequest
.customizeDescription('Missing required parameter');
logHelper(logger, 'error', 'error in createMultipartUpload', error);
return callback(error);
}
const uploadId = uuid().replace(/-/g, '');
const mpuParams = {
Bucket: params.Bucket,
Key: createMpuKey(params.Key, uploadId, 'init'),
Metadata: params.Metadata,
ContentType: params.ContentType,
CacheControl: params.CacheControl,
ContentDisposition: params.ContentDisposition,
ContentEncoding: params.ContentEncoding,
};
mpuParams.Metadata = getPutTagsMetadata(mpuParams.Metadata, params.Tagging);
return this.putObject(mpuParams, err => {
if (err) {
logHelper(logger, 'error', 'error in createMPU - putObject', err);
return callback(err);
}
return callback(null, { UploadId: uploadId });
});
}
module.exports = createMPU;
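A usage sketch under the same assumptions. Bucket here is whichever bucket is meant to hold the 'init' placeholder (presumably the MPU bucket), and the Tagging string format is assumed to follow the S3 'key=value&key2=value2' convention consumed by getPutTagsMetadata.

// Hypothetical caller; values are placeholders.
function createExample(gcpClient, callback) {
    return gcpClient.createMultipartUpload({
        Bucket: 'mpu-shadow-bucket',
        Key: 'photos/cat.jpg',
        Metadata: { 'x-custom': 'value' },
        ContentType: 'image/jpeg',
        CacheControl: 'max-age=86400',
        Tagging: 'project=demo',
    }, (err, data) => {
        if (err) {
            return callback(err);
        }
        // data.UploadId is a dash-stripped uuid keying the 'init' object
        return callback(null, data.UploadId);
    });
}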


@@ -0,0 +1,24 @@
const async = require('async');
const { stripTags } = require('../GcpUtils');
function deleteObjectTagging(params, callback) {
return async.waterfall([
next => this.headObject({
Bucket: params.Bucket,
Key: params.Key,
VersionId: params.VersionId,
}, next),
(resObj, next) => {
const completeMD = stripTags(resObj.Metadata);
this.copyObject({
Bucket: params.Bucket,
Key: params.Key,
CopySource: `${params.Bucket}/${params.Key}`,
Metadata: completeMD,
MetadataDirective: 'REPLACE',
}, next);
},
], callback);
}
module.exports = deleteObjectTagging;


@@ -0,0 +1,19 @@
const { retrieveTags } = require('../GcpUtils');
function getObjectTagging(params, callback) {
const headParams = {
Bucket: params.Bucket,
Key: params.Key,
VersionId: params.VersionId,
};
this.headObject(headParams, (err, res) => {
if (err) {
// surface headObject failures instead of dereferencing an undefined res
return callback(err);
}
const TagSet = retrieveTags(res.Metadata);
const retObj = {
VersionId: res.VersionId,
TagSet,
};
return callback(null, retObj);
});
}
module.exports = getObjectTagging;
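Tags on this backend live in the object's metadata: getObjectTagging only needs a headObject, and deleteObjectTagging rewrites the metadata through copyObject with MetadataDirective REPLACE. A combined usage sketch under the same assumptions:

// Hypothetical caller; values are placeholders.
function tagExample(gcpClient, callback) {
    return gcpClient.getObjectTagging({
        Bucket: 'main-bucket',
        Key: 'photos/cat.jpg',
    }, (err, data) => {
        if (err) {
            return callback(err);
        }
        // data.TagSet is rebuilt from tag-related metadata entries
        return gcpClient.deleteObjectTagging({
            Bucket: 'main-bucket',
            Key: 'photos/cat.jpg',
        }, err => callback(err, data.TagSet));
    });
}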

lib/data/external/GCP/GcpApis/index.js (vendored, new file, 14 lines)

@@ -0,0 +1,14 @@
module.exports = {
// mpu functions
abortMultipartUpload: require('./abortMPU'),
completeMultipartUpload: require('./completeMPU'),
createMultipartUpload: require('./createMPU'),
listParts: require('./listParts'),
uploadPart: require('./uploadPart'),
uploadPartCopy: require('./uploadPartCopy'),
// object tagging
putObject: require('./putObject'),
putObjectTagging: require('./putTagging'),
getObjectTagging: require('./getTagging'),
deleteObjectTagging: require('./deleteTagging'),
};
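How these modules get attached to a client is not shown in this section; one plausible wiring, assuming a client class that already provides the lower-level calls the modules rely on, is to mix them into its prototype so that `this` inside each module is the client:

// Hypothetical wiring sketch; the require path and class are illustrative.
const GcpApis = require('./lib/data/external/GCP/GcpApis');

class GcpClientSketch {
    // the real client would define putObjectReq, listObjects, listVersions,
    // headObject, copyObject, composeObject, deleteObject and the
    // _maxConcurrent limit used by the modules above
}
Object.assign(GcpClientSketch.prototype, GcpApis);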


@@ -0,0 +1,42 @@
const { errors } = require('arsenal');
const { createMpuKey, logger } = require('../GcpUtils');
const { logHelper } = require('../../utils');
/**
* listParts - list uploaded MPU parts
* @param {object} params - listParts param
* @param {string} params.Bucket - bucket name
* @param {string} params.Key - object key
* @param {string} params.UploadId - MPU upload id
* @param {function} callback - callback function to call with the list of parts
* @return {undefined}
*/
function listParts(params, callback) {
if (!params || !params.UploadId || !params.Bucket || !params.Key) {
const error = errors.InvalidRequest
.customizeDescription('Missing required parameter');
logHelper(logger, 'error', 'error in listParts', error);
return callback(error);
}
if (params.PartNumberMarker && params.PartNumberMarker < 0) {
return callback(errors.InvalidArgument
.customizeDescription('The request specified an invalid marker'));
}
const mpuParams = {
Bucket: params.Bucket,
Prefix: createMpuKey(params.Key, params.UploadId, 'parts'),
Marker: createMpuKey(params.Key, params.UploadId,
params.PartNumberMarker, 'parts'),
MaxKeys: params.MaxParts,
};
return this.listObjects(mpuParams, (err, res) => {
if (err) {
logHelper(logger, 'error',
'error in listParts - listObjects', err);
return callback(err);
}
return callback(null, res);
});
}
module.exports = listParts;
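A usage sketch under the same assumptions; MaxParts maps onto MaxKeys of a prefixed listObjects on the MPU bucket, and the callback receives that raw listing.

// Hypothetical caller; values are placeholders.
function listPartsExample(gcpClient, callback) {
    return gcpClient.listParts({
        Bucket: 'mpu-shadow-bucket',
        Key: 'photos/cat.jpg',
        UploadId: 'exampleuploadid',
        MaxParts: 1000,
    }, (err, res) => {
        if (err) {
            return callback(err);
        }
        // res is the raw listObjects result for the parts prefix
        return callback(null, res);
    });
}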


@@ -0,0 +1,316 @@
const async = require('async');
const Backoff = require('backo');
const { eachSlice, createMpuKey, createMpuList, logger } =
require('../GcpUtils');
const { logHelper } = require('../../utils');
const { createAggregateETag } =
require('../../../../api/apiUtils/object/processMpuParts');
const BACKOFF_PARAMS = { min: 1000, max: 300000, jitter: 0.1, factor: 1.5 };
class MpuHelper {
constructor(service, options = {}) {
this.service = service;
this.backoffParams = {
min: options.min || BACKOFF_PARAMS.min,
max: options.max || BACKOFF_PARAMS.max,
jitter: options.jitter || BACKOFF_PARAMS.jitter,
factor: options.factor || BACKOFF_PARAMS.factor,
};
}
_retry(fnName, params, callback) {
const backoff = new Backoff(this.backoffParams);
const handleFunc = (fnName, params, retry, callback) => {
const timeout = backoff.duration();
return setTimeout((params, cb) =>
this.service[fnName](params, cb), timeout, params,
(err, res) => {
if (err) {
if (err.statusCode === 429 || err.code === 429) {
if (fnName === 'composeObject') {
logger.trace('composeObject: slow down request',
{ retryCount: retry, timeout });
} else if (fnName === 'copyObject') {
logger.trace('copyObject: slow down request',
{ retryCount: retry, timeout });
}
return handleFunc(
fnName, params, retry + 1, callback);
}
logHelper(logger, 'error', `${fnName} failed`, err);
return callback(err);
}
backoff.reset();
return callback(null, res);
});
};
handleFunc(fnName, params, 0, callback);
}
/**
* retryCompose - exponential backoff retry implementation for the compose
* operation
* @param {object} params - compose object params
* @param {function} callback - callback function to call with the result
* of the compose operation
* @return {undefined}
*/
retryCompose(params, callback) {
this._retry('composeObject', params, callback);
}
/**
* retryCopy - exponential backoff retry implementation for the copy
* operation
* @param {object} params - copy object params
* @param {function} callback - callback function to call with the result
* of the copy operation
* @return {undefined}
*/
retryCopy(params, callback) {
this._retry('copyObject', params, callback);
}
/**
* splitMerge - breaks down the MPU list of parts to be compose on GCP;
* splits partList into chunks of 32 objects, the limit of each compose
* operation.
* @param {object} params - complete MPU params
* @param {string} params.Bucket - bucket name
* @param {string} params.MPU - mpu bucket name
* @param {string} params.Key - object key
* @param {string} params.UploadId - MPU upload id
* @param {object[]} partList - list of parts for complete multipart upload
* @param {string} level - the phase name of the MPU process
* @param {function} callback - the callback function to call
* @return {undefined}
*/
splitMerge(params, partList, level, callback) {
// create composition of slices from the partList array
return async.mapLimit(eachSlice.call(partList, 32),
this.service._maxConcurrent,
(infoParts, cb) => {
const mpuPartList = infoParts.Parts.map(item =>
({ PartName: item.PartName }));
const partNumber = infoParts.PartNumber;
const tmpKey =
createMpuKey(params.Key, params.UploadId, partNumber, level);
const mergedObject = { PartName: tmpKey };
if (mpuPartList.length < 2) {
logger.trace(
'splitMerge: parts are fewer than 2, copy instead');
// else just perform a copy
const copyParams = {
Bucket: params.MPU,
Key: tmpKey,
CopySource: `${params.MPU}/${mpuPartList[0].PartName}`,
};
return this.service.copyObject(copyParams, (err, res) => {
if (err) {
logHelper(logger, 'error',
'error in splitMerge - copyObject', err);
return cb(err);
}
mergedObject.VersionId = res.VersionId;
mergedObject.ETag = res.ETag;
return cb(null, mergedObject);
});
}
const composeParams = {
Bucket: params.MPU,
Key: tmpKey,
MultipartUpload: { Parts: mpuPartList },
};
return this.retryCompose(composeParams, (err, res) => {
if (err) {
return cb(err);
}
mergedObject.VersionId = res.VersionId;
mergedObject.ETag = res.ETag;
return cb(null, mergedObject);
});
}, (err, res) => {
if (err) {
return callback(err);
}
return callback(null, res.length);
});
}
/**
* removeParts - remove all objects created to perform a multipart upload
* @param {object} params - remove parts params
* @param {string} params.Bucket - bucket name
* @param {string} params.MPU - mpu bucket name
* @param {string} params.Key - object key
* @param {string} params.UploadId - MPU upload id
* @param {function} callback - callback function to call
* @return {undefined}
*/
removeParts(params, callback) {
const _getObjectVersions = callback => {
logger.trace('remove all parts from mpu bucket');
let partsList = [];
let isTruncated = true;
let nextMarker;
return async.whilst(() => isTruncated, next => {
const listParams = {
Bucket: params.MPU,
Prefix: params.Prefix,
Marker: nextMarker,
};
return this.service.listVersions(listParams, (err, res) => {
if (err) {
logHelper(logger, 'error', 'error in ' +
'removeParts - listVersions', err);
return next(err);
}
nextMarker = res.NextMarker;
isTruncated = res.IsTruncated;
partsList = partsList.concat(res.Versions);
return next();
});
}, err => callback(err, partsList));
};
const _deleteObjects = (partsList, callback) => {
logger.trace('successfully listed mpu parts', {
objectCount: partsList.length,
});
return async.eachLimit(partsList, 10, (obj, next) => {
const delParams = {
Bucket: params.MPU,
Key: obj.Key,
VersionId: obj.VersionId,
};
this.service.deleteObject(delParams, err => {
if (err) {
logHelper(logger, 'error',
'error deleting object', err);
return next(err);
}
return next();
});
}, err => callback(err));
};
return async.waterfall([
_getObjectVersions,
_deleteObjects,
], err => callback(err));
}
composeFinal(numParts, params, callback) {
// final compose:
// number of parts to compose <= 10
// perform final compose in mpu bucket
logger.trace('completeMultipartUpload: final compose');
const parts = createMpuList(params, 'compose', numParts);
const partList = parts.map(item => (
{ PartName: item.PartName }));
if (partList.length < 2) {
logger.trace(
'fewer than 2 parts, skip to copy phase');
return callback(null, partList[0].PartName);
}
const composeParams = {
Bucket: params.MPU,
Key: createMpuKey(params.Key, params.UploadId, 'final'),
MultipartUpload: { Parts: partList },
};
return this.retryCompose(composeParams, err => {
if (err) {
return callback(err);
}
return callback(null, null);
});
}
/*
* Create MPU Aggregate ETag
*/
generateMpuResult(res, partList, callback) {
const concatETag = partList.reduce((prev, curr) =>
prev + curr.ETag.substring(1, curr.ETag.length - 1), '');
const aggregateETag = createAggregateETag(concatETag, partList);
return callback(null, res, aggregateETag);
}
copyToMain(res, aggregateETag, params, callback) {
// move object from mpu bucket into the main bucket
// retrieve initial metadata then compose the object
const copySource = res ||
createMpuKey(params.Key, params.UploadId, 'final');
return async.waterfall([
next => {
// retrieve metadata from init object in mpu bucket
const headParams = {
Bucket: params.MPU,
Key: createMpuKey(params.Key, params.UploadId,
'init'),
};
logger.trace('retrieving object metadata');
return this.service.headObject(headParams, (err, res) => {
if (err) {
logHelper(logger, 'error',
'error in createMultipartUpload - headObject',
err);
return next(err);
}
return next(null, res);
});
},
(res, next) => {
const metadata = res.Metadata;
// copy the final object into the main bucket
const copyMetadata = Object.assign({}, metadata);
copyMetadata['scal-etag'] = aggregateETag;
const copyParams = {
Bucket: params.Bucket,
Key: params.Key,
Metadata: copyMetadata,
MetadataDirective: 'REPLACE',
CopySource: `${params.MPU}/${copySource}`,
ContentType: res.ContentType,
CacheControl: res.CacheControl,
ContentEncoding: res.ContentEncoding,
ContentDisposition: res.ContentDisposition,
ContentLanguage: res.ContentLanguage,
};
logger.trace('copyParams', { copyParams });
this.retryCopy(copyParams, (err, res) => {
if (err) {
logHelper(logger, 'error', 'error in ' +
'createMultipartUpload - final copyObject',
err);
return next(err);
}
const mpuResult = {
Bucket: params.Bucket,
Key: params.Key,
VersionId: res.VersionId,
ETag: `"${aggregateETag}"`,
};
return this.service.headObject({
Bucket: params.Bucket,
Key: params.Key,
VersionId: res.VersionId,
}, (err, res) => {
if (err) {
logHelper(logger, 'error', 'error in ' +
'createMultipartUpload - final head object',
err);
return next(err);
}
mpuResult.ContentLength = res.ContentLength;
return next(null, mpuResult);
});
});
},
], callback);
}
}
module.exports = MpuHelper;
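The retry helper above backs off exponentially (defaults: 1s minimum, 300s maximum, factor 1.5, 10% jitter) and retries only on HTTP 429 responses from composeObject or copyObject. A sketch of tuning those knobs, assuming a service object with the same interface as the client used above; the require path is illustrative.

// Hypothetical tuning of the backoff window; gcpService is assumed to
// expose composeObject/copyObject like the GCP client above.
const MpuHelper = require('./lib/data/external/GCP/GcpApis/mpuHelper');

function composeWithShortBackoff(gcpService, composeParams, callback) {
    const mpuHelper = new MpuHelper(gcpService, {
        min: 500,      // first retry after roughly 0.5s
        max: 60000,    // cap the exponential backoff at 60s
        jitter: 0.1,
        factor: 1.5,
    });
    // retries transparently on 429, fails fast on any other error
    return mpuHelper.retryCompose(composeParams, callback);
}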


@@ -0,0 +1,11 @@
const { getPutTagsMetadata } = require('../GcpUtils');
function putObject(params, callback) {
const putParams = Object.assign({}, params);
putParams.Metadata = getPutTagsMetadata(putParams.Metadata, params.Tagging);
delete putParams.Tagging;
// error handling is left to the underlying putObjectReq call
return this.putObjectReq(putParams, callback);
}
module.exports = putObject;
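A usage sketch under the same assumptions; Tagging is removed from the request and folded into Metadata by getPutTagsMetadata before putObjectReq is issued (the 'key=value' form is an assumption).

// Hypothetical caller; values are placeholders.
function putExample(gcpClient, callback) {
    return gcpClient.putObject({
        Bucket: 'main-bucket',
        Key: 'photos/cat.jpg',
        Body: Buffer.from('hello world'),
        Metadata: { 'x-custom': 'value' },
        Tagging: 'project=demo&owner=alice',
    }, callback);
}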

Some files were not shown because too many files have changed in this diff.