Compare commits

...

263 Commits

Author SHA1 Message Date
bert-e 6d73c30597 Merge branch 'w/7.5/improvement/S3C-2352-install-yarn' into tmp/octopus/w/8.0/improvement/S3C-2352-install-yarn 2019-07-29 22:58:52 +00:00
bert-e bbe1e7e87d Merge branch 'improvement/S3C-2352-install-yarn' into tmp/octopus/w/7.5/improvement/S3C-2352-install-yarn 2019-07-29 22:58:51 +00:00
bert-e 40aa7d836f Merge branch 'w/7.5/feature/S3C-2216-bump-tags-limit' into tmp/octopus/w/8.0/feature/S3C-2216-bump-tags-limit 2019-07-26 23:34:38 +00:00
bert-e 98737a69ba Merge branch 'feature/S3C-2216-bump-tags-limit' into tmp/octopus/w/7.5/feature/S3C-2216-bump-tags-limit 2019-07-26 23:34:37 +00:00
bert-e 279f08c870 Merge branch 'feature/S3C-2346-bucket-policy-routes' into tmp/octopus/w/8.0/feature/S3C-2346-bucket-policy-routes 2019-07-26 17:14:33 +00:00
Dora Korpar 94653a14c4 ft: S3C-2346 add bucket policy routes 2019-07-25 15:28:55 -07:00
anurag4dsb 8c664d9076 Merge remote-tracking branch 'origin/w/7.5/bugfix/S3C-2335-fixDataServerCloseSync' into w/8.0/bugfix/S3C-2335-fixDataServerCloseSync 2019-07-17 16:16:06 -07:00
bert-e 0f53c78ccd Merge branch 'bugfix/S3C-2335-fixDataServerCloseSync' into tmp/octopus/w/7.5/bugfix/S3C-2335-fixDataServerCloseSync 2019-07-17 23:13:17 +00:00
Rahul Padigela dd6fde61bb Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2127-upgrade-node' into w/8.0/improvement/S3C-2127-upgrade-node 2019-06-27 16:06:24 -07:00
bert-e b03f5b80ac Merge branch 'improvement/S3C-2127-upgrade-node' into tmp/octopus/w/7.5/improvement/S3C-2127-upgrade-node 2019-06-27 22:59:58 +00:00
bert-e e6ddad1193 Merge branch 'w/7.5/bugfix/S3C-2172-bucket-error' into tmp/octopus/w/8.0/bugfix/S3C-2172-bucket-error 2019-05-22 23:59:47 +00:00
bert-e 933dc1da17 Merge branch 'bugfix/S3C-2172-bucket-error' into tmp/octopus/w/7.5/bugfix/S3C-2172-bucket-error 2019-05-22 23:59:46 +00:00
Jianqin Wang 9da1a8e1f7 Update package-lock.json file with ioredis 4.9.5 upgrade 2019-05-20 16:16:11 -07:00
Jianqin Wang 700cb4eb48 Merge remote-tracking branch 'origin/w/7.5/improvement/S3C-2034-bump-ioredis' into w/8.0/improvement/S3C-2034-bump-ioredis 2019-05-20 14:52:03 -07:00
bert-e ae8dd1bb0e Merge branch 'improvement/S3C-2034-bump-ioredis' into tmp/octopus/w/7.5/improvement/S3C-2034-bump-ioredis 2019-05-20 21:49:24 +00:00
Jonathan Gramain 2ff9cf866d bugfix: ZENKO-1738 bucket names with period trimmed by backbeat
Fix mongoclient.ListRecordStream to properly pass on bucket names with
periods (like foo.bar), instead of truncating the name after the first
period.

Down the line, that fixes replication for objects contained in such
buckets.
2019-04-23 16:54:27 -07:00
bert-e b7c84ef7d3 Merge branch 'feature/S3C-2031/kmip-arsenal-errors' into tmp/octopus/w/8.0/feature/S3C-2031/kmip-arsenal-errors 2019-03-14 23:11:34 +00:00
Guillaume Gimenez c6e06cc235 feature: S3C-2031: KMIP uses arsenal errors 2019-03-14 16:10:50 -07:00
bert-e b9c419dde7 Merge branches 'w/8.0/feature/S3C-2002-admin-service' and 'q/722/7.5/feature/S3C-2002-admin-service' into tmp/octopus/q/8.0 2019-03-08 00:17:05 +00:00
bert-e 57c971ef0f Merge branches 'w/7.5/feature/S3C-2002-admin-service' and 'q/722/7.4/feature/S3C-2002-admin-service' into tmp/octopus/q/7.5 2019-03-08 00:17:05 +00:00
bert-e 226088c8fb Merge branch 'w/7.5/feature/S3C-2002-admin-service' into tmp/octopus/w/8.0/feature/S3C-2002-admin-service 2019-03-07 19:29:21 +00:00
bert-e d8320da1bb Merge branch 'w/7.4/feature/S3C-2002-admin-service' into tmp/octopus/w/7.5/feature/S3C-2002-admin-service 2019-03-07 19:29:21 +00:00
bert-e 8f0cab8d91 Merge branch 'w/7.5/bugfix/S3C-2017-berte-fix' into tmp/octopus/w/8.0/bugfix/S3C-2017-berte-fix 2019-03-07 18:15:10 +00:00
bert-e d5d6243c01 Merge branch 'bugfix/S3C-2017-berte-fix' into tmp/octopus/w/7.5/bugfix/S3C-2017-berte-fix 2019-03-07 18:15:10 +00:00
Guillaume Gimenez 87103f83e1 Merge remote-tracking branch 'origin/feature/S3C-1968/kmip-highlevel-driver' into w/8.0/feature/S3C-1968/kmip-highlevel-driver 2019-03-01 16:53:03 -08:00
Guillaume Gimenez 7fb16cbca6 feature: S3C-1968: usage of deprecated Buffer ctor 2019-03-01 16:50:21 -08:00
Guillaume Gimenez 2a8a5dcb94 feature: S3C-1968: Loopback Test KMIP Server 2019-03-01 16:50:21 -08:00
Guillaume Gimenez ff5d62f7de feature: S3C-1968: KMIP High Level Cloudserver Driver 2019-03-01 16:50:21 -08:00
bert-e f4d4c9b76e Merge branches 'w/8.0/feature/S3C-1967/kmip-lowlevel-driver' and 'q/705/7.5/feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/q/8.0 2019-03-02 00:46:12 +00:00
bert-e 97035596e1 Merge branch 'feature/S3C-1967/kmip-lowlevel-driver' into q/7.5 2019-03-02 00:46:12 +00:00
bert-e f11d6e223d Merge branch 'w/7.5/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/w/8.0/bugfix/S3C-2006-listing-filter-value-fix 2019-03-01 19:23:51 +00:00
bert-e 8c19dcdc7c Merge branch 'w/7.4/bugfix/S3C-2006-listing-filter-value-fix' into tmp/octopus/w/7.5/bugfix/S3C-2006-listing-filter-value-fix 2019-03-01 19:23:51 +00:00
bert-e 0144158a37 Merge branch 'feature/S3C-1967/kmip-lowlevel-driver' into tmp/octopus/w/8.0/feature/S3C-1967/kmip-lowlevel-driver 2019-02-28 20:49:53 +00:00
Guillaume Gimenez cae763669b feature: S3C-1967: KMIP Low Level Driver 2019-02-28 12:03:14 -08:00
bert-e daaeb5637a Merge branch 'feature/S3C-1966/kmip-tls-transport' into tmp/octopus/w/8.0/feature/S3C-1966/kmip-tls-transport 2019-02-28 19:50:12 +00:00
Guillaume Gimenez b3598c5d0e feature: S3C-1966: KMIP TLS Transport 2019-02-28 11:49:22 -08:00
Jonathan Gramain 9fe0ba5c8c bugfix: ZENKO-1522 helper ObjectMD.isMultipartUpload()
Created this helper to check what kind of CRR to execute depending on
whether the object is an MPU or not.
2019-02-21 17:53:45 -08:00
bert-e ac365eef18 Merge branch 'feature/S3C-1925/kmip-ttlv-codec' into q/7.5 2019-02-22 00:49:09 +00:00
bert-e 6a4784417f Merge branches 'w/8.0/feature/S3C-1925/kmip-ttlv-codec' and 'q/641/7.5/feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/q/8.0 2019-02-22 00:49:09 +00:00
bert-e 0d33e5a69f Merge branch 'feature/S3C-1925/kmip-ttlv-codec' into tmp/octopus/w/8.0/feature/S3C-1925/kmip-ttlv-codec 2019-02-22 00:31:26 +00:00
Guillaume Gimenez f7aa22f9a6 feature: S3C-1925: KMIP TTLV Codec 2019-02-21 16:27:24 -08:00
bert-e 23d406dc81 Merge branch 'w/7.5/bugfix/S3C-1985/listing-filter-value' into tmp/octopus/w/8.0/bugfix/S3C-1985/listing-filter-value 2019-02-19 23:45:28 +00:00
bert-e 40a802b715 Merge branch 'feature/S3C-1561-accountQuotas' into tmp/octopus/w/8.0/feature/S3C-1561-accountQuotas 2019-01-24 21:10:53 +00:00
Giacomo Guiulfo b5fa54ec11 bugfix(DataFileStore): add noCache option 2019-01-07 16:20:55 -08:00
Guillaume Gimenez 39f2a53beb ft: ZENKO-1389: md proxy parallel route 2018-12-18 11:41:37 -08:00
Guillaume Gimenez 30c3ce1e2b ft: ZENKO-1384: md proxy healthcheck 2018-12-13 17:30:25 -08:00
Dora Korpar 9a009746be Merge remote-tracking branch 'origin/bugfix/S3C-1678-ipv6' into w/8.0/bugfix/S3C-1678-ipv6 2018-11-15 16:15:14 -08:00
Jonathan Gramain d620fef517 bf: ZENKO-1175 fix when no saved ID exists
In the case where there is no saved ID yet (initial deployment), process
the very first entry in the log instead of skipping it. In
practice this should have no impact, because the very first entry in
the log is normally not due to be processed for CRR, but it ensures
correctness.
2018-11-08 14:46:04 -08:00
Jonathan Gramain 8ac3cf5548 ft: ZENKO-1175 tailable cursor to consume mongo oplog
Use a tailable cursor to keep ordering guarantees for the records we
read. This also means we have to read from the beginning when we
reconnect (at startup), and start processing once we encounter the
unique ID previously stored in zookeeper.

Also removed dispatcher mode with MongoLogReader (was only used for
the short-lived Federation deployment of Zenko).
2018-11-08 14:45:07 -08:00
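For reference, a tailable cursor over the MongoDB oplog is typically opened as in the sketch below (Node.js 'mongodb' 3.x driver API; the connection string and the zookeeper handling are placeholders, not code from this repository):

// Sketch only: tail the replica-set oplog with a tailable, awaitData cursor.
const { MongoClient, Timestamp } = require('mongodb');

MongoClient.connect('mongodb://localhost:27017', (err, client) => {
    if (err) {
        throw err;
    }
    const oplog = client.db('local').collection('oplog.rs');
    const stream = oplog.find(
        { ts: { $gt: new Timestamp(0, 0) } },
        { tailable: true, awaitData: true, noCursorTimeout: true })
        .stream();
    // Entries arrive in order; a consumer would skip forward until it sees
    // the unique ID previously stored in zookeeper, then start processing.
    stream.on('data', entry => console.log(entry.ns, entry.op));
});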
Jonathan Gramain 18dfc6b4fa Merge remote-tracking branch 'origin/feature/S3C-1640-CRRRetryBackport' into w/8.0/feature/S3C-1640-CRRRetryBackport 2018-10-19 17:31:22 -07:00
Rahul Padigela 9fe16c64fa Merge remote-tracking branch 'origin/improvement/bumpArsenal' into w/8.0/improvement/bumpArsenal 2018-10-15 14:57:30 -07:00
vrancurel 3dee6e2d0b bugfix: manage the 'close' event on dataStream
When the underlying socket of the dataStream is closed, this
  is not considered a stream error, so we have to hook the
  event and do the cleanup ourselves
2018-10-15 11:13:47 -07:00
vrancurel 3545eb4d62 bugfix: close socket on client error
When receiving this callback, sometimes the socket is already
  closed (e.g. upon RST) but sometimes we have to close it ourselves.
2018-10-15 11:13:47 -07:00
Dora Korpar 0a85eeb8b7 manual edit: add metastore changes 2018-09-25 16:17:42 -07:00
Dora Korpar 83759870f2 Merge remote-tracking branch 'origin/feature/S3C-1171-listv2' into w/8.0/feature/S3C-1171-listv2 2018-09-25 16:16:51 -07:00
Alexander Chan 0d4bf3c17f ft: ZENKO-1186 stalled sanity check 2018-09-20 15:40:07 -07:00
Alexander Chan 0117b39dcf bf: ZENKO-1155 add index restriction for mongo find call 2018-09-15 19:27:56 -07:00
Bennett Buchanan 549ca1f683 bugfix: ZENKO-1144 Update route and private method 2018-09-15 10:03:44 -07:00
bert-e e4a66343fb Merge branch 'bugfix/ZENKO-1144-fixSortedSetHelper' into q/8.0 2018-09-14 21:32:15 +00:00
philipyoo a89fdde6fd bf: ZENKO-1144 fix ttl of sorted set expires
Changes in this commit:
- Fix TTL Redis#expire from ms to secs
2018-09-14 14:29:55 -07:00
philipyoo 872a2d88e5 bf: ZENKO-1144 remove redis scan in crr metrics
Changes in this commit:
- Remove use of Redis#scan. Instead build query strings
  manually
2018-09-14 08:51:40 -07:00
philipyoo 0c9c462634 bf: ZENKO-1144 add sorted set support StatsModel
Changes in this commit:
- Helper method _normalizeTimestampByHour normalizes date to
  nearest hour
- Helper method _setDateToPreviousHour sets date back 1 hour
- method getSortedSetHours returns list of 24 normalized
  hourly timestamps
- method getSortedSetCurrentHour returns normalized
  hourly timestamp based on epoch passed
- method addToSortedSet adds to a sorted set and applies
  expiry if adding to new sorted set
2018-09-14 07:40:30 -07:00
philipyoo a3973ac7d3 ft: ZENKO-1144 redis wrapper for sorted sets
Changes in this commit:
- Add wrapper for Redis sorted set methods: ZADD, ZCARD,
  ZRANGE, ZRANGEBYSCORE, ZREM, ZSCORE
- Add wrapper for Redis methods: EXISTS
2018-09-13 18:28:48 -07:00
bert-e d1a8693fe5 Merge branch 'bugfix/ZENKO-1124-mongo-listing-loop' into q/8.0 2018-09-11 01:02:10 +00:00
Jeremy Desanlis 5687a48599 ZENKO-1124: mongo listing, avoid to loop 2018-09-10 17:07:01 -07:00
Nicolas Humbert 9dca871e1b fx: ZENKO-1112 Management client error logging 2018-09-07 10:04:26 -07:00
philipyoo 7088812c80 bf: ZENKO-1024 fix fail metrics in all route
All metrics functions will query Redis once for all data.
With the change to failure metrics, we want to pass
the request details object to the getFailedMetrics function
2018-09-04 08:48:05 -07:00
philipyoo 9f742d4921 bf: ZENKO-1024 use pending metrics for backlog
Pending metrics don't expire, which was a cause of problems
with the current backlog. This quick fix is to use pending
metrics in place of backlog while keeping the same names
and routes in place to avoid regression.
2018-08-31 17:19:14 -07:00
bert-e 2c31728905 Merge branch 'bugfix/ZENKO-1024/add-global-counters' into q/8.0 2018-08-24 22:06:46 +00:00
Bennett Buchanan 125ccbbfa9 bugfix: ZENKO-1024 Add pending counters 2018-08-24 14:28:36 -07:00
bert-e 40c8b37b30 Merge branch 'feature/ZENKO-1019-cancelScheduleResume' into q/8.0 2018-08-23 22:59:12 +00:00
bert-e 879075e4ec Merge branch 'bugfix/ZENKO-945-delimitermaster-filter' into tmp/octopus/w/8.0/bugfix/ZENKO-945-delimitermaster-filter 2018-08-22 23:46:28 +00:00
philipyoo 79ed68ce9f ft: ZENKO-1019 add cancel scheduled resume route 2018-08-22 13:47:37 -07:00
bert-e cbfacb5ec0 Merge branch 'bugfix/ZENKO-945-delimitermaster-test' into tmp/octopus/w/8.0/bugfix/ZENKO-945-delimitermaster-test 2018-08-20 23:40:54 +00:00
philipyoo 06dfdd9612 rf: use single StatsModel, use explicit var names
Changes in this commit:
- Remove `OBJECT_MONITORING_EXPIRY` and use `EXPIRY` instead
  as values are now same
- Use single instance of `StatsModel`
- Remove extra interval in `StatsModel` expiry field. Not
  needed anymore as throughput uses a 15 minute window and
  the extra interval for it will be available by default
- Use explicit variable names when data is fetched from
  `StatsClient`
2018-08-08 14:53:30 -07:00
philipyoo bf95506495 ft: ZENKO-925 increase crr metrics expiry to 24hrs
Changes reflected in this commit:
- Increase metrics expiry, but keep throughput to 15 minute
  averages.
- Add helper method `_getMaxUptime` to find # of intervals
- Update tests to reflect the extra intervals fetched from
  Redis/StatsModel
2018-08-08 14:53:30 -07:00
Alexander Chan db743f8269 improvement: version increase 2018-08-08 10:03:19 -07:00
Alexander Chan a2311bb69c bf: ZENKO-922 add redis disconnect method
Adds disconnect method to allow closing of the backbeat metrics redis
client
2018-08-06 17:52:24 -07:00
Alexander Chan c8f323237f bf: ZENKO-903 retrieve new bucket list on report 2018-08-06 13:07:16 -07:00
Rahul Padigela 5cf55fcb68 improvement: update package-lock version 2018-08-01 17:16:23 -07:00
Rahul Padigela de94a0e62e improvement: update test to adjust to nodejs 8
Buffer.from no longer throws an error if most of the string contains valid hex.
Since the test checks that an error is thrown for invalid hex, the test has been
updated accordingly.
2018-08-01 17:12:29 -07:00
Rahul Padigela 2b13994795 improvement: run tests in eve with nodejs 8 2018-08-01 16:25:59 -07:00
Rahul Padigela 769a461178 improvement: move metrics tests to functional 2018-08-01 16:24:40 -07:00
Rahul Padigela c11fc1d9d8 bugfix: ZENKO-898 install node-fcntl module 2018-08-01 15:59:11 -07:00
bert-e b8ad86a1f1 Merge branch 'feature/ZENKO-785-add-checkHealth-to-mongodb' into q/8.0 2018-07-31 19:04:56 +00:00
Giacomo Guiulfo 12c4df722b feat: add checkHealth to mongodb interface 2018-07-31 11:40:55 -07:00
bert-e f566e32322 Merge branch 'bugfix/ZENKO-751-setMaxObjectKeyLimit' into q/8.0 2018-07-30 17:17:11 +00:00
philipyoo 6413c92fbc bf: ZENKO-751 tempfix set max object key limit
The max key length will be set to 915 to account for different
situations. The default AWS key size is 1024, but Mongo keys
allow up to 1012 bytes. Factoring in the version id
and bucket match false (bucket name prefix), for now
we will limit the key size to 915 and return an error
right away if the object key byte size exceeds this limit.
2018-07-27 14:40:14 -07:00
bert-e 29182cce05 Merge branch 'bugfix/ZENKO-763-objectTagsAreNotReplicated' into q/8.0 2018-07-27 18:40:17 +00:00
Jonathan Gramain 9fb5b8b10d bf: ZENKO-763 rework mongo log consumer
- process individual mongo log entry types separately ('i', 'u',
  'd'). This is the main fix required to process updates coming from
  put-object-tagging or ACLs

- fix usage of uniqID:

  - previously it was ignored due to a typo (uniqId instead of
    uniqID), which meant we still processed entries from the
    same second multiple times

  - with typo fixed, it requires another change to make it useful:
    we have to emit the 'info' event at the end of the batch so that
    the last unique ID is presented

  - cleaner serialization of timestamp+uniqID: use JSON rather than
    custom parsing and pass it as an opaque string in info.end

- correct use of stream functions (e.g. end() was masked by a local
  variable called "this.end", fixed by prefixing private members with
  "_")

- fix timestamp output: do not use private member _high from
  Timestamp, use toNumber()

- fix mongo log flow control when reading from the mongo log by using
  pipe() instead of just calling write() then end(), so we don't
  buffer contents unnecessarily.

- removed some unnecessary special case handling and 'this.push()'
  calls

Add unit tests to check ListRecordStream with known mongo log entry
types (which required moving the class in a separate file from
LogConsumer.js)
2018-07-25 18:06:06 -07:00
vrancurel 5631a892c6 bugfix: temporary fix for the s3-data pod ballooning issue
that consists of disabling file-level caching for the files
  we store in the file data backend.
2018-07-25 16:17:16 -07:00
Rahul Padigela dfcdea46fc improvement: ZENKO-760 use callback instead of throw
This lets CloudServer handle MongoClient issues more gracefully
2018-07-20 17:21:36 -07:00
Rahul Padigela be02e59bfe bugfix: ensure setup callback is called 2018-07-20 16:51:17 -07:00
Rahul Padigela fdbeed1c4e improvement: ZENKO-760 add connection timeout for monogoclient
Mongoclient checks each node in the replica set to see which one is the primary;
this check has a default timeout of 30s, which delays the startup of Cloudserver
when one of the nodes is unavailable. Cutting down the timeout makes it go through the
list of nodes in the replica set more quickly to find the primary. The MONGO_CONNECT_TIMEOUT_MS env
var is introduced to adjust the timeout in deployments.
2018-07-20 14:30:38 -07:00
bert-e 91fbc3fd23 Merge branch 'bugfix/ZENKO-642-multipleLifecycleConfigTags' into q/8.0 2018-07-17 20:42:38 +00:00
philipyoo 241338bcfa bf: apply multiple lifecycle filter tags if exists 2018-07-17 13:22:16 -07:00
Rached Ben Mustapha 6db80e9411 bf: return timely on data diskUsage subresource 2018-07-17 10:46:40 -07:00
bert-e d701352635 Merge branch 'bugfix/ZENKO-693/fixNegativeValues' into q/8.0 2018-07-10 20:02:18 +00:00
Alexander Chan b291ccc03f bf: ZENKO-693 clamp negative values to 0 2018-07-10 12:11:16 -07:00
Bennett Buchanan 0426f44dee bugfix: ZENKO-621 Make _buildKey public method 2018-07-10 11:34:47 -07:00
Rahul Padigela 1b9242788a bugfix: ZENKO-632 check if destroy method is available
s3-data returns a Readable, unlike sproxydclient, which returns an instance of
http.IncomingMessage that implements the Readable stream interface and extends it with
a destroy method
2018-07-06 15:30:38 -07:00
Bennett Buchanan 1a2ea2f353 feature: ZENKO-483 Update Redis key schema 2018-07-05 15:31:01 -07:00
Bennett Buchanan c36280a6e8 feature: ZENKO-483 Monitor CRR upload 2018-07-05 10:51:48 -07:00
bert-e c749725410 Merge branch 'bugfix/ZENKO-579-skip-scan-fix' into q/8.0 2018-07-01 21:30:53 +00:00
Alexander Chan 3d06ec6230 bf: ZENKO-625 fix mongo aggregate params 2018-07-01 12:03:46 -07:00
Jonathan Gramain 159ebb4283 bugfix: ZENKO-433 fix when 'params' is undefined 2018-06-30 19:20:55 -07:00
Alexander Chan e17333b19e ft: ZENKO-597 account for transient source in TDM 2018-06-30 15:11:12 -07:00
philipyoo b3b22292c4 ft: ZENKO-584 add failed CRR metrics route 2018-06-30 08:14:38 -07:00
bert-e 68d27ed5bf Merge branch 'bugfix/ZENKO-603/mongoItemCount' into q/8.0 2018-06-30 04:58:41 +00:00
bert-e 1e79964253 Merge branch 'feature/ZENKO-239-scheduleResumeRoutes' into q/8.0 2018-06-29 22:54:40 +00:00
philipyoo 5f76343c2e ft: ZENKO-239 add schedule resume routes 2018-06-29 15:13:50 -07:00
Alexander Chan d907c9942d bf: use bucketName instead of c.s.name 2018-06-29 12:52:21 -07:00
Alexander Chan c63b0713c0 bf: add more tests 2018-06-29 12:50:49 -07:00
Alexander Chan 6a9a88800a rf: use mongodb aggregate method for item count 2018-06-29 11:56:30 -07:00
Dora Korpar 5834f15397 ft: ZENKO-582 preferred read location
Add preferred read location specification in replication configuration

E.g. <StorageClass>aws,gcp:preferred_read</StorageClass>
2018-06-28 14:31:35 -07:00
bert-e b50f6c4678 Merge branch 'feature/ZENKO-583-crrStatusRoute' into q/8.0 2018-06-28 17:20:54 +00:00
bert-e edeab02107 Merge branch 'feature/pensieve-stats' into q/8.0 2018-06-28 17:17:43 +00:00
David Pineau c64cccdf55 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-28 18:51:07 +02:00
vrancurel af2b3a4bc3 bugfix: fix versioningGeneral2 test failing with Mongo
When an object has been created without versioning and
versioning has since been enabled, creating a version
must handle the case where the object doesn't have
the versionId property.
2018-06-27 18:38:28 -07:00
philipyoo 1e9ad08830 ft: ZENKO-583 add crr status check route 2018-06-27 17:20:11 -07:00
David Pineau 9e66fda610 Merge remote-tracking branch 'origin/development/7.4' into development/8.0 2018-06-27 18:42:41 +02:00
Rahul Padigela 888e154f0e Merge remote-tracking branch 'origin/feature/ZENKO-267-Routes-MD-Ingestion' into w/8.0/feature/ZENKO-267-Routes-MD-Ingestion 2018-06-26 17:22:02 -07:00
Nicolas Humbert 8448f909e4 FT: push isVersioned and ownerCanonicalId stats 2018-06-26 17:07:16 -07:00
bert-e 2b16e84733 Merge branch 'feature/ZENKO-235-manualPauseResumeRoutes' into q/8.0 2018-06-27 00:00:25 +00:00
philipyoo a1a6f65364 ft: add crr pause/retry/status routes 2018-06-26 16:55:11 -07:00
bert-e 7cf0c97d8e Merge branch 'feature/ZENKO-437_Backbeat_Readiness_Probes' into q/8.0 2018-06-26 23:26:20 +00:00
Taylor McKinnon 10e7b976d5 feat(zenko-437): Add HealthProbeServer 2018-06-26 15:56:54 -07:00
vrancurel e80ea95ad8 bugfix: fix skip scan on Mongo
This allows skipping scans when it takes too long to jump
over a prefix. It also has the side effect of batching
more common prefixes into one S3 list call with a delimiter
2018-06-26 14:08:18 -07:00
Jeremy Desanlis 7075318dd2 bf: ZENKO-578 mongoDB error replies
Do not raise an internalError to the upper layer when mongoDB fails to
update the master version with a specific error code. This fix is
related to the mongoDB issue SERVER-19600.

This commit also fixes the message field name of the mongoDB error: it is
'errmsg' and not 'message'.
2018-06-26 14:00:08 -07:00
bert-e 38f68fba1a Merge branch 'bugfix/ZENKO-308-listversion' into q/8.0 2018-06-25 17:45:24 +00:00
vrancurel 16f9a6f5f6 bugfix: list version is incorrect because sometimes
we replace the master with an incorrect last version
 because inserts are sometimes swapped. Add a check
 to be sure we always replace the master with a
 smaller (thus more recent) version.
2018-06-25 09:28:34 -07:00
bert-e c48e4b89bd Merge branch 'feature/ZENKO-315/CRRWithoutVersioning' into q/8.0 2018-06-23 00:00:50 +00:00
Bennett Buchanan 2a8169e936 feature: ZENKO-315 Add NFS properties 2018-06-22 14:02:39 -07:00
Alexander Chan 1af67fffc7 bf: fix mongo counter 2018-06-21 19:58:42 -07:00
Guillaume Gimenez e9ac11b1fe ft: ZENKO-561: bucket attributes handling fixed
on putBucketAttributes and getBucketAttributes
2018-06-20 15:54:43 -07:00
bert-e 30dcd6ef86 Merge branch 'feature/ZENKO-433/countIncUpdateandRefresh' into q/8.0 2018-06-20 22:05:49 +00:00
Alexander Chan 2ce9db4e01 ft: ZENKO-433 add item count support incremental update and refresh 2018-06-20 10:00:57 -07:00
philipyoo 9e234e2b41 bf: zero-fill response for getAllStats 2018-06-13 15:05:38 -07:00
philipyoo 83a831f512 rf: edit monitoring route details 2018-06-13 14:11:45 -07:00
Guillaume Gimenez 32c2a6fe99 FT: Metadata Proxy Server 2018-06-13 10:06:05 -07:00
Rahul Padigela 063361377c chore: update version and dependencies 2018-05-30 16:44:17 -07:00
Rahul Padigela ea7f28c82d
Merge pull request #495 from scality/fwdport/z/1.0-master
Fwdport/z/1.0 master
2018-05-30 08:32:59 -07:00
Rahul Padigela a9e760b32e chore: use correct dependency branches 2018-05-29 17:01:49 -07:00
Rahul Padigela 3b16a307b8 Merge remote-tracking branch 'origin/z/1.0' into fwdport/z/1.0-master 2018-05-29 16:52:11 -07:00
Rahul Padigela f8dfa378a1
Merge pull request #494 from scality/bf/ZENKO-370-restoreMongoOpLogFilteringPerDb
restore mongo op log filtering per db
2018-05-29 09:28:54 -07:00
Jonathan Gramain e16eadb474 bf: ZENKO-370 restore mongo oplog db filtering
For Orbit, which has multiple instances per mongo database. This change
restores the filtering per db, but keeps publishing the internal DBs
that have '__' in their name.

Also attempts to fix the original regexp, which was matching the '$DB.'
pattern at any place, not just at the beginning of the 'ns' field.
2018-05-25 14:54:21 -07:00
Rahul Padigela 5bf7fef53c
Merge pull request #491 from scality/bf/ZENKO-355-byteToMBConversion
bf: ZENKO-355 crr stats byte conversion
2018-05-22 15:00:45 -07:00
philipyoo 659aee2fc2 bf: fix/change byte conversion 2018-05-21 16:07:15 -07:00
Rahul Padigela bde52ab89b
Merge pull request #492 from scality/bf/ZENKO-344-mongoLogEntriesDuplicated-fixInfoEvent
bf: ZENKO-344 don't shunt 'info' event production
2018-05-15 16:24:13 -07:00
Jonathan Gramain 0ddb4da8a9 bf: ZENKO-344 don't shunt 'info' event production
Make sure the mongo consumer produces the 'info' event from the LogConsumer
when the lastEndID has not been reached yet.
2018-05-14 17:31:05 -07:00
Rached Ben Mustapha 56e280236b
Merge pull request #490 from scality/fix/ZENKO-346-no-crash-on-empty-stats
fix: do not crash on empty backbeat stats
2018-05-11 15:32:04 -07:00
Rached Ben Mustapha f904f04401 fix: do not crash on empty backbeat stats 2018-05-11 11:51:12 -07:00
Rahul Padigela db45fee9e8
Merge pull request #487 from scality/bf/ZENKO-344-mongoLogEntriesDuplicated
Bf/zenko 344 mongo log entries duplicated
2018-05-11 10:16:27 -07:00
JianqinWang ecc431c715 bf: typo in oplogReplay 2018-05-11 10:12:03 -07:00
JianqinWang 6f694ae7f4 bf: ZENKO-344 Fix duplicate mongo logs 2018-05-11 10:12:03 -07:00
Rahul Padigela e7862d3922
Merge pull request #489 from scality/bf/ZENKO-343-dontFilterInternalMongoNs
bf: ZENKO-343 remove regexp-based 'ns' filtering
2018-05-10 21:16:59 -07:00
Jonathan Gramain de7ebf70d7 bf: ZENKO-343 remove regexp-based 'ns' filtering
Don't filter internal namespace entries from the mongo log, as backbeat
needs the metastore entries exposed to process lifecycle entries.
2018-05-10 16:00:07 -07:00
Rahul Padigela 1425f03c1e
Merge pull request #486 from scality/bf/ZENKO-323-relaxMongoCountError
bf: ZENKO-323 relax mongo count error
2018-05-10 09:55:20 -07:00
Alexander Chan ad527911a2 bf: ZENKO-323 relax mongo count error 2018-05-08 17:18:26 -07:00
Rahul Padigela 6c528688ee
Merge pull request #485 from scality/back-porting-master
FX: constructing v4 query auth signature with proxyPath
2018-05-08 15:03:17 -07:00
Nicolas Humbert e53aa2efd2 FX: constructing v4 query auth signature with proxyPath
(cherry picked from commit 160b960607)
2018-05-08 14:51:13 -07:00
Rahul Padigela 873bc9b647
Merge pull request #479 from scality/ft/proxy
FX: constructing v4 query auth signature with proxyPath
2018-05-08 12:01:12 -07:00
Nicolas Humbert 160b960607 FX: constructing v4 query auth signature with proxyPath 2018-05-08 11:55:24 -07:00
Rahul Padigela 843bd1fe13
Merge pull request #484 from scality/bf/ZENKO-314-removeMongoCursorLimit
bf: ZENKO-314 remove mongo cursor limit
2018-05-07 18:40:28 -07:00
Alexander Chan 93a2a79699 bf: remove mongo cursor limit
removed the mongo cursor hard coded limit as that introduced undesired
behavior with small max-keys
2018-05-07 17:58:07 -07:00
Rahul Padigela ef32d5e94d
Merge pull request #481 from scality/bf/mongo-cursor-limit
bf: fix mongo cursor limit
2018-05-03 12:18:01 -07:00
Alexander Chan 45d9c3d999 bf: fix mongo cursor limit
use hard coded limit for the cursor limit
2018-05-02 19:30:39 -07:00
Rahul Padigela a2ce46d8d0
Merge pull request #478 from scality/ft/Zenko-21/prom-route
Zenko-21: FT: New Route for Prometheus Client
2018-05-02 12:38:34 -07:00
anurag4DSB 0c0bffa2c3 ft: ZENKO-21 add prometheus monitoring route
Signed-off-by: anurag4DSB <anurag.213@gmail.com>
2018-05-02 12:29:57 -07:00
ironman-machine d966c0bda9 merge #477 2018-05-02 01:36:01 +00:00
Rahul Padigela cb86a857cc
Merge pull request #476 from scality/bf/mongo-list-object-limit
Bf/mongo list object limit
2018-05-01 16:56:13 -07:00
Alexander Chan 55c9441bd7 bf: add mongo list-object max limit 2018-05-01 10:32:01 -07:00
David Pineau cae55a65c8 ft: interpret healthcheck status from bucketd
As discussed in S3C-1412, it is necessary for S3 to interpret Bucketd health
status in order to provide more flexibility (relative to the health check
mechanism) than failing due to a slight partial unavailability of the platform.

This is done in the bucketclient backend's healthcheck method, in order to
comply with all the other backends.

Fixes S3C-1412

Signed-off-by: David Pineau <david.pineau@scality.com>
2018-04-30 15:58:05 -07:00
philipyoo 114cbf5571 bf: use expired interval to avg out throughput
When an interval of data in Redis expires, throughput drops
abruptly. Given the data we collect, we can only calculate
the average throughput. To ease the erratic decrease when an
interval expires, instead combine the data gathered during the
elapsed part of the newest interval with the remaining part of
that interval multiplied against the average of the
just-expired interval.

In Redis, we need to save an extra interval to reference
the just-expired data.
2018-04-30 10:23:22 -07:00
Alexander Chan f2bab3b3d6 ft: ZENKO-262 update bucketInfo model 2018-04-30 10:23:22 -07:00
philipyoo 3276d235bb bf: do not include current UploadIdMarker in list
for in_memory only
listMultipartUpload should not include the current marker in the
listing. Previously, it would include the marker as the
first item in the list
2018-04-30 10:23:22 -07:00
philipyoo ee2aed10f3 ft: add uid property to all buckets 2018-04-24 10:07:58 -07:00
Rahul Padigela 19bee770ea chore: update scality dependencies 2018-04-23 12:23:58 -07:00
Rahul Padigela e0c5d03436 chore: update version and author 2018-04-23 12:18:42 -07:00
Rahul Padigela c8a7148645
Merge pull request #472 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-04-23 00:07:18 -07:00
Rahul Padigela 8ca5dce4fe Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-04-23 00:03:04 -07:00
Bennett Buchanan 599fb5709b
Merge pull request #470 from scality/rf/S3C-1399/refactor-backbeat-metrics-into-arsenal
rf: S3C-1399 Add Backbeat metrics and routes
2018-04-20 16:30:39 -07:00
Rahul Padigela 1161d5f75d
Merge pull request #471 from scality/fwdport/7.4-7.4-beta
Fwdport/7.4 7.4 beta
2018-04-19 11:04:15 -07:00
Rahul Padigela 26b6c5d1d9 Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-7.4-beta 2018-04-19 11:00:33 -07:00
Bennett Buchanan 8fd50cd20e rf: S3C-1399 Add Backbeat metrics and routes 2018-04-18 16:46:07 -07:00
Rahul Padigela 1f6b5bf2bd
Merge pull request #469 from scality/fix/less-verbose-report
Do not log report requests
2018-04-16 13:18:45 -07:00
Rached Ben Mustapha a7813daea9 Do not log report requests 2018-04-16 11:55:44 -07:00
Rahul Padigela 5d4eb84425
Merge pull request #468 from scality/ft/mongo-caching
add mongo client caching
2018-04-13 17:39:51 -07:00
Alexander Chan 9511fff479 add mongo client bucket/object metrics caching 2018-04-13 17:25:03 -07:00
Rahul Padigela d70f64a6d0
Merge pull request #465 from scality/fx/data-managed-count
fx: correct data managed count
2018-04-13 16:48:01 -07:00
Alexander Chan ee66dc811c fx: correct data managed count
fixes issue with mongoclient countItems
+ accounts for data stored directly to external backend
+ adds check to handle versioned and non-versioned buckets differently
2018-04-13 16:32:43 -07:00
Rahul Padigela 2710471726
Merge pull request #467 from scality/backport/master-rel/7.4-beta
Backport: master to rel/7.4-beta
2018-04-10 17:57:54 -07:00
Dora Korpar 9aee9f6cf0 ft: extract function for date modified headers
(cherry picked from commit 92da4c90e5)
2018-04-10 17:44:22 -07:00
Rahul Padigela a168fab266
Merge pull request #435 from scality/ft/objdel-add-modified-header-check
Ft/objdel add modified header check
2018-04-10 17:30:54 -07:00
Dora Korpar 92da4c90e5 ft: extract function for date modified headers 2018-04-10 17:05:03 -07:00
Rahul Padigela a95d5ea15d
Merge pull request #464 from scality/fix/flaky-mongo
Fixes flakiness in S3 functional tests with mongo backend
2018-04-07 22:16:56 -07:00
Salim aad05faa12 Fixes flakiness in S3 functional tests with mongo backend 2018-04-06 17:27:24 -07:00
Rahul Padigela ab230ebfe7
Merge pull request #463 from scality/fix/mongo-tests
Fix/mongo tests
2018-04-06 16:48:53 -07:00
Salim b3103e1307 ZENKO-227 fix: mongodb versioning
This fixes the problem where, if the version ID is passed, it causes
an Internal Error failure because it tries to create a new object
with the same key value. This adds a check to see if the object exists
first, then updates or upserts accordingly.
2018-04-06 15:41:09 -07:00
Salim f3b0091210 feat: add error KeyAlreadyExists 2018-04-06 11:19:37 -07:00
Rahul Padigela f633b91072
Merge pull request #460 from scality/ft/add-data-managed
ft; add data managed metrics
2018-04-06 10:47:06 -07:00
Alexander Chan 87807462dc ft; add data managed metrics 2018-04-05 10:17:26 -07:00
Rahul Padigela d7f114d504
Merge pull request #461 from scality/fix/skip-mpu-bucket-prefix
Skip MPU shadow buckets
2018-04-03 14:42:28 -07:00
Rached Ben Mustapha 5ef168e654 Skip MPU shadow buckets 2018-04-03 13:20:57 -07:00
Rahul Padigela 82b4055c6c
Merge pull request #459 from scality/fix/stuck-replication
Fix/stuck replication
2018-04-02 14:55:15 -07:00
Rached Ben Mustapha 91ccccfe85 Remove use of global variable 2018-04-02 14:49:04 -07:00
Rached Ben Mustapha 696999874b Fix replication stream getting stuck
The mongodb transform stream would never actually emit any objects
to the extensions.
2018-04-02 14:31:54 -07:00
Rached Ben Mustapha d2bed3bf9a Un-hardcode mongodb database name 2018-04-02 14:31:07 -07:00
Rahul Padigela ad42baa5ff
Merge pull request #458 from scality/fix/mongologreader-contract
Fix/mongologreader contract
2018-04-02 12:07:20 -07:00
Rached Ben Mustapha 6ac92b2ad2 Fix mongodb log consumer initial values
Pre-existing LogConsumer contract uses `null` for initial values,
`undefined` breaks client code assumptions.
2018-04-02 11:31:02 -07:00
Rahul Padigela 13dbf48867
Merge pull request #457 from scality/ft/initial-instance-id
Ft/initial instance
2018-03-30 16:59:11 -07:00
Rached Ben Mustapha e79ad68e96 S3C-1355 Use provided instance id 2018-03-30 16:24:51 -07:00
Rahul Padigela a4a5fe0db0
Merge pull request #456 from scality/ft/ZENKO-147/crr-retry-kafka
FT: Add objectMD setters for replicationInfo
2018-03-30 11:37:22 -07:00
Bennett Buchanan f838fcc31f FT: Add objectMD setters for replicationInfo 2018-03-29 16:27:48 -07:00
VR eb9dd23b14
Merge pull request #455 from scality/ZENKO-222-bf-mongo-url
ZENKO-222 bf: revert mongo url
2018-03-28 18:03:19 -07:00
JianqinWang edbf7ab650 ZENKO-222 bf: revert mongo url 2018-03-28 17:57:59 -07:00
Rahul Padigela e068950903
Merge pull request #453 from scality/forward/orbit
Forward/orbit
2018-03-28 16:25:55 -07:00
Rahul Padigela 1ceb7b264c chore: remove branch version from package.json 2018-03-28 16:03:42 -07:00
vrancurel 5a29aaa10c fixing metadata search broken by commit ea8d523501fcd996447986318e59a95e729563b0 2018-03-28 16:03:42 -07:00
Rahul Padigela 7587f7ba25 ft: update version 2018-03-28 16:03:42 -07:00
Rahul Padigela 795b145594
Merge pull request #452 from scality/add-bson-to-dependencies
add bson to dependencies
2018-03-28 11:16:01 -07:00
Jeremy Desanlis 58f027a693 add bson to dependencies 2018-03-27 18:36:13 -07:00
Rahul Padigela e09348d658
Merge pull request #451 from scality/fwdport/7.4-beta-master
Fwdport/7.4 beta master
2018-03-27 17:22:15 -07:00
Alexander Chan bddb90c6a1 Merge remote-tracking branch 'origin/rel/7.4-beta' into fwdport/7.4-beta-master 2018-03-27 15:49:03 -07:00
Rahul Padigela 94efaaccc2
Merge pull request #446 from scality/ft/S3C-1327-add-bucketinfo-uid-prop
ft: Add uid property to BucketInfo
2018-03-26 07:00:50 -07:00
Rahul Padigela 463a8ebe15
Merge pull request #448 from scality/fwd/7.4-to-7.4-beta
Fwd: 7.4 to 7.4 beta
2018-03-26 07:00:19 -07:00
philipyoo f17ce17857 Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-to-7.4-beta 2018-03-23 10:24:36 -07:00
Rahul Padigela 3a5250e2e9
Merge pull request #437 from scality/ft/S3C-1148-statsclient-multiple-ids
Ft/S3C-1148 statsclient multiple ids
2018-03-22 14:56:05 -07:00
ironman-machine 48cb7b3b05 merge #447 2018-03-21 18:44:42 +00:00
Nicolas Humbert 84c4c147a2 FIX: Mongo Client - countItems 2018-03-20 17:54:59 -07:00
Rahul Padigela 958e818655
Merge pull request #445 from scality/fwd/7.4-beta-master
Fwd/7.4 beta master
2018-03-20 14:51:33 -07:00
philipyoo 91dd219c47 ft: Add uid property to BucketInfo
Needed for lifecycle processing in backbeat
2018-03-19 18:56:52 -07:00
Alexander Chan 5f3d478edb Merge remote-tracking branch 'origin/rel/7.4-beta' 2018-03-19 15:49:24 -07:00
Rahul Padigela 04d56cfdff ft: update version number 2018-03-14 13:28:10 -07:00
Rahul Padigela 73dd529c29 ft: update package.json dependencies 2018-03-14 13:08:44 -07:00
philipyoo a9aa40c168 ft: extend statsclient to query by list of ids
Extend support for querying a list of ids and
returning a total sum of results for that list.

Also add wrapper for redis method `keys`
2018-03-14 11:22:15 -07:00
ironman-machine 189194a4e7 merge #433 2018-03-08 20:39:29 +00:00
JianqinWang a9a6b2433d rf: remove use of util.format 2018-03-08 09:22:28 -08:00
JianqinWang fa19fc8859 rf: name change for replica set hosts 2018-03-08 09:22:28 -08:00
JianqinWang a269619698 ZENKO-15 ft: oplog tailer for MongoDB 2018-03-08 09:22:28 -08:00
Rahul Padigela da1da43597
Merge pull request #438 from scality/fwdport/7.4-master
Fwdport/7.4 master
2018-03-08 00:26:46 -08:00
Rahul Padigela caac4e4e7e Merge remote-tracking branch 'origin/rel/7.4' into fwdport/7.4-master 2018-03-07 19:08:15 -08:00
Rahul Padigela 67250133dc
Merge pull request #436 from scality/bf/missing-bucketclient-param
bf: fix missing param needed for BCI
2018-03-06 16:43:35 -08:00
JianqinWang d3f3be03ae bf: fix missing param needed for BCI 2018-03-06 16:39:54 -08:00
ironman-machine 1a9f1afd2c merge #425 2018-03-06 18:25:58 +00:00
JianqinWang 9a5afdbc5c rf: rename mongo replicaset hosts 2018-03-05 17:28:11 -08:00
JianqinWang 83cf54512b ZENKO-140 rf: extract metadata backends from S3 2018-03-05 16:33:38 -08:00
ironman-machine 7e3ad64456 merge #432 2018-02-24 01:27:51 +00:00
Nicolas Humbert eba0cb6116 FT: add proxy_path header 2018-02-22 17:16:43 -08:00
Lauren Spiegel fd23e82ab9
Merge pull request #419 from scality/fix/replaceUpdate
Fix/stopSwallowingErrors
2018-02-14 13:08:05 -08:00
Lauren Spiegel d7cf5e8ccf FIX: Stop swallowing errors 2018-02-14 13:02:22 -08:00
flavien-scality d0f4f95f0d
Merge pull request #417 from scality/fwd/7.4-master
Fwd/7.4 master (try succeeded)
2018-02-14 09:54:16 +01:00
Alexandre Merle 0e606b1061 Merge remote-tracking branch 'origin/rel/7.4' into fwd/7.4-master 2018-02-14 04:24:23 +01:00
ironman-machine 44ead88d83 merge #420 2018-02-13 19:32:13 +00:00
vrancurel d8e1497940 use hosts config instead of host and port 2018-02-12 15:24:51 -08:00
ThibaultRiviere 4193394340
Merge pull request #407 from scality/fwdport_7.4_master
Fwdport 7.4 master
2018-02-07 13:42:22 +01:00
Thibault Riviere 0f1b0dad01 Merge branch 'rel/7.4' into fwdport_7.4_master 2018-02-07 13:34:03 +01:00
ironman-machine 393d6edc07 merge #408 2018-02-06 23:55:05 +00:00
vrancurel 70638eaf7a support search in Mongo 2018-02-06 14:18:51 -08:00
Lauren Spiegel 9d0156dfdf
Merge pull request #403 from scality/welcome/mongo
Welcome/mongo
2018-02-02 15:10:44 -08:00
Lauren Spiegel 8d8028b83f CHORE: Change filename 2018-02-02 12:08:49 -08:00
Lauren Spiegel b99fe2cd8d Changes to client due to move 2018-02-02 12:08:46 -08:00
Lauren Spiegel cc26f288be Move mongoclient from s3 to arsenal 2018-02-02 11:34:45 -08:00
93 changed files with 15258 additions and 130 deletions


@ -387,6 +387,10 @@
         "code": 409,
         "description": "The request was rejected because it attempted to create a resource that already exists."
     },
+    "KeyAlreadyExists": {
+        "code": 409,
+        "description": "The request was rejected because it attempted to create a resource that already exists."
+    },
     "ServiceFailure": {
         "code": 500,
         "description": "Server error: the request processing has failed because of an unknown error, exception or failure."


@ -3,6 +3,7 @@ module.exports = {
     constants: require('./lib/constants'),
     db: require('./lib/db'),
     errors: require('./lib/errors.js'),
+    errorUtils: require('./lib/errorUtils'),
     shuffle: require('./lib/shuffle'),
     stringHash: require('./lib/stringHash'),
     ipCheck: require('./lib/ipCheck'),
@ -12,15 +13,7 @@ module.exports = {
         dhparam: require('./lib/https/dh2048.js'),
     },
     algorithms: {
-        list: {
-            Basic: require('./lib/algos/list/basic').List,
-            Delimiter: require('./lib/algos/list/delimiter').Delimiter,
-            DelimiterVersions: require('./lib/algos/list/delimiterVersions')
-                .DelimiterVersions,
-            DelimiterMaster: require('./lib/algos/list/delimiterMaster')
-                .DelimiterMaster,
-            MPU: require('./lib/algos/list/MPU').MultipartUploads,
-        },
+        list: require('./lib/algos/list/exportAlgos'),
         listTools: {
             DelimiterTools: require('./lib/algos/list/tools'),
         },
@ -53,6 +46,12 @@ module.exports = {
             RESTClient: require('./lib/network/rest/RESTClient'),
         },
         RoundRobin: require('./lib/network/RoundRobin'),
+        probe: {
+            HealthProbeServer:
+                require('./lib/network/probe/HealthProbeServer.js'),
+        },
+        kmip: require('./lib/network/kmip'),
+        kmipClient: require('./lib/network/kmip/Client'),
     },
     s3routes: {
         routes: require('./lib/s3routes/routes'),
@ -63,6 +62,9 @@ module.exports = {
         convertToXml: require('./lib/s3middleware/convertToXml'),
         escapeForXml: require('./lib/s3middleware/escapeForXml'),
         tagging: require('./lib/s3middleware/tagging'),
+        checkDateModifiedHeaders:
+            require('./lib/s3middleware/validateConditionalHeaders')
+                .checkDateModifiedHeaders,
         validateConditionalHeaders:
             require('./lib/s3middleware/validateConditionalHeaders')
                 .validateConditionalHeaders,
@ -80,12 +82,39 @@ module.exports = {
     },
     storage: {
         metadata: {
-            MetadataFileServer:
-                require('./lib/storage/metadata/file/MetadataFileServer'),
-            MetadataFileClient:
-                require('./lib/storage/metadata/file/MetadataFileClient'),
-            LogConsumer:
-                require('./lib/storage/metadata/bucketclient/LogConsumer'),
+            MetadataWrapper: require('./lib/storage/metadata/MetadataWrapper'),
+            bucketclient: {
+                BucketClientInterface:
+                    require('./lib/storage/metadata/bucketclient/' +
+                        'BucketClientInterface'),
+                LogConsumer:
+                    require('./lib/storage/metadata/bucketclient/LogConsumer'),
+            },
+            file: {
+                BucketFileInterface:
+                    require('./lib/storage/metadata/file/BucketFileInterface'),
+                MetadataFileServer:
+                    require('./lib/storage/metadata/file/MetadataFileServer'),
+                MetadataFileClient:
+                    require('./lib/storage/metadata/file/MetadataFileClient'),
+            },
+            inMemory: {
+                metastore:
+                    require('./lib/storage/metadata/in_memory/metastore'),
+                metadata: require('./lib/storage/metadata/in_memory/metadata'),
+                bucketUtilities:
+                    require('./lib/storage/metadata/in_memory/bucket_utilities'),
+            },
+            mongoclient: {
+                MongoClientInterface:
+                    require('./lib/storage/metadata/mongoclient/' +
+                        'MongoClientInterface'),
+                LogConsumer:
+                    require('./lib/storage/metadata/mongoclient/LogConsumer'),
+            },
+            proxy: {
+                Server: require('./lib/storage/metadata/proxy/Server'),
+            },
         },
         data: {
             file: {
@ -114,4 +143,8 @@ module.exports = {
     pensieve: {
         credentialUtils: require('./lib/executables/pensieveCreds/utils'),
     },
+    backbeat: {
+        Metrics: require('./lib/backbeat/Metrics'),
+        routes: require('./lib/backbeat/routes'),
+    },
 };
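Taken together, these hunks reorganize the top-level exports; downstream code would reach the new entry points roughly as follows (an illustrative sketch of the paths shown in the diff, not an exhaustive list):

const arsenal = require('arsenal');

const listAlgos = arsenal.algorithms.list;              // now loaded from exportAlgos
const { HealthProbeServer } = arsenal.network.probe;    // 'network' parent key assumed, not shown in the hunk
const { MongoClientInterface } = arsenal.storage.metadata.mongoclient;
const { Metrics, routes } = arsenal.backbeat;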


@ -0,0 +1,9 @@
module.exports = {
Basic: require('./basic').List,
Delimiter: require('./delimiter').Delimiter,
DelimiterVersions: require('./delimiterVersions')
.DelimiterVersions,
DelimiterMaster: require('./delimiterMaster')
.DelimiterMaster,
MPU: require('./MPU').MultipartUploads,
};

lib/algos/list/skip.js (new file, 88 lines)

@ -0,0 +1,88 @@
const assert = require('assert');
const { FILTER_END, FILTER_SKIP, SKIP_NONE } = require('./tools');
const MAX_STREAK_LENGTH = 100;
/**
* Handle the filtering and the skip mechanism of a listing result.
*/
class Skip {
/**
* @param {Object} params - skip parameters
* @param {Object} params.extension - delimiter extension used (required)
* @param {String} params.gte - current range gte (greater than or
* equal) used by the client code
*/
constructor(params) {
assert(params.extension);
this.extension = params.extension;
this.gteParams = params.gte;
this.listingEndCb = null;
this.skipRangeCb = null;
/* Used to count consecutive FILTER_SKIP returned by the extension
* filter method. Once this counter reaches MAX_STREAK_LENGTH, the
* filter function tries to skip unwanted values by defining a new
* range. */
this.streakLength = 0;
}
setListingEndCb(cb) {
this.listingEndCb = cb;
}
setSkipRangeCb(cb) {
this.skipRangeCb = cb;
}
/**
* Filter an entry.
* @param {Object} entry - entry to filter.
* @return {undefined}
*
* This function calls the listing end or the skip range callbacks if
* needed.
*/
filter(entry) {
assert(this.listingEndCb);
assert(this.skipRangeCb);
const filteringResult = this.extension.filter(entry);
const skippingRange = this.extension.skipping();
if (filteringResult === FILTER_END) {
this.listingEndCb();
} else if (filteringResult === FILTER_SKIP
&& skippingRange !== SKIP_NONE) {
if (++this.streakLength >= MAX_STREAK_LENGTH) {
const newRange = this._inc(skippingRange);
/* Avoid to loop on the same range again and again. */
if (newRange === this.gteParams) {
this.streakLength = 1;
} else {
this.skipRangeCb(newRange);
}
}
} else {
this.streakLength = 0;
}
}
_inc(str) {
if (!str) {
return str;
}
const lastCharValue = str.charCodeAt(str.length - 1);
const lastCharNewValue = String.fromCharCode(lastCharValue + 1);
return `${str.slice(0, str.length - 1)}${lastCharNewValue}`;
}
}
module.exports = Skip;
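A minimal wiring sketch for the Skip helper above; the fake extension and the logging callbacks are placeholders, only the constants come from ./tools as imported by skip.js (paths relative to the repository root):

const Skip = require('./lib/algos/list/skip');
const { FILTER_SKIP, SKIP_NONE } = require('./lib/algos/list/tools');

const fakeExtension = {
    filter: () => FILTER_SKIP,   // pretend every entry is filtered out
    skipping: () => SKIP_NONE,   // but never propose a range to skip to
};
const skip = new Skip({ extension: fakeExtension, gte: undefined });
skip.setListingEndCb(() => console.log('listing ended'));
skip.setSkipRangeCb(newRange => console.log('restart listing from', newRange));
['key-1', 'key-2'].forEach(entry => skip.filter(entry));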


@ -127,6 +127,17 @@ function check(request, log, data, awsService) {
         return { err: errors.RequestTimeTooSkewed };
     }
+    let proxyPath = null;
+    if (request.headers.proxy_path) {
+        try {
+            proxyPath = decodeURIComponent(request.headers.proxy_path);
+        } catch (err) {
+            log.debug('invalid proxy_path header', { proxyPath, err });
+            return { err: errors.InvalidArgument.customizeDescription(
+                'invalid proxy_path header') };
+        }
+    }
     const stringToSign = constructStringToSign({
         log,
         request,
@ -136,6 +147,7 @@ function check(request, log, data, awsService) {
         timestamp,
         payloadChecksum,
         awsService: service,
+        proxyPath,
     });
     log.trace('constructed stringToSign', { stringToSign });
     if (stringToSign instanceof Error) {


@ -62,6 +62,17 @@ function check(request, log, data) {
         return { err: errors.RequestTimeTooSkewed };
     }
+    let proxyPath = null;
+    if (request.headers.proxy_path) {
+        try {
+            proxyPath = decodeURIComponent(request.headers.proxy_path);
+        } catch (err) {
+            log.debug('invalid proxy_path header', { proxyPath });
+            return { err: errors.InvalidArgument.customizeDescription(
+                'invalid proxy_path header') };
+        }
+    }
     // In query v4 auth, the canonical request needs
     // to include the query params OTHER THAN
     // the signature so create a
@ -87,6 +98,7 @@ function check(request, log, data) {
         credentialScope:
             `${scopeDate}/${region}/${service}/${requestType}`,
         awsService: service,
+        proxyPath,
     });
     if (stringToSign instanceof Error) {
         return { err: stringToSign };
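Both hunks make the V4 signature computation honor a URL-encoded proxy_path header; a proxy in front of CloudServer would set it roughly like this (host, port and paths are placeholders):

const http = require('http');

const req = http.request({
    host: 'cloudserver.example.com',
    port: 8000,
    path: '/rewritten/path',
    headers: {
        // the original path seen by the client, URL-encoded, so the server
        // can use it when reconstructing the string to sign
        proxy_path: encodeURIComponent('/original/client/path'),
    },
}, res => console.log('status', res.statusCode));
req.end();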

lib/backbeat/Metrics.js (new file, 539 lines)

@ -0,0 +1,539 @@
const async = require('async');
const errors = require('../../lib/errors');
const RedisClient = require('../../lib/metrics/RedisClient');
const StatsModel = require('../../lib/metrics/StatsModel');
const INTERVAL = 300; // 5 minutes
const EXPIRY = 86400; // 24 hours
const THROUGHPUT_EXPIRY = 900; // 15 minutes
const isTest = process.env.CI === 'true';
class Metrics {
constructor(config, logger) {
const { redisConfig, validSites, internalStart } = config;
this._logger = logger;
this._redisClient = new RedisClient(redisConfig, this._logger);
// Redis expiry increased by an additional interval so we can reference
// the immediate older data for average throughput calculation
this._statsClient = new StatsModel(this._redisClient, INTERVAL, EXPIRY);
this._validSites = validSites;
this._internalStart = internalStart;
}
/**
* Query StatsClient for all ops given
* @param {array} ops - array of redis key names to query
* @param {string} site - site name or '*' wildcard
* @param {string} bucketName - the name of the bucket
* @param {string} objectKey - the object key name
* @param {string} versionId - the object version ID
* @param {function} cb - callback(err, res)
* @return {undefined}
*/
_queryStats(ops, site, bucketName, objectKey, versionId, cb) {
return async.map(ops, (op, done) => {
const hasGlobalKey = this._hasGlobalKey(op);
if (site === 'all') {
const queryStrings = this._validSites.map(s => {
if (bucketName && objectKey && versionId) {
return `${s}:${bucketName}:${objectKey}:` +
`${versionId}:${op}`;
}
return `${s}:${op}`;
});
if (hasGlobalKey) {
return this._statsClient.getAllGlobalStats(queryStrings,
this._logger, done);
}
return this._statsClient.getAllStats(this._logger, queryStrings,
done);
}
// Query only a single given site or storage class
// First, validate the site or storage class
if (!this._validSites.includes(site)) {
// escalate error to log later
return done({
message: 'invalid site name provided',
type: errors.RouteNotFound,
method: 'Metrics._queryStats',
});
}
let queryString;
if (bucketName && objectKey && versionId) {
queryString =
`${site}:${bucketName}:${objectKey}:${versionId}:${op}`;
} else {
queryString = `${site}:${op}`;
}
if (hasGlobalKey) {
return this._redisClient.get(queryString, (err, res) => {
if (err) {
return done({
message: `Redis error: ${err.message}`,
type: errors.InternalError,
method: 'Metrics._queryStats',
});
}
return done(null, { requests: [res || 0] });
});
}
return this._statsClient.getStats(this._logger, queryString, done);
}, cb);
}
/**
* Determines whether the Redis op uses a global counter or interval key.
* @param {String} op - The Redis operation
* @return {Boolean} true if a global counter, false otherwise
*/
_hasGlobalKey(op) {
if (isTest) {
return op.includes('test:bb:bytespending') ||
op.includes('test:bb:opspending');
}
return op.includes('bb:crr:bytespending') ||
op.includes('bb:crr:opspending');
}
/**
* Get data points which are the keys used to query Redis
* @param {object} details - route details from lib/backbeat/routes.js
* @param {array} data - provides already fetched data in order of
* dataPoints mentioned for each route in lib/backbeat/routes.js. This can
* be undefined.
* @param {function} cb - callback(error, data), where data returns
* data stored in Redis.
* @return {array} dataPoints array defined in lib/backbeat/routes.js
*/
_getData(details, data, cb) {
if (!data) {
const { dataPoints, site, bucketName, objectKey,
versionId } = details;
return this._queryStats(dataPoints, site, bucketName, objectKey,
versionId, cb);
}
return cb(null, data);
}
/**
* Uptime of server based on this._internalStart up to max of expiry
* @param {number} expiry - max expiry
* @return {number} uptime of server up to expiry time
*/
_getMaxUptime(expiry) {
let secondsSinceStart = (Date.now() - this._internalStart) / 1000;
// allow only a minimum value of 1 for uptime
if (secondsSinceStart < 1) {
secondsSinceStart = 1;
}
return secondsSinceStart < expiry ? secondsSinceStart : expiry;
}
/**
* Get replication backlog in ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getBacklog(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: backlog', {
origin: err.method,
method: 'Metrics.getBacklog',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: backlog', {
method: 'Metrics.getBacklog',
});
return cb(errors.InternalError);
}
const count = Number.parseInt(res[0].requests, 10);
const size = Number.parseInt(res[1].requests, 10);
const response = {
backlog: {
description: 'Number of incomplete replication ' +
'operations (count) and number of incomplete bytes ' +
'transferred (size)',
results: {
count: count < 0 ? 0 : count,
size: size < 0 ? 0 : size,
},
},
};
return cb(null, response);
});
}
/**
* Get completed replicated stats by ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getCompletions(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: completions', {
origin: err.method,
method: 'Metrics.getCompletions',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: completions', {
method: 'Metrics.getCompletions',
});
return cb(errors.InternalError);
}
const uptime = this._getMaxUptime(EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [opsDone, bytesDone] = res.map(r => (
r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
acc + i, 0)
));
const response = {
completions: {
description: 'Number of completed replication operations ' +
'(count) and number of bytes transferred (size) in ' +
`the last ${Math.floor(uptime)} seconds`,
results: {
count: opsDone,
size: bytesDone,
},
},
};
return cb(null, response);
});
}
/**
* Get failed replication stats by ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getFailedMetrics(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: failures', {
origin: err.method,
method: 'Metrics.getFailedMetrics',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: failures', {
method: 'Metrics.getFailedMetrics',
});
return cb(errors.InternalError);
}
const uptime = this._getMaxUptime(EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [opsFail, bytesFail] = res.map(r => (
r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
acc + i, 0)
));
const response = {
failures: {
description: 'Number of failed replication operations ' +
'(count) and bytes (size) in the last ' +
`${Math.floor(uptime)} seconds`,
results: {
count: opsFail,
size: bytesFail,
},
},
};
return cb(null, response);
});
}
/**
* Get current throughput in ops/sec and bytes/sec up to max of 15 minutes
* Throughput is the number of units processed in a given time
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getThroughput(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: throughput', {
origin: err.method,
method: 'Metrics.getThroughput',
});
return cb(err.type.customizeDescription(err.message));
}
if (err) {
this._logger.error('error getting metrics: throughput', {
method: 'Metrics.getThroughput',
});
return cb(errors.InternalError);
}
const now = new Date();
const uptime = this._getMaxUptime(THROUGHPUT_EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [opsThroughput, bytesThroughput] = res.map(r => {
let total = r.requests.slice(0, numOfIntervals).reduce(
(acc, i) => acc + i, 0);
// if uptime !== THROUGHPUT_EXPIRY, use internal timer and
// do not include the extra 4th interval
if (uptime === THROUGHPUT_EXPIRY) {
// all intervals apply, including 4th interval
const lastInterval =
this._statsClient._normalizeTimestamp(now);
// in seconds
const diff = (now - lastInterval) / 1000;
// Get average for last interval depending on time
// surpassed so far for newest interval
total += ((INTERVAL - diff) / INTERVAL) *
r.requests[numOfIntervals];
}
// Divide total by uptime to determine data per second
return (total / uptime);
});
const response = {
throughput: {
description: 'Current throughput for replication ' +
'operations in ops/sec (count) and bytes/sec (size) ' +
`in the last ${Math.floor(uptime)} seconds`,
results: {
count: opsThroughput.toFixed(2),
size: bytesThroughput.toFixed(2),
},
},
};
return cb(null, response);
});
}
/**
* Get current throughput for an object in bytes/sec. Throughput is the
* number of bytes transferred in a given time.
* @param {object} details - route details from lib/api/routes.js
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
getObjectThroughput(details, cb) {
this._getData(details, undefined, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: object throughput', {
origin: err.method,
method: 'Metrics.getObjectThroughput',
});
return cb(err.type.customizeDescription(err.message));
}
if (err) {
this._logger.error('error getting metrics: object throughput', {
method: 'Metrics.getObjectThroughput',
error: err.message,
});
return cb(errors.InternalError);
}
const now = new Date();
const uptime = this._getMaxUptime(THROUGHPUT_EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const { requests } = res[0]; // Bytes done
let total = requests.slice(0, numOfIntervals)
.reduce((acc, i) => acc + i, 0);
// if uptime !== THROUGHPUT_EXPIRY, use internal timer
// and do not include the extra 4th interval
if (uptime === THROUGHPUT_EXPIRY) {
// all intervals apply, including 4th interval
const lastInterval =
this._statsClient._normalizeTimestamp(now);
// in seconds
const diff = (now - lastInterval) / 1000;
// Get average for last interval depending on time passed so
// far for newest interval
total += ((INTERVAL - diff) / INTERVAL) *
requests[numOfIntervals];
}
// Divide total by uptime to determine data per second
const response = {
description: 'Current throughput for object replication in ' +
'bytes/sec (throughput)',
throughput: (total / uptime).toFixed(2),
};
return cb(null, response);
});
}
/**
* Get CRR progress for an object in bytes. Progress is the percentage of
* the object that has completed replication.
* @param {object} details - route details from lib/api/routes.js
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
getObjectProgress(details, cb) {
this._getData(details, undefined, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: object progress', {
origin: err.method,
method: 'Metrics.getObjectProgress',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: object progress', {
method: 'Metrics.getObjectProgress',
error: err.message,
});
return cb(errors.InternalError);
}
// Find if time since start is less than EXPIRY time
const uptime = this._getMaxUptime(EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [totalBytesToComplete, bytesComplete] = res.map(r => (
r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
acc + i, 0)
));
const ratio = totalBytesToComplete === 0 ? 0 :
bytesComplete / totalBytesToComplete;
const percentage = (ratio * 100).toFixed();
const response = {
description: 'Number of bytes to be replicated ' +
'(pending), number of bytes transferred to the ' +
'destination (completed), and percentage of the ' +
'object that has completed replication (progress)',
pending: totalBytesToComplete - bytesComplete,
completed: bytesComplete,
progress: `${percentage}%`,
};
return cb(null, response);
});
}
/**
* Get pending replication stats by ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getPending(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: pending', {
origin: err.method,
method: 'Metrics.getPending',
});
return cb(err.type.customizeDescription(err.message));
}
const { dataPoints } = details;
if (err || res.length !== dataPoints.length) {
this._logger.error('error getting metrics: pending', {
method: 'Metrics.getPending',
error: err,
dataPoints,
res,
});
return cb(errors.InternalError
.customizeDescription('error getting metrics: pending'));
}
const count = Number.parseInt(res[0].requests, 10);
const size = Number.parseInt(res[1].requests, 10);
const response = {
pending: {
description: 'Number of pending replication ' +
'operations (count) and bytes (size)',
results: {
count: count < 0 ? 0 : count,
size: size < 0 ? 0 : size,
},
},
};
return cb(null, response);
});
}
/**
* Get all metrics
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getAllMetrics(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: all', {
origin: err.method,
method: 'Metrics.getAllMetrics',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: all', {
method: 'Metrics.getAllMetrics',
});
return cb(errors.InternalError);
}
// NOTE: Edited to fit failed metrics
const failMetricsDetails = Object.assign({}, details,
{ dataPoints: new Array(2) });
// res = [ ops, ops_done, ops_fail, bytes, bytes_done, bytes_fail,
// opsPending, bytesPending ]
return async.parallel([
done => this.getBacklog({ dataPoints: new Array(2) }, done,
[res[6], res[7]]),
done => this.getCompletions({ dataPoints: new Array(2) }, done,
[res[1], res[4]]),
done => this.getFailedMetrics(failMetricsDetails, done,
[res[2], res[5]]),
done => this.getThroughput({ dataPoints: new Array(2) }, done,
[res[1], res[4]]),
done => this.getPending({ dataPoints: new Array(2) }, done,
[res[6], res[7]]),
], (err, results) => {
if (err) {
this._logger.error('error getting metrics: all', {
method: 'Metrics.getAllMetrics',
});
return cb(errors.InternalError);
}
const store = Object.assign({}, ...results);
return cb(null, store);
});
});
}
/**
* Close redis client
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
disconnect(cb) {
return this._redisClient.disconnect(cb);
}
/**
* Retrieve the list of redis client connectiosn
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
listClients(cb) {
return this._redisClient.listClients(cb);
}
}
module.exports = Metrics;
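For orientation, here is a minimal sketch of the merged object that getAllMetrics hands back once the parallel calls above complete; the descriptions are abbreviated and every number is illustrative, not a real measurement.

const exampleAllMetrics = {
    backlog: { description: '...', results: { count: 4, size: 1024 } },
    completions: { description: '...', results: { count: 10, size: 4096 } },
    failures: { description: '...', results: { count: 1, size: 512 } },
    // throughput values are strings because of toFixed(2)
    throughput: { description: '...', results: { count: '0.03', size: '13.65' } },
    pending: { description: '...', results: { count: 2, size: 2048 } },
};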

167
lib/backbeat/routes.js Normal file
View File

@ -0,0 +1,167 @@
/*
This file contains Backbeat API routes and route details
*/
/**
* The metrics route model.
* @param {Object} redisKeys - The Redis keys used for Backbeat metrics
* @param {Array} allLocations - The list of replication location names
* @return {Array} The array of route objects
*/
function routes(redisKeys, allLocations) {
return [
// Route: /_/healthcheck
{
httpMethod: 'GET',
category: 'healthcheck',
type: 'basic',
method: 'getHealthcheck',
extensions: {},
},
// Route: /_/metrics/crr/<location>/pending
{
httpMethod: 'GET',
category: 'metrics',
type: 'pending',
extensions: { crr: [...allLocations, 'all'] },
method: 'getPending',
dataPoints: [redisKeys.opsPending, redisKeys.bytesPending],
},
// Route: /_/metrics/crr/<location>/backlog
{
httpMethod: 'GET',
category: 'metrics',
type: 'backlog',
extensions: { crr: [...allLocations, 'all'] },
method: 'getBacklog',
dataPoints: [redisKeys.opsPending, redisKeys.bytesPending],
},
// Route: /_/metrics/crr/<location>/completions
{
httpMethod: 'GET',
category: 'metrics',
type: 'completions',
extensions: { crr: [...allLocations, 'all'] },
method: 'getCompletions',
dataPoints: [redisKeys.opsDone, redisKeys.bytesDone],
},
// Route: /_/metrics/crr/<location>/failures
{
httpMethod: 'GET',
category: 'metrics',
type: 'failures',
extensions: { crr: [...allLocations, 'all'] },
method: 'getFailedMetrics',
dataPoints: [redisKeys.opsFail, redisKeys.bytesFail],
},
// Route: /_/metrics/crr/<location>/throughput
{
httpMethod: 'GET',
category: 'metrics',
type: 'throughput',
extensions: { crr: [...allLocations, 'all'] },
method: 'getThroughput',
dataPoints: [redisKeys.opsDone, redisKeys.bytesDone],
},
// Route: /_/metrics/crr/<location>/all
{
httpMethod: 'GET',
category: 'metrics',
type: 'all',
extensions: { crr: [...allLocations, 'all'] },
method: 'getAllMetrics',
dataPoints: [redisKeys.ops, redisKeys.opsDone, redisKeys.opsFail,
redisKeys.bytes, redisKeys.bytesDone, redisKeys.bytesFail,
redisKeys.opsPending, redisKeys.bytesPending],
},
// Route: /_/metrics/crr/<site>/progress/<bucket>/<key>
{
httpMethod: 'GET',
category: 'metrics',
type: 'progress',
level: 'object',
extensions: { crr: [...allLocations] },
method: 'getObjectProgress',
dataPoints: [redisKeys.objectBytes, redisKeys.objectBytesDone],
},
// Route: /_/metrics/crr/<site>/throughput/<bucket>/<key>
{
httpMethod: 'GET',
category: 'metrics',
type: 'throughput',
level: 'object',
extensions: { crr: [...allLocations] },
method: 'getObjectThroughput',
dataPoints: [redisKeys.objectBytesDone],
},
// Route: /_/crr/failed?site=<site>&marker=<marker>
{
httpMethod: 'GET',
type: 'all',
extensions: { crr: ['failed'] },
method: 'getSiteFailedCRR',
},
// Route: /_/crr/failed/<bucket>/<key>/<versionId>
{
httpMethod: 'GET',
type: 'specific',
extensions: { crr: ['failed'] },
method: 'getFailedCRR',
},
// Route: /_/crr/failed
{
httpMethod: 'POST',
type: 'all',
extensions: { crr: ['failed'] },
method: 'retryFailedCRR',
},
// Route: /_/monitoring/metrics
{
httpMethod: 'GET',
category: 'monitoring',
type: 'metrics',
extensions: {},
method: 'monitoringHandler',
},
// Route: /_/crr/pause/<location>
// Where <location> is an optional field
{
httpMethod: 'POST',
type: 'pause',
extensions: { crr: [...allLocations, 'all'] },
method: 'pauseCRRService',
},
// Route: /_/crr/resume/<location>
// Route: /_/crr/resume/<location>/schedule
// Where <location> is an optional field unless "schedule" route
{
httpMethod: 'POST',
type: 'resume',
extensions: { crr: [...allLocations, 'all'] },
method: 'resumeCRRService',
},
{
httpMethod: 'DELETE',
type: 'resume',
extensions: { crr: [...allLocations, 'all'] },
method: 'deleteScheduledResumeService',
},
// Route: /_/crr/resume/<location>
{
httpMethod: 'GET',
type: 'resume',
extensions: { crr: [...allLocations, 'all'] },
method: 'getResumeCRRSchedule',
},
// Route: /_/crr/status/<location>
// Where <location> is an optional field
{
httpMethod: 'GET',
type: 'status',
extensions: { crr: [...allLocations, 'all'] },
method: 'getCRRServiceStatus',
},
];
}
module.exports = routes;
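A minimal sketch of how this route table might be consumed by an API server; the redisKeys and allLocations values are hypothetical and the matching logic is simplified compared to the real request handler.

const routes = require('./routes');

// Hypothetical inputs; real key names come from the backbeat configuration.
const redisKeys = { opsDone: 'bb:crr:opsdone', bytesDone: 'bb:crr:bytesdone' /* ...other keys... */ };
const allLocations = ['aws-location', 'azure-location'];

const routeTable = routes(redisKeys, allLocations);
// Find the handler for GET /_/metrics/crr/<location>/completions
const route = routeTable.find(r => r.httpMethod === 'GET' &&
    r.category === 'metrics' && r.type === 'completions');
// route.method === 'getCompletions'; route.dataPoints lists the Redis keys to read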

View File

@ -72,4 +72,10 @@ module.exports = {
permittedCapitalizedBuckets: { permittedCapitalizedBuckets: {
METADATA: true, METADATA: true,
}, },
// Setting a lower object key limit to account for:
// - Mongo key limit of 1012 bytes
// - Version ID in Mongo Key if versioned of 33
// - Max bucket name length if bucket match false of 63
// - Extra prefix slash for bucket prefix if bucket match of 1
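// i.e. 1012 - 33 - 63 - 1 = 915 bytes remain for the object key itself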
objectKeyByteLimit: 915,
}; };

13
lib/errorUtils.js Normal file
View File

@ -0,0 +1,13 @@
function reshapeExceptionError(error) {
const { message, code, stack, name } = error;
return {
message,
code,
stack,
name,
};
}
module.exports = {
reshapeExceptionError,
};
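A small usage sketch (the log variable is assumed to be a werelogs logger created elsewhere): the helper turns a thrown exception into a plain object that can be attached to a log entry.

const { reshapeExceptionError } = require('./errorUtils');

try {
    JSON.parse('{not json');
} catch (err) {
    log.error('parsing failed', { error: reshapeExceptionError(err) });
}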

View File

@ -22,6 +22,28 @@ class RedisClient {
return this; return this;
} }
/**
* scan a pattern and return matching keys
* @param {string} pattern - string pattern to match with all existing keys
* @param {number} [count=10] - scan count
* @param {callback} cb - callback (error, result)
* @return {undefined}
*/
scan(pattern, count = 10, cb) {
const params = { match: pattern, count };
const keys = [];
const stream = this._client.scanStream(params);
stream.on('data', resultKeys => {
for (let i = 0; i < resultKeys.length; i++) {
keys.push(resultKeys[i]);
}
});
stream.on('end', () => {
cb(null, keys);
});
}
/** /**
* increment value of a key by 1 and set a ttl * increment value of a key by 1 and set a ttl
* @param {string} key - key holding the value * @param {string} key - key holding the value
@ -35,6 +57,17 @@ class RedisClient {
.exec(cb); .exec(cb);
} }
/**
* increment value of a key by a given amount
* @param {string} key - key holding the value
* @param {number} amount - amount to increase by
* @param {callback} cb - callback
* @return {undefined}
*/
incrby(key, amount, cb) {
return this._client.incrby(key, amount, cb);
}
/** /**
* increment value of a key by a given amount and set a ttl * increment value of a key by a given amount and set a ttl
* @param {string} key - key holding the value * @param {string} key - key holding the value
@ -50,13 +83,24 @@ class RedisClient {
} }
/**
* decrement value of a key by a given amount
* @param {string} key - key holding the value
* @param {number} amount - amount to decrement by
* @param {callback} cb - callback
* @return {undefined}
*/
decrby(key, amount, cb) {
return this._client.decrby(key, amount, cb);
}
/**
* get value stored at key
* @param {string} key - key holding the value
* @param {callback} cb - callback
* @return {undefined}
*/
get(key, cb) {
return this._client.get(key, cb);
}
/** /**
@ -71,6 +115,16 @@ class RedisClient {
return this._client.exists(key, cb); return this._client.exists(key, cb);
} }
/**
* execute a batch of commands
* @param {string[]} cmds - list of commands
* @param {callback} cb - callback
* @return {undefined}
*/
batch(cmds, cb) {
return this._client.pipeline(cmds).exec(cb);
}
/** /**
* Add a value and its score to a sorted set. If no sorted set exists, this * Add a value and its score to a sorted set. If no sorted set exists, this
* will create a new one for the given key. * will create a new one for the given key.
@ -150,9 +204,27 @@ class RedisClient {
return this._client.zrangebyscore(key, min, max, cb); return this._client.zrangebyscore(key, min, max, cb);
} }
/**
* get TTL or expiration in seconds
* @param {string} key - name of key
* @param {function} cb - callback
* @return {undefined}
*/
ttl(key, cb) {
return this._client.ttl(key, cb);
}
clear(cb) { clear(cb) {
return this._client.flushdb(cb); return this._client.flushdb(cb);
} }
disconnect(cb) {
return this._client.quit(cb);
}
listClients(cb) {
return this._client.client('list', cb);
}
} }
module.exports = RedisClient; module.exports = RedisClient;
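A hedged sketch of the new helpers in use, assuming a RedisClient instance named redis was constructed elsewhere with a valid config and werelogs logger (the constructor is outside this excerpt); the key names are illustrative.

redis.incrby('bb:crr:bytespending', 1024, err => { /* ... */ });
redis.decrby('bb:crr:bytespending', 512, err => { /* ... */ });
redis.get('bb:crr:bytespending', (err, value) => { /* ... */ });
redis.ttl('bb:crr:bytespending', (err, seconds) => { /* ... */ });
// SCAN-based, so it does not block the server the way KEYS would
redis.scan('bb:crr:*', 100, (err, keys) => { /* ... */ });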

View File

@ -41,11 +41,11 @@ class StatsClient {
/** /**
* build redis key to get total number of occurrences on the server * build redis key to get total number of occurrences on the server
* @param {string} name - key name identifier * @param {string} name - key name identifier
* @param {object} d - Date instance * @param {Date} date - Date instance
* @return {string} key - key for redis * @return {string} key - key for redis
*/ */
_buildKey(name, d) { buildKey(name, date) {
return `${name}:${this._normalizeTimestamp(d)}`; return `${name}:${this._normalizeTimestamp(date)}`;
} }
/** /**
@ -85,11 +85,35 @@ class StatsClient {
amount = (typeof incr === 'number') ? incr : 1; amount = (typeof incr === 'number') ? incr : 1;
} }
const key = this._buildKey(`${id}:requests`, new Date()); const key = this.buildKey(`${id}:requests`, new Date());
return this._redis.incrbyEx(key, amount, this._expiry, callback); return this._redis.incrbyEx(key, amount, this._expiry, callback);
} }
/**
* Increment the given key by the given value.
* @param {String} key - The Redis key to increment
* @param {Number} incr - The value to increment by
* @param {function} [cb] - callback
* @return {undefined}
*/
incrementKey(key, incr, cb) {
const callback = cb || this._noop;
return this._redis.incrby(key, incr, callback);
}
/**
* Decrement the given key by the given value.
* @param {String} key - The Redis key to decrement
* @param {Number} decr - The value to decrement by
* @param {function} [cb] - callback
* @return {undefined}
*/
decrementKey(key, decr, cb) {
const callback = cb || this._noop;
return this._redis.decrby(key, decr, callback);
}
/** /**
* report/record a request that ended up being a 500 on the server * report/record a request that ended up being a 500 on the server
* @param {string} id - service identifier * @param {string} id - service identifier
@ -101,10 +125,54 @@ class StatsClient {
return undefined; return undefined;
} }
const callback = cb || this._noop; const callback = cb || this._noop;
const key = this._buildKey(`${id}:500s`, new Date()); const key = this.buildKey(`${id}:500s`, new Date());
return this._redis.incrEx(key, this._expiry, callback); return this._redis.incrEx(key, this._expiry, callback);
} }
/**
* wrapper on `getStats` that handles a list of keys
* @param {object} log - Werelogs request logger
* @param {array} ids - service identifiers
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
*/
getAllStats(log, ids, cb) {
if (!this._redis) {
return cb(null, {});
}
const statsRes = {
'requests': 0,
'500s': 0,
'sampleDuration': this._expiry,
};
let requests = 0;
let errors = 0;
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
}
requests += res.requests;
errors += res['500s'];
return done();
});
}, error => {
if (error) {
log.error('error getting stats', {
error,
method: 'StatsClient.getAllStats',
});
return cb(null, statsRes);
}
statsRes.requests = requests;
statsRes['500s'] = errors;
return cb(null, statsRes);
});
}
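// Illustrative use of getAllStats (statsClient: an existing StatsClient
// instance, log: a werelogs request logger; identifiers and totals are
// examples only):
//   statsClient.getAllStats(log, ['serviceA', 'serviceB'], (err, res) => {
//       // res -> { requests: 42, '500s': 3, sampleDuration: 900 }
//   });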
/** /**
* get stats for the last x seconds, x being the sampling duration * get stats for the last x seconds, x being the sampling duration
* @param {object} log - Werelogs request logger * @param {object} log - Werelogs request logger
@ -121,8 +189,8 @@ class StatsClient {
const reqsKeys = []; const reqsKeys = [];
const req500sKeys = []; const req500sKeys = [];
for (let i = 0; i < totalKeys; i++) { for (let i = 0; i < totalKeys; i++) {
reqsKeys.push(['get', this._buildKey(`${id}:requests`, d)]); reqsKeys.push(['get', this.buildKey(`${id}:requests`, d)]);
req500sKeys.push(['get', this._buildKey(`${id}:500s`, d)]); req500sKeys.push(['get', this.buildKey(`${id}:500s`, d)]);
this._setPrevInterval(d); this._setPrevInterval(d);
} }
return async.parallel([ return async.parallel([

View File

@ -1,11 +1,149 @@
const async = require('async');
const StatsClient = require('./StatsClient'); const StatsClient = require('./StatsClient');
/**
/**
* @class StatsModel * @class StatsModel
* *
* @classdesc Extend and overwrite how timestamps are normalized by minutes * @classdesc Extend and overwrite how timestamps are normalized by minutes
* rather than by seconds * rather than by seconds
*/ */
class StatsModel extends StatsClient { class StatsModel extends StatsClient {
/**
* Utility method to convert 2d array rows to columns, and vice versa
* See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
* @param {array} arrays - 2d array of integers
* @return {array} converted array
*/
_zip(arrays) {
if (arrays.length > 0 && arrays.every(a => Array.isArray(a))) {
return arrays[0].map((_, i) => arrays.map(a => a[i]));
}
return [];
}
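// Illustrative: this._zip([[1, 2], [3, 4], [5, 6]]) returns
// [[1, 3, 5], [2, 4, 6]]; if any element is not an array, [] is returned.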
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
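// Illustrative: with a 300-second interval, a Date of 12:07:45 is
// normalized to 12:05:00.000 (minutes rounded down to the nearest 5).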
/**
* override the method to get the count as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return {array} array of integers, ordered from most recent interval to
* oldest interval with length of (expiry / interval)
*/
_getCount(arr) {
const size = Math.floor(this._expiry / this._interval);
const array = arr.reduce((store, i) => {
let num = parseInt(i[1], 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, []);
if (array.length < size) {
array.push(...Array(size - array.length).fill(0));
}
return array;
}
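// Illustrative: with an expiry of 900 and an interval of 300 (size 3), an
// input of [[null, '4'], [null, null]] yields [4, 0, 0] (parsed, then
// zero-padded to size).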
/**
* wrapper on `getStats` that handles a list of keys
* override the method to reduce the returned 2d array from `_getCount`
* @param {object} log - Werelogs request logger
* @param {array} ids - service identifiers
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
*/
getAllStats(log, ids, cb) {
if (!this._redis) {
return cb(null, {});
}
const size = Math.floor(this._expiry / this._interval);
const statsRes = {
'requests': Array(size).fill(0),
'500s': Array(size).fill(0),
'sampleDuration': this._expiry,
};
const requests = [];
const errors = [];
if (ids.length === 0) {
return cb(null, statsRes);
}
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
}
requests.push(res.requests);
errors.push(res['500s']);
return done();
});
}, error => {
if (error) {
log.error('error getting stats', {
error,
method: 'StatsModel.getAllStats',
});
return cb(null, statsRes);
}
statsRes.requests = this._zip(requests).map(arr =>
arr.reduce((acc, i) => acc + i, 0));
statsRes['500s'] = this._zip(errors).map(arr =>
arr.reduce((acc, i) => acc + i, 0));
return cb(null, statsRes);
});
}
/**
* Handles getting a list of global keys.
* @param {array} ids - Service identifiers
* @param {object} log - Werelogs request logger
* @param {function} cb - Callback
* @return {undefined}
*/
getAllGlobalStats(ids, log, cb) {
const reqsKeys = ids.map(key => (['get', key]));
return this._redis.batch(reqsKeys, (err, res) => {
const statsRes = { requests: 0 };
if (err) {
log.error('error getting metrics', {
error: err,
method: 'StatsClient.getAllGlobalStats',
});
return cb(null, statsRes);
}
statsRes.requests = res.reduce((sum, curr) => {
const [cmdErr, val] = curr;
if (cmdErr) {
// Log any individual request errors from the batch request.
log.error('error getting metrics', {
error: cmdErr,
method: 'StatsClient.getAllGlobalStats',
});
}
return sum + (Number.parseInt(val, 10) || 0);
}, 0);
return cb(null, statsRes);
});
}
/** /**
* normalize date timestamp to the nearest hour * normalize date timestamp to the nearest hour
* @param {Date} d - Date instance * @param {Date} d - Date instance
@ -24,34 +162,6 @@ class StatsModel extends StatsClient {
return d.setHours(d.getHours() - 1); return d.setHours(d.getHours() - 1);
} }
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
/**
* override the method to get the result as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return {array} array of integers, ordered from most recent interval to
* oldest interval
*/
_getCount(arr) {
return arr.reduce((store, i) => {
let num = parseInt(i[1], 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, []);
}
/** /**
* get list of sorted set key timestamps * get list of sorted set key timestamps
* @param {number} epoch - epoch time * @param {number} epoch - epoch time

View File

@ -1,10 +1,13 @@
const assert = require('assert'); const assert = require('assert');
const uuid = require('uuid/v4');
const { WebsiteConfiguration } = require('./WebsiteConfiguration'); const { WebsiteConfiguration } = require('./WebsiteConfiguration');
const ReplicationConfiguration = require('./ReplicationConfiguration'); const ReplicationConfiguration = require('./ReplicationConfiguration');
const LifecycleConfiguration = require('./LifecycleConfiguration'); const LifecycleConfiguration = require('./LifecycleConfiguration');
// WHEN UPDATING THIS NUMBER, UPDATE MODELVERSION.MD CHANGELOG // WHEN UPDATING THIS NUMBER, UPDATE MODELVERSION.MD CHANGELOG
const modelVersion = 6; // MODELVERSION.MD can be found in S3 repo: lib/metadata/ModelVersion.md
const modelVersion = 9;
class BucketInfo { class BucketInfo {
/** /**
@ -47,12 +50,17 @@ class BucketInfo {
* @param {string[]} [cors[].exposeHeaders] - headers expose to applications * @param {string[]} [cors[].exposeHeaders] - headers expose to applications
* @param {object} [replicationConfiguration] - replication configuration * @param {object} [replicationConfiguration] - replication configuration
* @param {object} [lifecycleConfiguration] - lifecycle configuration * @param {object} [lifecycleConfiguration] - lifecycle configuration
* @param {string} [uid] - unique identifier for the bucket, necessary
* addition for use with lifecycle operations
* @param {string} readLocationConstraint - readLocationConstraint for bucket
* @param {boolean} [isNFS] - whether the bucket is on NFS
*/ */
constructor(name, owner, ownerDisplayName, creationDate, constructor(name, owner, ownerDisplayName, creationDate,
mdBucketModelVersion, acl, transient, deleted, mdBucketModelVersion, acl, transient, deleted,
serverSideEncryption, versioningConfiguration, serverSideEncryption, versioningConfiguration,
locationConstraint, websiteConfiguration, cors, locationConstraint, websiteConfiguration, cors,
replicationConfiguration, lifecycleConfiguration) { replicationConfiguration, lifecycleConfiguration, uid,
readLocationConstraint, isNFS) {
assert.strictEqual(typeof name, 'string'); assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof owner, 'string'); assert.strictEqual(typeof owner, 'string');
assert.strictEqual(typeof ownerDisplayName, 'string'); assert.strictEqual(typeof ownerDisplayName, 'string');
@ -90,6 +98,9 @@ class BucketInfo {
if (locationConstraint) { if (locationConstraint) {
assert.strictEqual(typeof locationConstraint, 'string'); assert.strictEqual(typeof locationConstraint, 'string');
} }
if (readLocationConstraint) {
assert.strictEqual(typeof readLocationConstraint, 'string');
}
if (websiteConfiguration) { if (websiteConfiguration) {
assert(websiteConfiguration instanceof WebsiteConfiguration); assert(websiteConfiguration instanceof WebsiteConfiguration);
const { indexDocument, errorDocument, redirectAllRequestsTo, const { indexDocument, errorDocument, redirectAllRequestsTo,
@ -112,6 +123,10 @@ class BucketInfo {
if (lifecycleConfiguration) { if (lifecycleConfiguration) {
LifecycleConfiguration.validateConfig(lifecycleConfiguration); LifecycleConfiguration.validateConfig(lifecycleConfiguration);
} }
if (uid) {
assert.strictEqual(typeof uid, 'string');
assert.strictEqual(uid.length, 36);
}
const aclInstance = acl || { const aclInstance = acl || {
Canned: 'private', Canned: 'private',
FULL_CONTROL: [], FULL_CONTROL: [],
@ -133,10 +148,13 @@ class BucketInfo {
this._serverSideEncryption = serverSideEncryption || null; this._serverSideEncryption = serverSideEncryption || null;
this._versioningConfiguration = versioningConfiguration || null; this._versioningConfiguration = versioningConfiguration || null;
this._locationConstraint = locationConstraint || null; this._locationConstraint = locationConstraint || null;
this._readLocationConstraint = readLocationConstraint || null;
this._websiteConfiguration = websiteConfiguration || null; this._websiteConfiguration = websiteConfiguration || null;
this._replicationConfiguration = replicationConfiguration || null; this._replicationConfiguration = replicationConfiguration || null;
this._cors = cors || null; this._cors = cors || null;
this._lifecycleConfiguration = lifecycleConfiguration || null; this._lifecycleConfiguration = lifecycleConfiguration || null;
this._uid = uid || uuid();
this._isNFS = isNFS || null;
return this; return this;
} }
/** /**
@ -156,10 +174,13 @@ class BucketInfo {
serverSideEncryption: this._serverSideEncryption, serverSideEncryption: this._serverSideEncryption,
versioningConfiguration: this._versioningConfiguration, versioningConfiguration: this._versioningConfiguration,
locationConstraint: this._locationConstraint, locationConstraint: this._locationConstraint,
readLocationConstraint: this._readLocationConstraint,
websiteConfiguration: undefined, websiteConfiguration: undefined,
cors: this._cors, cors: this._cors,
replicationConfiguration: this._replicationConfiguration, replicationConfiguration: this._replicationConfiguration,
lifecycleConfiguration: this._lifecycleConfiguration, lifecycleConfiguration: this._lifecycleConfiguration,
uid: this._uid,
isNFS: this._isNFS,
}; };
if (this._websiteConfiguration) { if (this._websiteConfiguration) {
bucketInfos.websiteConfiguration = bucketInfos.websiteConfiguration =
@ -180,7 +201,8 @@ class BucketInfo {
obj.creationDate, obj.mdBucketModelVersion, obj.acl, obj.creationDate, obj.mdBucketModelVersion, obj.acl,
obj.transient, obj.deleted, obj.serverSideEncryption, obj.transient, obj.deleted, obj.serverSideEncryption,
obj.versioningConfiguration, obj.locationConstraint, websiteConfig, obj.versioningConfiguration, obj.locationConstraint, websiteConfig,
obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration); obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration,
obj.uid, obj.readLocationConstraint, obj.isNFS);
} }
/** /**
@ -203,7 +225,8 @@ class BucketInfo {
data._transient, data._deleted, data._serverSideEncryption, data._transient, data._deleted, data._serverSideEncryption,
data._versioningConfiguration, data._locationConstraint, data._versioningConfiguration, data._locationConstraint,
data._websiteConfiguration, data._cors, data._websiteConfiguration, data._cors,
data._replicationConfiguration, data._lifecycleConfiguration); data._replicationConfiguration, data._lifecycleConfiguration,
data._uid, data._readLocationConstraint, data._isNFS);
} }
/** /**
@ -449,6 +472,17 @@ class BucketInfo {
return this._locationConstraint; return this._locationConstraint;
} }
/**
* Get read location constraint.
* @return {string} - bucket read location constraint
*/
getReadLocationConstraint() {
if (this._readLocationConstraint) {
return this._readLocationConstraint;
}
return this._locationConstraint;
}
/** /**
* Set Bucket model version * Set Bucket model version
* *
@ -521,6 +555,29 @@ class BucketInfo {
return this._versioningConfiguration && return this._versioningConfiguration &&
this._versioningConfiguration.Status === 'Enabled'; this._versioningConfiguration.Status === 'Enabled';
} }
/**
* Get unique id of bucket.
* @return {string} - unique id
*/
getUid() {
return this._uid;
}
/**
* Check if the bucket is an NFS bucket.
* @return {boolean} - Whether the bucket is NFS or not
*/
isNFS() {
return this._isNFS;
}
/**
* Set whether the bucket is an NFS bucket.
* @param {boolean} isNFS - Whether the bucket is NFS or not
* @return {BucketInfo} - bucket info instance
*/
setIsNFS(isNFS) {
this._isNFS = isNFS;
return this;
}
} }
module.exports = BucketInfo; module.exports = BucketInfo;

View File

@ -269,7 +269,7 @@ class LifecycleConfiguration {
return filterObj; return filterObj;
} }
if (filter.Tag) { if (filter.Tag) {
const tagObj = this._parseTags(filter.Tag[0]); const tagObj = this._parseTags(filter.Tag);
if (tagObj.error) { if (tagObj.error) {
filterObj.error = tagObj.error; filterObj.error = tagObj.error;
return filterObj; return filterObj;
@ -287,7 +287,7 @@ class LifecycleConfiguration {
if (andF.Prefix && andF.Prefix.length >= 1) { if (andF.Prefix && andF.Prefix.length >= 1) {
filterObj.rulePrefix = andF.Prefix.pop(); filterObj.rulePrefix = andF.Prefix.pop();
} }
const tagObj = this._parseTags(andF.Tag[0]); const tagObj = this._parseTags(andF.Tag);
if (tagObj.error) { if (tagObj.error) {
filterObj.error = tagObj.error; filterObj.error = tagObj.error;
return filterObj; return filterObj;
@ -320,31 +320,28 @@ class LifecycleConfiguration {
// reset _tagKeys to empty because keys cannot overlap within a rule, // reset _tagKeys to empty because keys cannot overlap within a rule,
// but different rules can have the same tag keys // but different rules can have the same tag keys
this._tagKeys = []; this._tagKeys = [];
if (!tags.Key || !tags.Value) { for (let i = 0; i < tags.length; i++) {
tagObj.error = errors.MissingRequiredParameter.customizeDescription( if (!tags[i].Key || !tags[i].Value) {
'Tag XML does not contain both Key and Value'); tagObj.error =
return tagObj; errors.MissingRequiredParameter.customizeDescription(
} 'Tag XML does not contain both Key and Value');
if (tags.Key.length !== tags.Value.length) { break;
tagObj.error = errors.MalformedXML.customizeDescription( }
'Tag XML should contain same number of Keys and Values');
return tagObj; if (tags[i].Key[0].length < 1 || tags[i].Key[0].length > 128) {
}
for (let i = 0; i < tags.Key.length; i++) {
if (tags.Key[i].length < 1 || tags.Key[i].length > 128) {
tagObj.error = errors.InvalidRequest.customizeDescription( tagObj.error = errors.InvalidRequest.customizeDescription(
'Tag Key must be a length between 1 and 128 char'); 'Tag Key must be a length between 1 and 128 char');
break; break;
} }
if (this._tagKeys.includes(tags.Key[i])) { if (this._tagKeys.includes(tags[i].Key[0])) {
tagObj.error = errors.InvalidRequest.customizeDescription( tagObj.error = errors.InvalidRequest.customizeDescription(
'Tag Keys must be unique'); 'Tag Keys must be unique');
break; break;
} }
this._tagKeys.push(tags.Key[i]); this._tagKeys.push(tags[i].Key[0]);
const tag = { const tag = {
key: tags.Key[i], key: tags[i].Key[0],
val: tags.Value[i], val: tags[i].Value[0],
}; };
tagObj.tags.push(tag); tagObj.tags.push(tag);
} }
@ -677,13 +674,12 @@ class LifecycleConfiguration {
const Prefix = rulePrefix ? `<Prefix>${rulePrefix}</Prefix>` : ''; const Prefix = rulePrefix ? `<Prefix>${rulePrefix}</Prefix>` : '';
let tagXML = ''; let tagXML = '';
if (tags) { if (tags) {
const keysVals = tags.map(t => { tagXML = tags.map(t => {
const { key, val } = t; const { key, val } = t;
const Tag = `<Key>${key}</Key>` + const Tag = `<Tag><Key>${key}</Key>` +
`<Value>${val}</Value>`; `<Value>${val}</Value></Tag>`;
return Tag; return Tag;
}).join(''); }).join('');
tagXML = `<Tag>${keysVals}</Tag>`;
} }
let Filter; let Filter;
if (rulePrefix && !tags) { if (rulePrefix && !tags) {

View File

@ -120,6 +120,7 @@ class ObjectMD {
role: '', role: '',
storageType: '', storageType: '',
dataStoreVersionId: '', dataStoreVersionId: '',
isNFS: null,
}, },
'dataStoreName': '', 'dataStoreName': '',
}; };
@ -648,6 +649,19 @@ class ObjectMD {
return this._data.isDeleteMarker; return this._data.isDeleteMarker;
} }
/**
* Get if the object is a multipart upload (MPU)
*
* The function checks the "content-md5" field: if it contains a
* dash ('-') it is a MPU, as the content-md5 string ends with
* "-[nbparts]" for MPUs.
*
* @return {boolean} Whether object is a multipart upload
*/
isMultipartUpload() {
return this.getContentMd5().includes('-');
}
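// Illustrative: a content-md5 of '9b2cf535f27731c974343645a3985328-5'
// denotes a 5-part MPU, so isMultipartUpload() returns true.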
/** /**
* Set metadata versionId value * Set metadata versionId value
* *
@ -675,7 +689,10 @@ class ObjectMD {
* @return {string} The encoded object versionId * @return {string} The encoded object versionId
*/ */
getEncodedVersionId() { getEncodedVersionId() {
return VersionIDUtils.encode(this.getVersionId()); if (this.getVersionId()) {
return VersionIDUtils.encode(this.getVersionId());
}
return undefined;
} }
/** /**
@ -706,7 +723,7 @@ class ObjectMD {
*/ */
setReplicationInfo(replicationInfo) { setReplicationInfo(replicationInfo) {
const { status, backends, content, destination, storageClass, role, const { status, backends, content, destination, storageClass, role,
storageType, dataStoreVersionId } = replicationInfo; storageType, dataStoreVersionId, isNFS } = replicationInfo;
this._data.replicationInfo = { this._data.replicationInfo = {
status, status,
backends, backends,
@ -716,6 +733,7 @@ class ObjectMD {
role, role,
storageType: storageType || '', storageType: storageType || '',
dataStoreVersionId: dataStoreVersionId || '', dataStoreVersionId: dataStoreVersionId || '',
isNFS: isNFS || null,
}; };
return this; return this;
} }
@ -734,6 +752,24 @@ class ObjectMD {
return this; return this;
} }
/**
* Set whether the replication is occurring from an NFS bucket.
* @param {Boolean} isNFS - Whether replication from an NFS bucket
* @return {ObjectMD} itself
*/
setReplicationIsNFS(isNFS) {
this._data.replicationInfo.isNFS = isNFS;
return this;
}
/**
* Get whether the replication is occurring from an NFS bucket.
* @return {Boolean} Whether replication from an NFS bucket
*/
getReplicationIsNFS() {
return this._data.replicationInfo.isNFS;
}
setReplicationSiteStatus(site, status) { setReplicationSiteStatus(site, status) {
const backend = this._data.replicationInfo.backends const backend = this._data.replicationInfo.backends
.find(o => o.site === site); .find(o => o.site === site);

View File

@ -59,6 +59,7 @@ class ReplicationConfiguration {
this._rules = null; this._rules = null;
this._prevStorageClass = null; this._prevStorageClass = null;
this._hasScalityDestination = null; this._hasScalityDestination = null;
this._preferredReadLocation = null;
} }
/** /**
@ -85,6 +86,18 @@ class ReplicationConfiguration {
return this._rules; return this._rules;
} }
/**
* The preferred read location
* @return {string|null} - The preferred read location if defined,
* otherwise null
*
* FIXME ideally we should be able to specify one preferred read
* location for each rule
*/
getPreferredReadLocation() {
return this._preferredReadLocation;
}
/** /**
* Get the replication configuration * Get the replication configuration
* @return {object} - The replication configuration * @return {object} - The replication configuration
@ -94,6 +107,7 @@ class ReplicationConfiguration {
role: this.getRole(), role: this.getRole(),
destination: this.getDestination(), destination: this.getDestination(),
rules: this.getRules(), rules: this.getRules(),
preferredReadLocation: this.getPreferredReadLocation(),
}; };
} }
@ -292,6 +306,14 @@ class ReplicationConfiguration {
return undefined; return undefined;
} }
const storageClasses = destination.StorageClass[0].split(','); const storageClasses = destination.StorageClass[0].split(',');
const prefReadIndex = storageClasses.findIndex(storageClass =>
storageClass.endsWith(':preferred_read'));
if (prefReadIndex !== -1) {
const prefRead = storageClasses[prefReadIndex].split(':')[0];
// remove :preferred_read tag from storage class name
storageClasses[prefReadIndex] = prefRead;
this._preferredReadLocation = prefRead;
}
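// Illustrative: a StorageClass of 'us-east-2:preferred_read,azure-loc' yields
// storage classes ['us-east-2', 'azure-loc'] and records 'us-east-2' as the
// preferred read location (location names are examples only).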
const isValidStorageClass = storageClasses.every(storageClass => { const isValidStorageClass = storageClasses.every(storageClass => {
if (validStorageClasses.includes(storageClass)) { if (validStorageClasses.includes(storageClass)) {
this._hasScalityDestination = this._hasScalityDestination =

View File

@ -352,6 +352,8 @@ class Server {
error: err.stack || err, error: err.stack || err,
address: sock.address(), address: sock.address(),
}); });
// socket is not systematically destroyed
sock.destroy();
} }
/** /**

579
lib/network/kmip/Client.js Normal file
View File

@ -0,0 +1,579 @@
'use strict'; // eslint-disable-line
/* eslint new-cap: "off" */
const async = require('async');
const errors = require('../../errors');
const TTLVCodec = require('./codec/ttlv.js');
const TlsTransport = require('./transport/tls.js');
const KMIP = require('.');
const CRYPTOGRAPHIC_OBJECT_TYPE = 'Symmetric Key';
const CRYPTOGRAPHIC_ALGORITHM = 'AES';
const CRYPTOGRAPHIC_CIPHER_MODE = 'CBC';
const CRYPTOGRAPHIC_PADDING_METHOD = 'PKCS5';
const CRYPTOGRAPHIC_LENGTH = 256;
const CRYPTOGRAPHIC_USAGE_MASK = ['Encrypt', 'Decrypt'];
const CRYPTOGRAPHIC_DEFAULT_IV = Buffer.alloc(16).fill(0);
const searchFilter = {
protocolVersionMajor:
'Response Message/Batch Item/' +
'Response Payload/Protocol Version/' +
'Protocol Version Major',
protocolVersionMinor:
'Response Message/Batch Item/' +
'Response Payload/Protocol Version/' +
'Protocol Version Minor',
extensionName:
'Response Message/Batch Item/Response Payload' +
'/Extension Information/Extension Name',
extensionTag:
'Response Message/Batch Item/Response Payload' +
'/Extension Information/Extension Tag',
vendorIdentification:
'Response Message/Batch Item/Response Payload/Vendor Identification',
serverInformation:
'Response Message/Batch Item/Response Payload/Server Information',
operation:
'Response Message/Batch Item/Response Payload/Operation',
objectType:
'Response Message/Batch Item/Response Payload/Object Type',
uniqueIdentifier:
'Response Message/Batch Item/Response Payload/Unique Identifier',
data:
'Response Message/Batch Item/Response Payload/Data',
};
/**
* Normalize errors according to arsenal definitions
* @param {string | Error} err - an Error instance or a message string
* @returns {arsenal.errors} - arsenal error
*/
function _arsenalError(err) {
const messagePrefix = 'KMIP:';
if (typeof err === 'string') {
return errors.InternalError
.customizeDescription(`${messagePrefix} ${err}`);
} else if (err instanceof Error) {
return errors.InternalError
.customizeDescription(`${messagePrefix} ${err.message}`);
}
return errors.InternalError
.customizeDescription(`${messagePrefix} Unspecified error`);
}
/**
* Negotiate with the server the use of a recent version of the protocol and
* update the low level driver with this new knowledge.
* @param {Object} client - The Client instance
* @param {Object} logger - Werelog logger object
* @param {Function} cb - The callback triggered after the negotiation.
* @returns {undefined}
*/
function _negotiateProtocolVersion(client, logger, cb) {
return client.kmip.request(logger, 'Discover Versions', [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 4),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 3),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 2),
]),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::negotiateProtocolVersion',
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
const majorVersions =
response.lookup(searchFilter.protocolVersionMajor);
const minorVersions =
response.lookup(searchFilter.protocolVersionMinor);
if (majorVersions.length === 0 ||
majorVersions.length !== minorVersions.length) {
const error = _arsenalError('No suitable protocol version');
logger.error('KMIP::negotiateProtocolVersion',
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
client.kmip.changeProtocolVersion(majorVersions[0], minorVersions[0]);
return cb();
});
}
/**
* Obtain from the server the various extensions defined by the vendor
* and update the low level driver with this new knowledge.
* @param {Object} client - The Client instance
* @param {Object} logger - Werelog logger object
* @param {Function} cb - The callback triggered after the extension mapping
* @returns {undefined}
*/
function _mapExtensions(client, logger, cb) {
return client.kmip.request(logger, 'Query', [
KMIP.Enumeration('Query Function', 'Query Extension Map'),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::mapExtensions',
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
const extensionNames = response.lookup(searchFilter.extensionName);
const extensionTags = response.lookup(searchFilter.extensionTag);
if (extensionNames.length !== extensionTags.length) {
const error = _arsenalError('Inconsistent extension list');
logger.error('KMIP::mapExtensions',
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
extensionNames.forEach((extensionName, idx) => {
client.kmip.mapExtension(extensionName, extensionTags[idx]);
});
return cb();
});
}
/**
* Query the Server information and identify its vendor
* @param {Object} client - The Client instance
* @param {Object} logger - Werelog logger object
* @param {Function} cb - The callback triggered after the information discovery
* @returns {undefined}
*/
function _queryServerInformation(client, logger, cb) {
client.kmip.request(logger, 'Query', [
KMIP.Enumeration('Query Function', 'Query Server Information'),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.warn('KMIP::queryServerInformation',
{ error });
/* no error returned, caller can keep going */
return cb();
}
client._setVendorIdentification(
response.lookup(searchFilter.vendorIdentification)[0]);
client._setServerInformation(
JSON.stringify(response.lookup(searchFilter.serverInformation)[0]));
logger.info('KMIP Server identified',
{ vendorIdentification: client.vendorIdentification,
serverInformation: client.serverInformation,
negotiatedProtocolVersion: client.kmip.protocolVersion });
return cb();
});
}
/**
* Query the Server for the supported operations and managed object types.
* The fact that a server doesn't announce the support for a required feature
* is not a show stopper because some vendor support more or less what they
* announce. If a subsequent request fails, this information can be used to
* figure out the reason for the failure.
* @param {Object} client - The Client instance
* @param {Object} logger - Werelog logger object
* @param {Function} cb - The callback triggered after the information discovery
* @returns {undefined}
*/
function _queryOperationsAndObjects(client, logger, cb) {
return client.kmip.request(logger, 'Query', [
KMIP.Enumeration('Query Function', 'Query Operations'),
KMIP.Enumeration('Query Function', 'Query Objects'),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::queryOperationsAndObjects',
{ error,
vendorIdentification: client.vendorIdentification });
return cb(error);
}
const supportedOperations = response.lookup(searchFilter.operation);
const supportedObjectTypes = response.lookup(searchFilter.objectType);
const supportsEncrypt = supportedOperations.includes('Encrypt');
const supportsDecrypt = supportedOperations.includes('Decrypt');
const supportsActivate = supportedOperations.includes('Activate');
const supportsRevoke = supportedOperations.includes('Revoke');
const supportsCreate = supportedOperations.includes('Create');
const supportsDestroy = supportedOperations.includes('Destroy');
const supportsQuery = supportedOperations.includes('Query');
const supportsSymmetricKeys =
supportedObjectTypes.includes('Symmetric Key');
if (!supportsEncrypt || !supportsDecrypt ||
!supportsActivate || !supportsRevoke ||
!supportsCreate || !supportsDestroy ||
!supportsQuery || !supportsSymmetricKeys) {
/* This should not be considered as an error since some vendors
* are not consistent between what they really support and what
* they announce to support.
*/
logger.warn('KMIP::queryOperationsAndObjects: ' +
'The KMIP Server announces that it ' +
'does not support all of the required features',
{ vendorIdentification: client.vendorIdentification,
serverInformation: client.serverInformation,
supportsEncrypt, supportsDecrypt,
supportsActivate, supportsRevoke,
supportsCreate, supportsDestroy,
supportsQuery, supportsSymmetricKeys });
} else {
logger.info('KMIP Server provides the necessary feature set',
{ vendorIdentification: client.vendorIdentification });
}
return cb();
});
}
class Client {
/**
* Construct a high level KMIP driver suitable for cloudserver
* @param {Object} options - Instance options
* @param {Object} options.kmip - Low level driver options
* @param {Object} options.kmip.client - This high level driver options
* @param {Object} options.kmip.client.compoundCreateActivate -
* Depends on the server's ability. False offers the best
* compatibility. True does not offer a significant
* performance gain, but can be useful in case of unreliable
* time synchronization between the client and the server.
* @param {Object} options.kmip.client.bucketNameAttributeName -
* Depends on the server's ability. Not specifying this
* offers the best compatibility and disables the attachment
* of the bucket name as a key attribute.
* @param {Object} options.kmip.codec - KMIP Codec options
* @param {Object} options.kmip.transport - KMIP Transport options
* @param {Class} CodecClass - diversion for the Codec class,
* defaults to TTLVCodec
* @param {Class} TransportClass - diversion for the Transport class,
* defaults to TlsTransport
*/
constructor(options, CodecClass, TransportClass) {
this.options = options.kmip.client || {};
this.vendorIdentification = '';
this.serverInformation = [];
this.kmip = new KMIP(CodecClass || TTLVCodec,
TransportClass || TlsTransport,
options);
this.kmip.registerHandshakeFunction((logger, cb) => {
this._kmipHandshake(logger, cb);
});
}
/**
* Update this client with the vendor identification of the server
* @param {String} vendorIdentification - Vendor identification string
* @returns {undefined}
*/
_setVendorIdentification(vendorIdentification) {
this.vendorIdentification = vendorIdentification;
}
/**
* Update this client with the information about the server
* @param {Object} serverInformation - Server information object
* @returns {undefined}
*/
_setServerInformation(serverInformation) {
this.serverInformation = serverInformation;
}
/**
* Perform the KMIP level handshake with the server
* @param {Object} logger - Werelog logger object
* @param {Function} cb - Callback to be triggered at the end of the
* handshake. cb(err: Error)
* @returns {undefined}
*/
_kmipHandshake(logger, cb) {
return async.waterfall([
next => _negotiateProtocolVersion(this, logger, next),
next => _mapExtensions(this, logger, next),
next => _queryServerInformation(this, logger, next),
next => _queryOperationsAndObjects(this, logger, next),
], cb);
}
/**
* Activate a cryptographic key managed by the server,
* for a specific bucket. This is a required action to perform after
* the key creation.
* @param {string} keyIdentifier - The bucket key Id
* @param {object} logger - Werelog logger object
* @param {function} cb - The callback(err: Error)
* @returns {undefined}
*/
_activateBucketKey(keyIdentifier, logger, cb) {
return this.kmip.request(logger, 'Activate', [
KMIP.TextString('Unique Identifier', keyIdentifier),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::_activateBucketKey',
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
response.lookup(searchFilter.uniqueIdentifier)[0];
if (uniqueIdentifier !== keyIdentifier) {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::cipherDataKey',
{ error, uniqueIdentifier });
return cb(error);
}
return cb(null, keyIdentifier);
});
}
/**
* Create a new cryptographic key managed by the server,
* for a specific bucket
* @param {string} bucketName - The bucket name
* @param {object} logger - Werelog logger object
* @param {function} cb - The callback(err: Error, bucketKeyId: String)
* @returns {undefined}
*/
createBucketKey(bucketName, logger, cb) {
const attributes = [];
if (!!this.options.bucketNameAttributeName) {
attributes.push(KMIP.Attribute('TextString',
this.options.bucketNameAttributeName,
bucketName));
}
attributes.push(...[
KMIP.Attribute('Enumeration', 'Cryptographic Algorithm',
CRYPTOGRAPHIC_ALGORITHM),
KMIP.Attribute('Integer', 'Cryptographic Length',
CRYPTOGRAPHIC_LENGTH),
KMIP.Attribute('Integer', 'Cryptographic Usage Mask',
this.kmip.encodeMask('Cryptographic Usage Mask',
CRYPTOGRAPHIC_USAGE_MASK))]);
if (this.options.compoundCreateActivate) {
attributes.push(KMIP.Attribute('Date-Time', 'Activation Date',
new Date(Date.UTC())));
}
return this.kmip.request(logger, 'Create', [
KMIP.Enumeration('Object Type', CRYPTOGRAPHIC_OBJECT_TYPE),
KMIP.Structure('Template-Attribute', attributes),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::createBucketKey',
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const createdObjectType =
response.lookup(searchFilter.objectType)[0];
const uniqueIdentifier =
response.lookup(searchFilter.uniqueIdentifier)[0];
if (createdObjectType !== CRYPTOGRAPHIC_OBJECT_TYPE) {
const error = _arsenalError(
'Server created an object of wrong type');
logger.error('KMIP::createBucketKey',
{ error, createdObjectType });
return cb(error);
}
if (!this.options.compoundCreateActivate) {
return this._activateBucketKey(uniqueIdentifier, logger, cb);
}
return cb(null, uniqueIdentifier);
});
}
/**
* Revoke a cryptographic key managed by the server, for a specific bucket.
* This is a required action to perform before being able to destroy the
* managed key.
* @param {string} bucketKeyId - The bucket key Id
* @param {object} logger - Werelog logger object
* @param {function} cb - The callback(err: Error)
* @returns {undefined}
*/
_revokeBucketKey(bucketKeyId, logger, cb) {
// maybe revoke first
return this.kmip.request(logger, 'Revoke', [
KMIP.TextString('Unique Identifier', bucketKeyId),
KMIP.Structure('Revocation Reason', [
KMIP.Enumeration('Revocation Reason Code',
'Cessation of Operation'),
KMIP.TextString('Revocation Message',
'About to be deleted'),
]),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::_revokeBucketKey',
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
response.lookup(searchFilter.uniqueIdentifier)[0];
if (uniqueIdentifier !== bucketKeyId) {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::_revokeBucketKey',
{ error, uniqueIdentifier });
return cb(error);
}
return cb();
});
}
/**
* Destroy a cryptographic key managed by the server, for a specific bucket.
* @param {string} bucketKeyId - The bucket key Id
* @param {object} logger - Werelog logger object
* @param {function} cb - The callback(err: Error)
* @returns {undefined}
*/
destroyBucketKey(bucketKeyId, logger, cb) {
return this._revokeBucketKey(bucketKeyId, logger, err => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::destroyBucketKey: revocation failed',
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
return this.kmip.request(logger, 'Destroy', [
KMIP.TextString('Unique Identifier', bucketKeyId),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::destroyBucketKey',
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
response.lookup(searchFilter.uniqueIdentifier)[0];
if (uniqueIdentifier !== bucketKeyId) {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::destroyBucketKey',
{ error, uniqueIdentifier });
return cb(error);
}
return cb();
});
});
}
/**
*
* @param {number} cryptoScheme - crypto scheme version number
* @param {string} masterKeyId - key to retrieve master key
* @param {buffer} plainTextDataKey - data key
* @param {object} logger - werelog logger object
* @param {function} cb - callback
* @returns {undefined}
* @callback called with (err, cipheredDataKey: Buffer)
*/
cipherDataKey(cryptoScheme,
masterKeyId,
plainTextDataKey,
logger,
cb) {
return this.kmip.request(logger, 'Encrypt', [
KMIP.TextString('Unique Identifier', masterKeyId),
KMIP.Structure('Cryptographic Parameters', [
KMIP.Enumeration('Block Cipher Mode',
CRYPTOGRAPHIC_CIPHER_MODE),
KMIP.Enumeration('Padding Method',
CRYPTOGRAPHIC_PADDING_METHOD),
KMIP.Enumeration('Cryptographic Algorithm',
CRYPTOGRAPHIC_ALGORITHM),
]),
KMIP.ByteString('Data', plainTextDataKey),
KMIP.ByteString('IV/Counter/Nonce', CRYPTOGRAPHIC_DEFAULT_IV),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::cipherDataKey',
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
response.lookup(searchFilter.uniqueIdentifier)[0];
const data = response.lookup(searchFilter.data)[0];
if (uniqueIdentifier !== masterKeyId) {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::cipherDataKey',
{ error, uniqueIdentifier });
return cb(error);
}
return cb(null, data);
});
}
/**
* Decipher a data key using a master key managed by the server
* @param {number} cryptoScheme - crypto scheme version number
* @param {string} masterKeyId - Id of the master key to decipher with
* @param {buffer} cipheredDataKey - The ciphered data key
* @param {object} logger - werelogs logger object
* @param {function} cb - callback
* @returns {undefined}
* @callback called with (err, plainTextDataKey: Buffer)
*/
decipherDataKey(cryptoScheme,
masterKeyId,
cipheredDataKey,
logger,
cb) {
return this.kmip.request(logger, 'Decrypt', [
KMIP.TextString('Unique Identifier', masterKeyId),
KMIP.Structure('Cryptographic Parameters', [
KMIP.Enumeration('Block Cipher Mode',
CRYPTOGRAPHIC_CIPHER_MODE),
KMIP.Enumeration('Padding Method',
CRYPTOGRAPHIC_PADDING_METHOD),
KMIP.Enumeration('Cryptographic Algorithm',
CRYPTOGRAPHIC_ALGORITHM),
]),
KMIP.ByteString('Data', cipheredDataKey),
KMIP.ByteString('IV/Counter/Nonce', CRYPTOGRAPHIC_DEFAULT_IV),
], (err, response) => {
if (err) {
const error = _arsenalError(err);
logger.error('KMIP::decipherDataKey',
{ error,
serverInformation: this.serverInformation });
return cb(error);
}
const uniqueIdentifier =
response.lookup(searchFilter.uniqueIdentifier)[0];
const data = response.lookup(searchFilter.data)[0];
if (uniqueIdentifier !== masterKeyId) {
const error = _arsenalError(
'Server did not return the expected identifier');
logger.error('KMIP::decipherDataKey',
{ error, uniqueIdentifier });
return cb(error);
}
return cb(null, data);
});
}
}
module.exports = Client;


@ -0,0 +1,54 @@
'use strict'; // eslint-disable-line
const assert = require('assert');
function _lookup(decodedTTLV, path) {
const xpath = path.split('/').filter(word => word.length > 0);
const canonicalPath = xpath.join('/');
const obj = decodedTTLV;
let res = [];
assert(Array.isArray(obj));
for (let current = xpath.shift(); current; current = xpath.shift()) {
for (let i = 0; i < obj.length; ++i) {
const cell = obj[i];
if (cell[current]) {
if (xpath.length === 0) {
/* The search path is fully consumed,
* collect the value of this field */
res.push(cell[current].value);
} else {
const subPath = xpath.join('/');
assert(current.length + 1 + subPath.length ===
canonicalPath.length);
const intermediate =
_lookup(cell[current].value, subPath);
res = res.concat(intermediate);
}
}
}
}
return res;
}
class Message {
/**
* Construct a new abstract Message
* @param {Object} content - the content of the message
*/
constructor(content) {
this.content = content;
}
/**
* Lookup the values corresponding to the provided path
* @param {String} path - the path in the hierarchy of the values
* of interest
* @return {Object} - an array of the values matching the provided path
*/
lookup(path) {
return _lookup(this.content, path);
}
}
module.exports = Message;

lib/network/kmip/README.md Normal file

@ -0,0 +1,105 @@
# KMIP
Key Management Interoperability Protocol
## Preliminary usage example
```javascript
const {
kmipServerHostName,
clientKey,
clientCert,
serverCert,
rootCa
} = require('./myconfiguration.js');
const assert = require('assert');
const fs = require('fs');
const tls = require('tls');
const werelogs = require('werelogs');
const KMIP = require('arsenal').network.kmip;
const logger = new werelogs.Logger('kmiptest');
const kmip = new KMIP;
const options = {
host: kmipServerHostName,
key: fs.readFileSync(clientKey),
cert: fs.readFileSync(clientCert),
ca: [ fs.readFileSync(serverCert),
fs.readFileSync(rootCa), ],
checkServerIdentity: console.log,
};
const message = KMIP.Message([
KMIP.Structure('Request Message', [
KMIP.Structure('Request Header', [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 3),
]),
KMIP.Integer('Maximum Response Size', 3456),
KMIP.Integer('Batch Count', 1),
]),
KMIP.Structure('Batch Item', [
KMIP.Enumeration('Operation', 'Query'),
KMIP.Structure('Request Payload', [
KMIP.Enumeration('Query Function', 'Query Operations'),
KMIP.Enumeration('Query Function', 'Query Objects'),
KMIP.Enumeration('Query Function', 'Query Server Information'),
KMIP.Enumeration('Query Function', 'Query Extension Map'),
]),
]),
])
]);
const encodedMessage = kmip.encodeMessage(logger, message);
const socket = tls.connect(5696, options, () => {
socket.write(encodedMessage);
});
socket.on('data', (data) => {
const decodedMessage = kmip.decodeMessage(logger, data);
const summary = {
major: decodedMessage.lookup(
'Response Message/Response Header/' +
'Protocol Version/Protocol Version Major')[0],
minor: decodedMessage.lookup(
'Response Message/Response Header/' +
'Protocol Version/Protocol Version Minor')[0],
supportedOperations: decodedMessage.lookup(
'Response Message/Batch Item/Response Payload/Operation'),
supportedObjectTypes: decodedMessage.lookup(
'Response Message/Batch Item/Response Payload/Object Type'),
serverInformation: decodedMessage.lookup(
'Response Message/Batch Item/Response Payload/Server Information'),
};
console.log(JSON.stringify(summary));
//console.log(JSON.stringify(decodedMessage.content));
//console.log(data.toString('hex'));
const protocolVersionMajor =
decodedMessage.lookup('Response Message/Response Header/' +
'Protocol Version/Protocol Version Major');
const protocolVersionMinor =
decodedMessage.lookup('Response Message/Response Header/' +
'Protocol Version/Protocol Version Minor');
assert(summary.supportedOperations.includes('Encrypt'));
assert(summary.supportedOperations.includes('Decrypt'));
assert(summary.supportedOperations.includes('Create'));
assert(summary.supportedOperations.includes('Destroy'));
assert(summary.supportedOperations.includes('Query'));
assert(summary.supportedObjectTypes.includes('Symmetric Key'));
assert(protocolVersionMajor[0] >= 2 ||
(protocolVersionMajor[0] === 1 &&
protocolVersionMinor[0] >= 2));
socket.end();
});
socket.on('end', () => {
console.log('server ends connection');
});
```


@ -0,0 +1,367 @@
# KMIP codecs
The KMIP protocol is based on the exchange of structured messages between a
client and a server.
About the structure of the messages:
* It is composed of fields and nested fields
* It varies depending on the context of the request and who emits the message.
* It follows the same encoding rules for both the client and the server
* The set of primitive types is the cornerstone of the protocol; the structure
description is contained within the messages along with the actual payload.
* The set of defined tags is the keystone of the protocol. It gives meaning to
the fields of a structured message.
The role of the codec is twofold.
* To decode a message from a particular encoding into an abstract
representation of the KMIP structured messages.
* To encode a message from its abstract representation into the particular
encoding.
The codecs are not responsible for sending the messages on the wire.
This task falls to the transport layer.
## Abstract representation
The primitive data types defined by the protocol are represented internally
as data structures following the form
```javascript
const abstractKmipField = {
[tagName]: {
type,
value
}
};
```
The tag name `tagName` is a string. It is decoded from the tag value
using the KMIP nomenclature and identifies the meaning of the field in the message.
The type name `type` is a string and is one of the primitive types
defined by the KMIP protocol. This element of a field also implicitly carries
the length information for fixed-size data types.
The value `value` is decoded from the payload of the KMIP field. This
element carries the length information for varying data types.
## Constructing an abstract Message
```javascript
const msg = KMIP.Message(content);
```
The static method `KMIP.Message` instantiates an object of the class
`Message`. Message objects wrap the content of the message without
alteration and offer a `lookup` method to search the message for
named fields.
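For instance (a minimal sketch using the field constructors described below),
looking up a nested field returns an array of all matching values:
```javascript
const msg = KMIP.Message([
    KMIP.Structure('Protocol Version', [
        KMIP.Integer('Protocol Version Major', 1),
        KMIP.Integer('Protocol Version Minor', 3),
    ]),
]);
// Each path segment is the tag name of a nested structure or field.
msg.lookup('Protocol Version/Protocol Version Major'); // [1]
```
`lookup` always returns an array because the same tag may occur several times
at the same level of a structure.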
### Structure
```javascript
const field =
KMIP.Structure('Request Header', [
field_1,
...,
field_n,
]);
console.log(field);
{
'Request Header': {
type: 'Structure',
value: [
field_1,
...,
field_n
]
}
}
```
Fields in the array parameter must be provided in the order defined by the
specification for the considered structure name.
### Integer
```javascript
const field = KMIP.Integer('Protocol Version Minor', 3);
console.log(field);
{
'Protocol Version Minor': {
type: "Integer",
value: 3
}
}
```
Integers are encoded as four-byte long (32 bit) binary signed numbers in 2's
complement notation, transmitted big-endian.
### LongInteger
```javascript
const field = KMIP.LongInteger('Usage Limits Total', 10 ** 42);
console.log(field);
{
'Usage Limits Total': {
type: 'LongInteger',
value: 1e+42
}
}
```
Long Integers are encoded as eight-byte long (64 bit) binary signed numbers in
2's complement notation, transmitted big-endian.
Due to the accuracy limitation of JavaScript number representation, `LongInteger`
values cannot exceed 2^53. The codec is expected to throw an error when
attempting to transcode a `LongInteger` greater than this value.
### BigInteger
```javascript
const field = KMIP.BigInteger('Usage Limits Total', value);
console.log(field);
{
'Usage Limits Total': {
type: 'BigInteger',
value: <Buffer ab cd ef ...>
}
}
```
Big Integers are encoded as a sequence of eight-bit bytes, in two's complement
notation, transmitted big-endian. If the length of the sequence is not a
multiple of eight bytes, then Big Integers SHALL be padded with the minimal
number of leading sign-extended bytes to make the length a multiple of eight
bytes. These padding bytes are part of the Item Value and SHALL be counted in
the Item Length.
### Enumeration
```javascript
const field = KMIP.Enumeration('Operation', 'Discover Versions');
console.log(field);
{
'Operation': {
type: 'Enumeration',
value: 'Discover Versions'
}
}
```
### Boolean
```javascript
const field = KMIP.Boolean('Asynchronous Indicator', false);
console.log(field);
{
'Asynchronous Indicator': {
type: 'Boolean',
value: false
}
}
```
### TextString
```javascript
const field = KMIP.TextString('Username', 'alice');
console.log(field);
{
'Username': {
type: 'TextString',
value: 'alice'
}
}
```
Text Strings are sequences of bytes that encode character values according to
the UTF-8 encoding standard. There SHALL NOT be null-termination at the end of
such strings.
### ByteString
```javascript
const field = KMIP.ByteString('Asynchronous Correlation Value', buffer);
console.log(field);
{
'Asynchronous Correlation Value': {
type: 'ByteString',
value: <Buffer ab cd ef ...>
}
}
```
Byte Strings are sequences of bytes containing individual unspecified eight-bit
binary values, and are interpreted in the same sequence order.
### DateTime
```javascript
const field = KMIP.DateTime('Activation Date', new Date);
console.log(field);
{
'Activation Date': {
type: 'Date-Time',
value: <Date 2019-01-10T20:41:36.914Z>
}
}
```
DateTime takes a Date object as its second parameter. The millisecond part of
the date is silently discarded and not sent over the network.
In this particular example, the 'Activation Date' tag is used for illustration
purposes. This is not the appropriate way to instantiate this attribute value:
the special function `KMIP.Attribute` must be used instead of `KMIP.DateTime`.
### Interval
```javascript
const field = KMIP.Interval('Lease Time', 42);
console.log(field);
{
'Lease Time': {
type: "Interval",
value: 42
}
}
```
Intervals are encoded as four-byte long (32 bit) binary unsigned numbers,
transmitted big-endian. They have a resolution of one second.
### Special types
#### Bit Mask
Bit masks are encoded using the `Integer` primitive type relative to an instance
of the KMIP class (i.e. `encodeMask` and `decodeMask` are not static class
functions but regular methods).
```javascript
const kmip = new KMIP;
const mask = ['Encrypt', 'Decrypt'];
const bitMask = kmip.encodeMask('Cryptographic Usage Mask', mask);
const decodedMask = kmip.decodeMask('Cryptographic Usage Mask', bitMask);
assert.deepStrictEqual(decodedMask, mask);
assert(bitMask === 12);
```
#### Attribute
Attribute names and values are managed in a way that deviates from the general
rule. Particularly when it comes associate the value of an enumeration to its
tag. In the nominal form, the value of an enumeration in a field is retrieved
from the tag of this field. For the case of an Attribute, the tag of the
enumeration is referenced in the `Attribute Name` as a `TextString` and the
encoded enumeration value is stored in the `Attribute value`, hence
disconnecting the value from its tag.
```javascript
const cryptographicAlgorithm =
KMIP.Attribute('Enumeration', 'Cryptographic Algorithm', 'AES');
const requestPayload =
KMIP.Structure('Request Payload', [
KMIP.Enumeration('Object Type', 'Symmetric Key'),
KMIP.Structure('Template-Attribute', [
KMIP.Attribute('TextString', 'x-Name', 's3-thekey'),
cryptographicAlgorithm,
KMIP.Attribute('Integer', 'Cryptographic Length', 256),
KMIP.Attribute('Integer', 'Cryptographic Usage Mask',
kmip.encodeMask('Cryptographic Usage Mask',
['Encrypt', 'Decrypt'])),
KMIP.Attribute('Date-Time', 'Activation Date', new Date),
]),
]);
console.log(cryptographicAlgorithm);
{
'Attribute': {
type: 'Structure',
value: [
{
'Attribute Name': {
type: 'TextString',
value: 'Cryptographic Algorithm'
}
},
{
'Attribute Value': {
type: 'Enumeration',
value: 'AES',
diversion: 'Cryptographic Algorithm'
}
}
]
}
}
```
The `diversion` attribute in the `Attribute Value` structure is used by the
codec to identify the `Enumeration` the value relates to.
## Codec Interface
```javascript
class MyCodec {
/**
* Construct a new instance of the codec
*/
constructor() {}
/**
* Encode a bitmask
* @param {String} tagName - name of the bit mask defining tag
* @param {Array of Strings} value - array of named bits to set in the mask
* @return {Integer} Integer encoded bitmask
*/
encodeMask(tagName, value) {}
/**
* Decode a bitmask
* @param {string} tagName - name of the bit mask defining tag
* @param {Integer} givenMask - bit mask to decode
* @return {Array of Strings} array of named bits set in the given bit mask
*/
decodeMask(tagName, givenMask) {}
/**
* Encode an abstract message
* @param {Object} message - Instance of a KMIP.Message
* @return {Buffer} the encoded message suitable for the transport layer
*/
encode(message) {}
/**
* Decode a raw message, usually received from the transport layer
* @param {Object} logger - a Logger instance
* @param {Buffer} rawMessage - the message to decode
* @return {Object} the decoded message as an instance of KMIP.Message
*/
decode(logger, rawMessage) {}
/**
* Amend the tag nomenclature with a vendor specific extension
* @param {String} tagName - Name of the tag to record
* @param {Integer} tagValue - Tag value represented as an integer
*/
mapExtension(tagName, tagValue) {}
}
```
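As a minimal sketch of how a codec is exercised in isolation (assuming the
TTLV codec of this changeset and the `KMIP` field helpers are both in scope;
no transport is involved), a message can be encoded and decoded back:
```javascript
const assert = require('assert');
const werelogs = require('werelogs');
const logger = new werelogs.Logger('codec-roundtrip');
const codec = new TTLVCodec();
// Build an abstract message, encode it to a raw buffer,
// then decode the buffer back into a KMIP.Message.
const message = KMIP.Message([
    KMIP.Structure('Protocol Version', [
        KMIP.Integer('Protocol Version Major', 1),
        KMIP.Integer('Protocol Version Minor', 3),
    ]),
]);
const rawMessage = codec.encode(message);
const decoded = codec.decode(logger, rawMessage);
assert.deepStrictEqual(
    decoded.lookup('Protocol Version/Protocol Version Major'), [1]);
```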
## Encoding specification links
### TTLV Encoding Baseline Profile
[TTLV Encoding Specification](http://docs.oasis-open.org/kmip/spec/v1.4/os/kmip-spec-v1.4-os.html#_Toc490660911)
### XML Encoding Profile
[XML Encoding Profile Specification](http://docs.oasis-open.org/kmip/profiles/v1.4/csprd01/kmip-profiles-v1.4-csprd01.html#_Toc479342078)
### JSON Encoding Profile
[JSON Encoding Profile Specification](http://docs.oasis-open.org/kmip/profiles/v1.4/csprd01/kmip-profiles-v1.4-csprd01.html#_Toc479342090)


@ -0,0 +1,434 @@
'use strict'; // eslint-disable-line
/* eslint dot-notation: "off" */
const KMIPTags = require('../tags.json');
const KMIPMessage = require('../Message.js');
const UINT32_MAX = Math.pow(2, 32);
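/* TTLV items are aligned on 8-byte boundaries: pad a vector of buffers
 * with a zero-filled buffer so that its total length becomes a multiple
 * of eight bytes */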
function _ttlvPadVector(vec) {
let length = 0;
vec.forEach(buf => {
if (!(buf instanceof Buffer)) {
throw Error('Not a Buffer');
}
length += buf.length;
});
const paddingLength = (Math.ceil(length / 8) * 8) - length;
if (paddingLength > 0) {
vec.push(Buffer.alloc(paddingLength).fill(0));
}
return vec;
}
function _throwError(logger, msg, data) {
logger.error(msg, data);
throw Error(msg);
}
function TTLVCodec() {
if (!new.target) {
return new TTLVCodec();
}
const TagDecoder = JSON.parse(JSON.stringify(KMIPTags));
const TagEncoder = {};
const TypeDecoder = {};
const TypeEncoder = {};
const PrimitiveTypes = {
'01': {
name: 'Structure',
decode: (logger, unusedTag, value) => {
const funcName = 'Structure::decode';
const length = value.length;
let i = 0;
const result = [];
let diversion = null;
while (i < length) {
const element = {};
const elementTag = value.slice(i, i + 3).toString('hex');
const elementType =
value.slice(i + 3, i + 4).toString('hex');
const elementLength = value.readUInt32BE(i + 4);
const property = {};
if (!TypeDecoder[elementType]) {
_throwError(logger,
'Unknown element type',
{ funcName, elementTag, elementType });
}
const elementValue = value.slice(i + 8,
i + 8 + elementLength);
if (elementValue.length !== elementLength) {
_throwError(logger, 'BUG: Wrong buffer size',
{ funcName, elementLength,
bufferLength: elementValue.length });
}
property.type = TypeDecoder[elementType].name;
property.value = TypeDecoder[elementType]
.decode(logger, elementTag, elementValue, diversion);
if (diversion) {
property.diversion = diversion;
diversion = null;
}
const tagInfo = TagDecoder[elementTag];
if (!tagInfo) {
logger.debug('Unknown element tag',
{ funcName, elementTag });
property.tag = elementTag;
element['Unknown Tag'] = property;
} else {
element[tagInfo.name] = property;
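// Remember an 'Attribute Name' value as a 'diversion' so that the
// following element (normally the 'Attribute Value') is decoded
// against the tag it names rather than its own generic tag.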
if (tagInfo.name === 'Attribute Name') {
if (property.type !== 'TextString') {
_throwError(logger,
'Invalid type',
{ funcName, type: property.type });
}
diversion = property.value;
}
}
i += Math.ceil((8 + elementLength) / 8.0) * 8;
result.push(element);
}
return result;
},
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type = Buffer.from(TypeEncoder['Structure'].value, 'hex');
const length = Buffer.alloc(4);
let vectorLength = 0;
let encodedValue = [];
value.forEach(item => {
Object.keys(item).forEach(key => {
const itemTagName = key;
const itemType = item[key].type;
const itemValue = item[key].value;
const itemDiversion = item[key].diversion;
if (!TagEncoder[itemTagName]) {
throw Error(`Unknown Tag '${itemTagName}'`);
}
if (!TypeEncoder[itemType]) {
throw Error(`Unknown Type '${itemType}'`);
}
const itemResult =
TypeEncoder[itemType].encode(itemTagName,
itemValue,
itemDiversion);
encodedValue = encodedValue
.concat(_ttlvPadVector(itemResult));
});
});
encodedValue = _ttlvPadVector(encodedValue);
encodedValue.forEach(buf => { vectorLength += buf.length; });
length.writeUInt32BE(vectorLength);
return _ttlvPadVector([tag, type, length, ...encodedValue]);
},
},
'02': {
name: 'Integer',
decode: (logger, tag, value) => {
const funcName = 'Integer::decode';
const fixedLength = 4;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
return value.readUInt32BE(0);
},
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type = Buffer.from(TypeEncoder['Integer'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(4);
const encodedValue = Buffer.alloc(4);
encodedValue.writeUInt32BE(value);
return _ttlvPadVector([tag, type, length, encodedValue]);
},
},
'03': {
name: 'LongInteger',
decode: (logger, tag, value) => {
const funcName = 'LongInteger::decode';
const fixedLength = 8;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const longUInt = UINT32_MAX * value.readUInt32BE(0) +
value.readUInt32BE(4);
if (longUInt > Number.MAX_SAFE_INTEGER) {
_throwError(logger,
'53-bit overflow',
{ funcName, longUInt });
}
return longUInt;
},
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type =
Buffer.from(TypeEncoder['LongInteger'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(8);
const encodedValue = Buffer.alloc(8);
encodedValue.writeUInt32BE(Math.floor(value / UINT32_MAX), 0);
encodedValue.writeUInt32BE(value % UINT32_MAX, 4);
return _ttlvPadVector([tag, type, length, encodedValue]);
},
},
'04': {
name: 'BigInteger',
decode: (logger, tag, value) => value,
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type =
Buffer.from(TypeEncoder['BigInteger'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(value.length);
return _ttlvPadVector([tag, type, length, value]);
},
},
'05': {
name: 'Enumeration',
decode: (logger, tag, value, diversion) => {
const funcName = 'Enumeration::decode';
const fixedLength = 4;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const enumValue = value.toString('hex');
const actualTag = diversion ? TagEncoder[diversion].value : tag;
const enumInfo = TagDecoder[actualTag];
if (!enumInfo ||
!enumInfo.enumeration ||
!enumInfo.enumeration[enumValue]) {
return { tag,
value: enumValue,
message: 'Unknown enumeration value',
diversion,
};
}
return enumInfo.enumeration[enumValue];
},
encode: (tagName, value, diversion) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type =
Buffer.from(TypeEncoder['Enumeration'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(4);
const actualTag = diversion || tagName;
const encodedValue =
Buffer.from(TagEncoder[actualTag].enumeration[value],
'hex');
return _ttlvPadVector([tag, type, length, encodedValue]);
},
},
'06': {
name: 'Boolean',
decode: (logger, tag, value) => {
const funcName = 'Boolean::decode';
const fixedLength = 8;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const msUInt = value.readUInt32BE(0);
const lsUInt = value.readUInt32BE(4);
return !!(msUInt | lsUInt);
},
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type = Buffer.from(TypeEncoder['Boolean'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(8);
const encodedValue = Buffer.alloc(8);
encodedValue.writeUInt32BE(0, 0);
encodedValue.writeUInt32BE(value ? 1 : 0, 4);
return _ttlvPadVector([tag, type, length, encodedValue]);
},
},
'07': {
name: 'TextString',
decode: (logger, tag, value) => value.toString('utf8'),
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type =
Buffer.from(TypeEncoder['TextString'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(value.length);
return _ttlvPadVector([tag, type, length,
Buffer.from(value, 'utf8')]);
},
},
'08': {
name: 'ByteString',
decode: (logger, tag, value) => value,
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type =
Buffer.from(TypeEncoder['ByteString'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(value.length);
return _ttlvPadVector([tag, type, length, value]);
},
},
'09': {
name: 'Date-Time',
decode: (logger, tag, value) => {
const funcName = 'Date-Time::decode';
const fixedLength = 8;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
const d = new Date(0);
const utcSeconds = UINT32_MAX * value.readUInt32BE(0) +
value.readUInt32BE(4);
if (utcSeconds > Number.MAX_SAFE_INTEGER) {
_throwError(logger,
'53-bit overflow',
{ funcName, utcSeconds });
}
d.setUTCSeconds(utcSeconds);
return d;
},
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type = Buffer.from(TypeEncoder['Date-Time'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(8);
const encodedValue = Buffer.alloc(8);
const ts = value.getTime() / 1000;
encodedValue.writeUInt32BE(Math.floor(ts / UINT32_MAX), 0);
encodedValue.writeUInt32BE(ts % UINT32_MAX, 4);
return _ttlvPadVector([tag, type, length, encodedValue]);
},
},
'0a': {
name: 'Interval',
decode: (logger, tag, value) => {
const funcName = 'Interval::decode';
const fixedLength = 4;
if (fixedLength !== value.length) {
_throwError(logger,
'Length mismatch',
{ funcName, fixedLength,
bufferLength: value.length });
}
return value.readInt32BE(0);
},
encode: (tagName, value) => {
const tag = Buffer.from(TagEncoder[tagName].value, 'hex');
const type = Buffer.from(TypeEncoder['Interval'].value, 'hex');
const length = Buffer.alloc(4);
length.writeUInt32BE(4);
const encodedValue = Buffer.alloc(4);
encodedValue.writeUInt32BE(value);
return _ttlvPadVector([tag, type, length, encodedValue]);
},
},
};
/* Construct TagDecoder */
Object.keys(TagDecoder).forEach(key => {
const element = {};
element.value = key;
if (TagDecoder[key]['enumeration']) {
const enumeration = {};
Object.keys(TagDecoder[key]['enumeration']).forEach(enumValue => {
const enumKey = TagDecoder[key]['enumeration'][enumValue];
enumeration[enumKey] = enumValue;
});
element.enumeration = enumeration;
}
TagEncoder[TagDecoder[key].name] = element;
});
/* Construct TypeDecoder and TypeEncoder */
Object.keys(PrimitiveTypes).forEach(value => {
const name = PrimitiveTypes[value].name;
const encode = PrimitiveTypes[value].encode;
const decode = PrimitiveTypes[value].decode;
TypeDecoder[value] = { name, decode };
TypeEncoder[name] = { value, encode };
});
/* Public Methods Definition */
this.encodeMask = (tagName, value) => {
let mask = 0;
value.forEach(item => {
const enumValue = TagEncoder[tagName].enumeration[item];
if (!enumValue) {
throw Error('Invalid bit name');
}
mask |= parseInt(enumValue, 16);
});
return mask;
};
this.decodeMask = (tagName, givenMask) => {
let mask = givenMask;
const value = [];
const tag = TagEncoder[tagName].value;
Object.keys(TagDecoder[tag].enumeration).forEach(key => {
const bit = Buffer.from(key, 'hex').readUInt32BE(0);
if (bit & mask) {
mask &= ~bit;
value.push(TagDecoder[tag].enumeration[key]);
}
});
return value;
};
this.decode = (logger, rawMessage) => {
const messageContent =
TypeDecoder['01'].decode(logger, null, rawMessage);
return new KMIPMessage(messageContent);
};
this.encode = message => {
const value = message.content;
let result = [];
value.forEach(item => {
Object.keys(item).forEach(key => {
if (!TagEncoder[key]) {
throw Error(`Unknown Tag '${key}'`);
}
const type = item[key].type;
if (!TypeEncoder[type]) {
throw Error(`Unknown Type '${type}'`);
}
const itemValue = TypeEncoder[type].encode(key,
item[key].value,
item[key].diversion);
result = result.concat(_ttlvPadVector(itemValue));
});
});
return Buffer.concat(_ttlvPadVector(result));
};
this.mapExtension = (tagName, tagValue) => {
const tagValueStr = tagValue.toString(16);
TagDecoder[tagValueStr] = { name: tagName };
TagEncoder[tagName] = { value: tagValueStr };
};
return this;
}
module.exports = TTLVCodec;

lib/network/kmip/index.js Normal file

@ -0,0 +1,350 @@
'use strict'; // eslint-disable-line
/* eslint new-cap: "off" */
const uuidv4 = require('uuid/v4');
const Message = require('./Message.js');
/* This client requires at least a KMIP 1.2 compatible server */
const DEFAULT_PROTOCOL_VERSION_MAJOR = 1;
const DEFAULT_PROTOCOL_VERSION_MINOR = 2;
/* Response is for one operation, consider raising this value if
* compounding ops */
const DEFAULT_MAXIMUM_RESPONSE_SIZE = 8000;
function _uniqueBatchItemID() {
const theUUID = Buffer.alloc(16);
return uuidv4(null, theUUID);
}
function _PrimitiveType(tagName, type, value) {
return { [tagName]: { type, value } };
}
class KMIP {
/**
* Construct a new KMIP Object
* @param {Class} Codec - The codec class, e.g. the TTLV codec
* @param {Class} Transport - The transport class, e.g. the TLS transport
* @param {Object} options - Instance options; the `kmip` section is used
* to configure the codec and the transport
*/
constructor(Codec, Transport, options) {
this.protocolVersion = {
major: DEFAULT_PROTOCOL_VERSION_MAJOR,
minor: DEFAULT_PROTOCOL_VERSION_MINOR,
};
this.maximumResponseSize = DEFAULT_MAXIMUM_RESPONSE_SIZE;
this.options = options.kmip;
this.codec = new Codec(options.kmip.codec);
this.transport = new Transport(options.kmip.transport);
}
/* Static class methods */
/**
* create a new abstract message instance
* @param {Object} content - Most likely a call to KMIP.Structure
* with 'Request Message' as tagName
* @returns {Object} an instance of Message
*/
static Message(content) {
return new Message(content);
}
/**
* Create a KMIP Structure field instance
* @param {String} tagName - Name of the KMIP field
* @param {Array} value - array of KMIP fields
* @returns {Object} an abstract KMIP field
*/
static Structure(tagName, value) {
return _PrimitiveType(tagName, 'Structure', value);
}
/**
* Create a KMIP Integer field instance
* @param {String} tagName - Name of the KMIP field
* @param {Number} value - a number
* @returns {Object} an abstract KMIP field
*/
static Integer(tagName, value) {
return _PrimitiveType(tagName, 'Integer', value);
}
/**
* Create a KMIP Long Integer field instance
* @param {String} tagName - Name of the KMIP field
* @param {Number} value - a number (beware of the 53-bit limitation)
* @returns {Object} an abstract KMIP field
*/
static LongInteger(tagName, value) {
return _PrimitiveType(tagName, 'LongInteger', value);
}
/**
* Create a KMIP Big Integer field instance
* @param {String} tagName - Name of the KMIP field
* @param {Buffer} value - buffer containing the big integer
* @returns {Object} an abstract KMIP field
*/
static BigInteger(tagName, value) {
if (value.length % 8 !== 0) {
throw Error('Big Integer value length must be a multiple of 8');
}
return _PrimitiveType(tagName, 'BigInteger', value);
}
/**
* Create a KMIP Enumeration field instance
* @param {String} tagName - Name of the KMIP Enumeration
* @param {String} value - Name of the KMIP Enumeration value
* @returns {Object} an abstract KMIP field
*/
static Enumeration(tagName, value) {
return _PrimitiveType(tagName, 'Enumeration', value);
}
/**
* Create a KMIP Boolean field instance
* @param {String} tagName - Name of the KMIP field
* @param {Boolean} value - anything truthy or falsy (converted to a Boolean)
* @returns {Object} an abstract KMIP field
*/
static Boolean(tagName, value) {
return _PrimitiveType(tagName, 'Boolean', !!value);
}
/**
* Create a KMIP Text String field instance
* @param {String} tagName - Name of the KMIP field
* @param {String} value - the text string
* @returns {Object} an abstract KMIP field
*/
static TextString(tagName, value) {
return _PrimitiveType(tagName, 'TextString', value);
}
/**
* Create a KMIP Byte String field instance
* @param {String} tagName - Name of the KMIP field
* @param {Buffer} value - buffer containing the byte string
* @returns {Object} an abstract KMIP field
*/
static ByteString(tagName, value) {
return _PrimitiveType(tagName, 'ByteString', value);
}
/**
* Create a KMIP Date-Time field instance
* @param {String} tagName - Name of the KMIP field
* @param {Date} value - instance of a Date (ms are discarded)
* @returns {Object} an abstract KMIP field
*/
static DateTime(tagName, value) {
value.setMilliseconds(0);
return _PrimitiveType(tagName, 'Date-Time', value);
}
/**
* Create a KMIP Interval field instance
* @param {String} tagName - Name of the KMIP field
* @param {Integer} value - number of seconds of the interval
* @returns {Object} an abstract KMIP field
*/
static Interval(tagName, value) {
return _PrimitiveType(tagName, 'Interval', value);
}
/**
* Create a KMIP Attribute field instance
* @param {String} type - type of the attribute value
* @param {String} name - Name of the attribute or KMIP field
* @param {Object} value - value of the field suitable for the
* specified type
* @returns {Object} an abstract KMIP field
*/
static Attribute(type, name, value) {
if (type === 'Date-Time') {
value.setMilliseconds(0);
}
return {
Attribute: {
type: 'Structure',
value: [
{
'Attribute Name': {
type: 'TextString',
value: name,
},
},
{
'Attribute Value': {
type,
value,
diversion: name,
},
},
],
},
};
}
/* Object methods */
/**
* Register a higher level handshake function to be called
* after the connection is initialized and before the first
* message is sent.
* @param {Function} handshakeFunction - (logger: Object, cb: Function(err))
* @returns {undefined}
*/
registerHandshakeFunction(handshakeFunction) {
this.transport.registerHandshakeFunction(handshakeFunction);
}
/**
* Decode a raw message, usually received from the transport layer
* @param {Object} logger - a Logger instance
* @param {Buffer} rawMessage - the message to decode
* @returns {Object} the decoded message as an instance of KMIP.Message
*/
_decodeMessage(logger, rawMessage) {
return this.codec.decode(logger, rawMessage);
}
/**
* Encode a message
* @param {Object} message - Instance of a KMIP.Message
* @returns {Buffer} the encoded message suitable for the transport layer
*/
_encodeMessage(message) {
return this.codec.encode(message);
}
/**
* Decode a bitmask
* @param {string} tagName - name of the bit mask defining tag
* @param {Integer} givenMask - bit mask to decode
* @returns {Array} array of named bits set in the given bit mask
*/
decodeMask(tagName, givenMask) {
return this.codec.decodeMask(tagName, givenMask);
}
/**
* Encode a bitmask
* @param {String} tagName - name of the bit mask defining tag
* @param {Array} value - array of named bits to set in the mask
* @returns {Integer} Integer encoded bitmask
*/
encodeMask(tagName, value) {
return this.codec.encodeMask(tagName, value);
}
/**
* Amend the tag nomenclature with a vendor specific extension
* @param {String} extensionName - Name of the tag to record
* @param {Integer} extensionTag - Tag value represented as an integer
* @returns {undefined}
*/
mapExtension(extensionName, extensionTag) {
return this.codec.mapExtension(extensionName, extensionTag);
}
changeProtocolVersion(major, minor) {
this.protocolVersion = { major, minor };
}
/**
* Send an operation request message to the KMIP Server
* @param {Object} logger - Werelogs logger object
* @param {String} operation - The name of the operation as defined in
* the KMIP protocol specification.
* @param {Object} payload - payload of the operation request. Specifically
* the content of the Request Payload as defined
* by the KMIP protocol specification.
* @param {Function} cb - The callback(error: Object, response: Object)
* @returns {undefined}
*/
request(logger, operation, payload, cb) {
const uuid = _uniqueBatchItemID();
const message = KMIP.Message([
KMIP.Structure('Request Message', [
KMIP.Structure('Request Header', [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major',
this.protocolVersion.major),
KMIP.Integer('Protocol Version Minor',
this.protocolVersion.minor)]),
KMIP.Integer('Maximum Response Size',
this.maximumResponseSize),
KMIP.Integer('Batch Count', 1)]),
KMIP.Structure('Batch Item', [
KMIP.Enumeration('Operation', operation),
KMIP.ByteString('Unique Batch Item ID', uuid),
KMIP.Structure('Request Payload', payload),
])])]);
const encodedMessage = this._encodeMessage(message);
this.transport.send(
logger, encodedMessage,
(err, conversation, rawResponse) => {
if (err) {
logger.error('KMIP::request: failed to send message',
{ error: err });
return cb(err);
}
const response = this._decodeMessage(logger, rawResponse);
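// Sanity-check the response: the batch item ID and the operation
// must match the request, and the result status must be 'Success'.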
const performedOperation =
response.lookup('Response Message/' +
'Batch Item/Operation')[0];
const resultStatus =
response.lookup('Response Message/' +
'Batch Item/Result Status')[0];
const resultUniqueBatchItemID =
response.lookup('Response Message/' +
'Batch Item/Unique Batch Item ID')[0];
if (!resultUniqueBatchItemID ||
resultUniqueBatchItemID.compare(uuid) !== 0) {
this.transport.abortPipeline(conversation);
const error = Error('Invalid batch item ID returned');
logger.error('KMIP::request: failed',
{ resultUniqueBatchItemID, uuid, error });
return cb(error);
}
if (performedOperation !== operation) {
this.transport.abortPipeline(conversation);
const error = Error('Operation mismatch',
{ got: performedOperation,
expected: operation });
logger.error('KMIP::request: Operation mismatch',
{ error });
return cb(error);
}
if (resultStatus !== 'Success') {
const resultReason =
response.lookup(
'Response Message/Batch Item/Result Reason')[0];
const resultMessage =
response.lookup(
'Response Message/Batch Item/Result Message')[0];
const error = Error('KMIP request failure',
{ resultStatus,
resultReason,
resultMessage });
logger.error('KMIP::request: request failed',
{ error, resultStatus,
resultReason, resultMessage });
return cb(error);
}
return cb(null, response);
});
}
}
module.exports = KMIP;

lib/network/kmip/tags.json Normal file

@ -0,0 +1,579 @@
{
"420006": {
"name": "Asynchronous Correlation Value"
},
"420007": {
"name": "Asynchronous Indicator"
},
"420008": {
"name": "Attribute"
},
"420009": {
"name": "Attribute Index"
},
"42000a": {
"name": "Attribute Name"
},
"42000b": {
"name": "Attribute Value"
},
"42000c": {
"name": "Authentication"
},
"42000d": {
"name": "Batch Count"
},
"42000f": {
"name": "Batch Item"
},
"420011": {
"name": "Block Cipher Mode",
"enumeration": {
"00000001": "CBC",
"00000002": "ECB",
"00000003": "PCBC",
"00000004": "CFB",
"00000005": "OFB",
"00000006": "CTR",
"00000007": "CMAC",
"00000008": "CCM",
"00000009": "GCM",
"0000000a": "CBC-MAC",
"0000000b": "XTS",
"0000000c": "AESKeyWrapPadding",
"0000000d": "NISTKeyWrap",
"0000000e": "X9.102 AESKW",
"0000000f": "X9.102 TDKW",
"00000010": "X9.102 AKW1",
"00000011": "X9.102 AKW2",
"00000012": "AEAD"
}
},
"420028": {
"name": "Cryptographic Algorithm",
"enumeration": {
"00000001": "DES",
"00000002": "3DES",
"00000003": "AES",
"00000004": "RSA",
"00000005": "DSA",
"00000006": "ECDSA",
"00000007": "HMAC-SHA1",
"00000008": "HMAC-SHA224",
"00000009": "HMAC-SHA256",
"0000000a": "HMAC-SHA384",
"0000000b": "HMAC-SHA512",
"0000000c": "HMAC-MD5",
"0000000d": "DH",
"0000000e": "ECDH",
"0000000f": "ECMQV",
"00000010": "Blowfish",
"00000011": "Camellia",
"00000012": "CAST5",
"00000013": "IDEA",
"00000014": "MARS",
"00000015": "RC2",
"00000016": "RC4",
"00000017": "RC5",
"00000018": "SKIPJACK",
"00000019": "Twofish",
"0000001a": "EC",
"0000001b": "One Time Pad",
"0000001c": "ChaCha20",
"0000001d": "Poly1305",
"0000001e": "ChaCha20Poly1305",
"0000001f": "SHA3-224",
"00000020": "SHA3-256",
"00000021": "SHA3-384",
"00000022": "SHA3-512",
"00000023": "HMAC-SHA3-224",
"00000024": "HMAC-SHA3-256",
"00000025": "HMAC-SHA3-384",
"00000026": "HMAC-SHA3-512",
"00000027": "SHAKE-128",
"00000028": "SHAKE-256"
}
},
"42002b": {
"name": "Cryptographic Parameters"
},
"42002c": {
"name": "Cryptographic Usage Mask",
"enumeration": {
"00000001": "Sign",
"00000002": "Verify",
"00000004": "Encrypt",
"00000008": "Decrypt",
"00000010": "Wrap Key",
"00000020": "Unwrap Key",
"00000040": "Export",
"00000080": "MAC Generate",
"00000100": "MAC Verify",
"00000200": "Derive Key",
"00000400": "Content Commitment",
"00000800": "Key Agreement",
"00001000": "Certificate Sign",
"00002000": "CRL Sign",
"00004000": "Generate Cryptogram",
"00008000": "Validate Cryptogram",
"00010000": "Translate Encrypt",
"00020000": "Translate Decrypt",
"00040000": "Translate Wrap",
"00080000": "Translate Unwrap"
}
},
"42003d": {
"name": "IV/Counter/Nonce"
},
"420050": {
"name": "Maximum Response Size"
},
"420054": {
"name": "Name Type",
"enumeration": {
"00000001": "Uninterpreted Text String",
"00000002": "URI"
}
},
"420057": {
"name": "Object Type",
"enumeration": {
"00000001": "Certificate",
"00000002": "Symmetric Key",
"00000003": "Public Key",
"00000004": "Private Key",
"00000005": "Split Key",
"00000006": "Template",
"00000007": "Secret Data",
"00000008": "Opaque Object",
"00000009": "PGP Key"
}
},
"42005c": {
"name": "Operation",
"enumeration": {
"00000001": "Create",
"00000002": "Create Key Pair",
"00000003": "Register",
"00000004": "Re-key",
"00000005": "Derive Key",
"00000006": "Certify",
"00000007": "Re-certify",
"00000008": "Locate",
"00000009": "Check",
"0000000a": "Get",
"0000000b": "Get Attributes",
"0000000c": "Get Attribute List",
"0000000d": "Add Attribute",
"0000000e": "Modify Attribute",
"0000000f": "Delete Attribute",
"00000010": "Obtain Lease",
"00000011": "Get Usage Allocation",
"00000012": "Activate",
"00000013": "Revoke",
"00000014": "Destroy",
"00000015": "Archive",
"00000016": "Recover",
"00000017": "Validate",
"00000018": "Query",
"00000019": "Cancel",
"0000001a": "Poll",
"0000001b": "Notify",
"0000001c": "Put",
"0000001d": "Re-key Key Pair",
"0000001e": "Discover Versions",
"0000001f": "Encrypt",
"00000020": "Decrypt",
"00000021": "Sign",
"00000022": "Signature Verify",
"00000023": "MAC",
"00000024": "MAC Verify",
"00000025": "RNG Retrieve",
"00000026": "RNG Seed",
"00000027": "Hash",
"00000028": "Create Split Key",
"00000029": "Join Split Key",
"0000002a": "Import",
"0000002b": "Export"
}
},
"42005f": {
"name": "Padding Method",
"enumeration": {
"00000001": "None",
"00000002": "OAEP",
"00000003": "PKCS5",
"00000004": "SSL3",
"00000005": "Zeros",
"00000006": "ANSI X9.23",
"00000007": "ISO 10126",
"00000008": "PKCS1 v1.5",
"00000009": "X9.31",
"0000000a": "PSS"
}
},
"420069": {
"name": "Protocol Version"
},
"42006a": {
"name": "Protocol Version Major"
},
"42006b": {
"name": "Protocol Version Minor"
},
"420074": {
"name": "Query Function",
"enumeration": {
"00000001": "Query Operations",
"00000002": "Query Objects",
"00000003": "Query Server Information",
"00000004": "Query Application Namespaces",
"00000005": "Query Extension List",
"00000006": "Query Extension Map",
"00000007": "Query Attestation Types",
"00000008": "Query RNGs",
"00000009": "Query Validations",
"0000000a": "Query Profiles",
"0000000b": "Query Capabilities",
"0000000c": "Query Client Registration Methods"
}
},
"420077": {
"name": "Request Header"
},
"420078": {
"name": "Request Message"
},
"420079": {
"name": "Request Payload"
},
"42007a": {
"name": "Response Header"
},
"42007b": {
"name": "Response Message"
},
"42007c": {
"name": "Response Payload"
},
"42007d": {
"name": "Result Message"
},
"42007e": {
"name": "Result Reason",
"enumeration": {
"00000001": "Item Not Found",
"00000002": "Response Too Large",
"00000003": "Authentication Not Successful",
"00000004": "Invalid Message",
"00000005": "Operation Not Supported",
"00000006": "Missing Data",
"00000007": "Invalid Field",
"00000008": "Feature Not Supported",
"00000009": "Operation Canceled By Requester",
"0000000a": "Cryptographic Failure",
"0000000b": "Illegal Operation",
"0000000c": "Permission Denied",
"0000000d": "Object archived",
"0000000e": "Index Out of Bounds",
"0000000f": "Application Namespace Not Supported",
"00000010": "Key Format Type Not Supported",
"00000011": "Key Compression Type Not Supported",
"00000012": "Encoding Option Error",
"00000013": "Key Value Not Present",
"00000014": "Attestation Required",
"00000015": "Attestation Failed",
"00000016": "Sensitive",
"00000017": "Not Extractable",
"00000018": "Object Already Exists",
"00000100": "General Failure"
}
},
"42007f": {
"name": "Result Status",
"enumeration": {
"00000000": "Success",
"00000001": "Operation Failed",
"00000002": "Operation Pending",
"00000003": "Operation Undone"
}
},
"420080": {
"name": "Revocation Message"
},
"420081": {
"name": "Revocation Reason"
},
"420082": {
"name": "Revocation Reason Code",
"enumeration": {
"00000001": "Unspecified",
"00000002": "Key Compromise",
"00000003": "CA Compromise",
"00000004": "Affiliation Changed",
"00000005": "Superseded",
"00000006": "Cessation of Operation",
"00000007": "Privilege Withdrawn"
}
},
"420088": {
"name": "Server Information"
},
"420091": {
"name": "Template-Attribute"
},
"420092": {
"name": "Time Stamp"
},
"420093": {
"name": "Unique Batch Item ID"
},
"420094": {
"name": "Unique Identifier"
},
"42009d": {
"name": "Vendor Identification"
},
"4200a4": {
"name": "Extension Information"
},
"4200a5": {
"name": "Extension Name"
},
"4200a6": {
"name": "Extension Tag"
},
"4200a7": {
"name": "Extension Type"
},
"4200c2": {
"name": "Data"
},
"4200eb": {
"name": "Profile Information"
},
"4200ec": {
"name": "Profile Name",
"enumeration": {
"00000001": "Baseline Server Basic KMIP v1.2",
"00000002": "Baseline Server TLS v1.2 KMIP v1.2",
"00000003": "Baseline Client Basic KMIP v1.2",
"00000004": "Baseline Client TLS v1.2 KMIP v1.2",
"00000005": "Complete Server Basic KMIP v1.2",
"00000006": "Complete Server TLS v1.2 KMIP v1.2",
"00000007": "Tape Library Client KMIP v1.0",
"00000008": "Tape Library Client KMIP v1.1",
"00000009": "Tape Library Client KMIP v1.2",
"0000000a": "Tape Library Server KMIP v1.0",
"0000000b": "Tape Library Server KMIP v1.1",
"0000000c": "Tape Library Server KMIP v1.2",
"0000000d": "Symmetric Key Lifecycle Client KMIP v1.0",
"0000000e": "Symmetric Key Lifecycle Client KMIP v1.1",
"0000000f": "Symmetric Key Lifecycle Client KMIP v1.2",
"00000010": "Symmetric Key Lifecycle Server KMIP v1.0",
"00000011": "Symmetric Key Lifecycle Server KMIP v1.1",
"00000012": "Symmetric Key Lifecycle Server KMIP v1.2",
"00000013": "Asymmetric Key Lifecycle Client KMIP v1.0",
"00000014": "Asymmetric Key Lifecycle Client KMIP v1.1",
"00000015": "Asymmetric Key Lifecycle Client KMIP v1.2",
"00000016": "Asymmetric Key Lifecycle Server KMIP v1.0",
"00000017": "Asymmetric Key Lifecycle Server KMIP v1.1",
"00000018": "Asymmetric Key Lifecycle Server KMIP v1.2",
"00000019": "Basic Cryptographic Client KMIP v1.2",
"0000001a": "Basic Cryptographic Server KMIP v1.2",
"0000001b": "Advanced Cryptographic Client KMIP v1.2",
"0000001c": "Advanced Cryptographic Server KMIP v1.2",
"0000001d": "RNG Cryptographic Client KMIP v1.2",
"0000001e": "RNG Cryptographic Server KMIP v1.2",
"0000001f": "Basic Symmetric Key Foundry Client KMIP v1.0",
"00000020": "Intermediate Symmetric Key Foundry Client KMIP v1.0",
"00000021": "Advanced Symmetric Key Foundry Client KMIP v1.0",
"00000022": "Basic Symmetric Key Foundry Client KMIP v1.1",
"00000023": "Intermediate Symmetric Key Foundry Client KMIP v1.1",
"00000024": "Advanced Symmetric Key Foundry Client KMIP v1.1",
"00000025": "Basic Symmetric Key Foundry Client KMIP v1.2",
"00000026": "Intermediate Symmetric Key Foundry Client KMIP v1.2",
"00000027": "Advanced Symmetric Key Foundry Client KMIP v1.2",
"00000028": "Symmetric Key Foundry Server KMIP v1.0",
"00000029": "Symmetric Key Foundry Server KMIP v1.1",
"0000002a": "Symmetric Key Foundry Server KMIP v1.2",
"0000002b": "Opaque Managed Object Store Client KMIP v1.0",
"0000002c": "Opaque Managed Object Store Client KMIP v1.1",
"0000002d": "Opaque Managed Object Store Client KMIP v1.2",
"0000002e": "Opaque Managed Object Store Server KMIP v1.0",
"0000002f": "Opaque Managed Object Store Server KMIP v1.1",
"00000030": "Opaque Managed Object Store Server KMIP v1.2",
"00000031": "Suite B minLOS_128 Client KMIP v1.0",
"00000032": "Suite B minLOS_128 Client KMIP v1.1",
"00000033": "Suite B minLOS_128 Client KMIP v1.2",
"00000034": "Suite B minLOS_128 Server KMIP v1.0",
"00000035": "Suite B minLOS_128 Server KMIP v1.1",
"00000036": "Suite B minLOS_128 Server KMIP v1.2",
"00000037": "Suite B minLOS_192 Client KMIP v1.0",
"00000038": "Suite B minLOS_192 Client KMIP v1.1",
"00000039": "Suite B minLOS_192 Client KMIP v1.2",
"0000003a": "Suite B minLOS_192 Server KMIP v1.0",
"0000003b": "Suite B minLOS_192 Server KMIP v1.1",
"0000003c": "Suite B minLOS_192 Server KMIP v1.2",
"0000003d": "Storage Array with Self Encrypting Drive Client KMIP v1.0",
"0000003e": "Storage Array with Self Encrypting Drive Client KMIP v1.1",
"0000003f": "Storage Array with Self Encrypting Drive Client KMIP v1.2",
"00000040": "Storage Array with Self Encrypting Drive Server KMIP v1.0",
"00000041": "Storage Array with Self Encrypting Drive Server KMIP v1.1",
"00000042": "Storage Array with Self Encrypting Drive Server KMIP v1.2",
"00000043": "HTTPS Client KMIP v1.0",
"00000044": "HTTPS Client KMIP v1.1",
"00000045": "HTTPS Client KMIP v1.2",
"00000046": "HTTPS Server KMIP v1.0",
"00000047": "HTTPS Server KMIP v1.1",
"00000048": "HTTPS Server KMIP v1.2",
"00000049": "JSON Client KMIP v1.0",
"0000004a": "JSON Client KMIP v1.1",
"0000004b": "JSON Client KMIP v1.2",
"0000004c": "JSON Server KMIP v1.0",
"0000004d": "JSON Server KMIP v1.1",
"0000004e": "JSON Server KMIP v1.2",
"0000004f": "XML Client KMIP v1.0",
"00000050": "XML Client KMIP v1.1",
"00000051": "XML Client KMIP v1.2",
"00000052": "XML Server KMIP v1.0",
"00000053": "XML Server KMIP v1.1",
"00000054": "XML Server KMIP v1.2",
"00000055": "Baseline Server Basic KMIP v1.3",
"00000056": "Baseline Server TLS v1.2 KMIP v1.3",
"00000057": "Baseline Client Basic KMIP v1.3",
"00000058": "Baseline Client TLS v1.2 KMIP v1.3",
"00000059": "Complete Server Basic KMIP v1.3",
"0000005a": "Complete Server TLS v1.2 KMIP v1.3",
"0000005b": "Tape Library Client KMIP v1.3",
"0000005c": "Tape Library Server KMIP v1.3",
"0000005d": "Symmetric Key Lifecycle Client KMIP v1.3",
"0000005e": "Symmetric Key Lifecycle Server KMIP v1.3",
"0000005f": "Asymmetric Key Lifecycle Client KMIP v1.3",
"00000060": "Asymmetric Key Lifecycle Server KMIP v1.3",
"00000061": "Basic Cryptographic Client KMIP v1.3",
"00000062": "Basic Cryptographic Server KMIP v1.3",
"00000063": "Advanced Cryptographic Client KMIP v1.3",
"00000064": "Advanced Cryptographic Server KMIP v1.3",
"00000065": "RNG Cryptographic Client KMIP v1.3",
"00000066": "RNG Cryptographic Server KMIP v1.3",
"00000067": "Basic Symmetric Key Foundry Client KMIP v1.3",
"00000068": "Intermediate Symmetric Key Foundry Client KMIP v1.3",
"00000069": "Advanced Symmetric Key Foundry Client KMIP v1.3",
"0000006a": "Symmetric Key Foundry Server KMIP v1.3",
"0000006b": "Opaque Managed Object Store Client KMIP v1.3",
"0000006c": "Opaque Managed Object Store Server KMIP v1.3",
"0000006d": "Suite B minLOS_128 Client KMIP v1.3",
"0000006e": "Suite B minLOS_128 Server KMIP v1.3",
"0000006f": "Suite B minLOS_192 Client KMIP v1.3",
"00000070": "Suite B minLOS_192 Server KMIP v1.3",
"00000071": "Storage Array with Self Encrypting Drive Client KMIP v1.3",
"00000072": "Storage Array with Self Encrypting Drive Server KMIP v1.3",
"00000073": "HTTPS Client KMIP v1.3",
"00000074": "HTTPS Server KMIP v1.3",
"00000075": "JSON Client KMIP v1.3",
"00000076": "JSON Server KMIP v1.3",
"00000077": "XML Client KMIP v1.3",
"00000078": "XML Server KMIP v1.3",
"00000079": "Baseline Server Basic KMIP v1.4",
"0000007a": "Baseline Server TLS v1.2 KMIP v1.4",
"0000007b": "Baseline Client Basic KMIP v1.4",
"0000007c": "Baseline Client TLS v1.2 KMIP v1.4",
"0000007d": "Complete Server Basic KMIP v1.4",
"0000007e": "Complete Server TLS v1.2 KMIP v1.4",
"0000007f": "Tape Library Client KMIP v1.4",
"00000080": "Tape Library Server KMIP v1.4",
"00000081": "Symmetric Key Lifecycle Client KMIP v1.4",
"00000082": "Symmetric Key Lifecycle Server KMIP v1.4",
"00000083": "Asymmetric Key Lifecycle Client KMIP v1.4",
"00000084": "Asymmetric Key Lifecycle Server KMIP v1.4",
"00000085": "Basic Cryptographic Client KMIP v1.4",
"00000086": "Basic Cryptographic Server KMIP v1.4",
"00000087": "Advanced Cryptographic Client KMIP v1.4",
"00000088": "Advanced Cryptographic Server KMIP v1.4",
"00000089": "RNG Cryptographic Client KMIP v1.4",
"0000008a": "RNG Cryptographic Server KMIP v1.4",
"0000008b": "Basic Symmetric Key Foundry Client KMIP v1.4",
"0000008c": "Intermediate Symmetric Key Foundry Client KMIP v1.4",
"0000008d": "Advanced Symmetric Key Foundry Client KMIP v1.4",
"0000008e": "Symmetric Key Foundry Server KMIP v1.4",
"0000008f": "Opaque Managed Object Store Client KMIP v1.4",
"00000090": "Opaque Managed Object Store Server KMIP v1.4",
"00000091": "Suite B minLOS_128 Client KMIP v1.4",
"00000092": "Suite B minLOS_128 Server KMIP v1.4",
"00000093": "Suite B minLOS_192 Client KMIP v1.4",
"00000094": "Suite B minLOS_192 Server KMIP v1.4",
"00000095": "Storage Array with Self Encrypting Drive Client KMIP v1.4",
"00000096": "Storage Array with Self Encrypting Drive Server KMIP v1.4",
"00000097": "HTTPS Client KMIP v1.4",
"00000098": "HTTPS Server KMIP v1.4",
"00000099": "JSON Client KMIP v1.4",
"0000009a": "JSON Server KMIP v1.4",
"0000009b": "XML Client KMIP v1.4",
"0000009c": "XML Server KMIP v1.4"
}
},
"4200ed": {
"name": "Server URI"
},
"4200ee": {
"name": "Server Port"
},
"4200ef": {
"name": "Streaming Capability"
},
"4200f0": {
"name": "Asynchronous Capability"
},
"4200f1": {
"name": "Attestation Capability"
},
"4200f2": {
"name": "Unwrap Mode",
"enumeration": {
"00000001": "Unspecified",
"00000002": "Processed",
"00000003": "Not Processed"
}
},
"4200f3": {
"name": "Destroy Action",
"enumeration": {
"00000001": "Unspecified",
"00000002": "Key Material Deleted",
"00000003": "Key Material Shredded",
"00000004": "Meta Data Deleted",
"00000005": "Meta Data Shredded",
"00000006": "Deleted",
"00000007": "Shredded"
}
},
"4200f4": {
"name": "Shredding Algorithm",
"enumeration": {
"00000001": "Unspecified",
"00000002": "Cryptographic",
"00000003": "Unsupported"
}
},
"4200f5": {
"name": "RNG Mode",
"enumeration": {
"00000001": "Unspecified",
"00000002": "Shared Instantiation",
"00000003": "Non-Shared Instantiation"
}
},
"4200f6": {
"name": "Client Registration Method"
},
"4200f7": {
"name": "Capability Information"
},
"420105": {
"name": "Client Correlation Value"
},
"420106": {
"name": "Server Correlation Value"
}
}


@ -0,0 +1,181 @@
'use strict'; // eslint-disable-line
const assert = require('assert');
const DEFAULT_PIPELINE_DEPTH = 8;
const DEFAULT_KMIP_PORT = 5696;
class TransportTemplate {
/**
* Construct a new object of the TransportTemplate class
* @param {Object} channel - Typically the tls object
* @param {Object} options - Instance options
* @param {Number} options.pipelineDepth - depth of the pipeline
* @param {Object} options.tls - Standard TLS socket initialization
* parameters
* @param {Number} options.tls.port - TLS server port to connect to
*/
constructor(channel, options) {
this.channel = channel;
this.options = options;
this.pipelineDepth = Math.max(1, options.pipelineDepth ||
DEFAULT_PIPELINE_DEPTH);
this.callbackPipeline = [];
this.deferedRequests = [];
this.pipelineDrainedCallback = null;
this.handshakeFunction = null;
this.socket = null;
}
/**
* Drain the outstanding and deferred request queues by
* calling the associated callbacks with an error
* @param {Error} error - the error to call the callback function with.
* @returns {undefined}
*/
_drainQueuesWithError(error) {
this.callbackPipeline.forEach(queuedCallback => {
queuedCallback(error);
});
this.deferedRequests.forEach(deferedRequest => {
deferedRequest.cb(error);
});
this.callbackPipeline = [];
this.deferedRequests = [];
}
/**
* Register a higher level handshake function to be called
* after the connection is initialized and before the first
* message is sent.
* @param {Function} handshakeFunction - (logger: Object, cb: Function(err))
* @returns {undefined}
*/
registerHandshakeFunction(handshakeFunction) {
this.handshakeFunction = handshakeFunction;
}
/**
* Create a new conversation (e.g. a socket) between the client
* and the server.
* @param {Object} logger - Werelogs logger object
* @param {Function} readyCallback - callback function to call when the
* conversation is ready to be initiated
* func(err: Error)
* @returns {undefined}
*/
_createConversation(logger, readyCallback) {
try {
const socket = this.channel.connect(
this.options.tls.port || DEFAULT_KMIP_PORT,
this.options.tls,
() => {
socket.on('data', data => {
const queuedCallback = this.callbackPipeline.shift();
queuedCallback(null, socket, data);
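// A response frees one pipeline slot: dispatch the next deferred
// request if any, or signal a pending graceful end() once both the
// pipeline and the deferred queue are empty.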
if (this.callbackPipeline.length <
this.pipelineDepth &&
this.deferedRequests.length > 0) {
const deferedRequest = this.deferedRequests.shift();
process.nextTick(() => {
this.send(logger,
deferedRequest.encodedMessage,
deferedRequest.cb);
});
} else if (this.callbackPipeline.length === 0 &&
this.deferedRequests.length === 0 &&
this.pipelineDrainedCallback) {
this.pipelineDrainedCallback();
this.pipelineDrainedCallback = null;
}
});
socket.on('end', () => {
const error = Error('Conversation interrupted');
this._drainQueuesWithError(error);
this.socket = null;
});
socket.on('error', err => {
this._drainQueuesWithError(err);
});
if (this.handshakeFunction) {
this.handshakeFunction(logger, readyCallback);
} else {
readyCallback(null);
}
});
this.socket = socket;
} catch (err) {
logger.error('TransportTemplate::_createConversation', { error: err });
readyCallback(err);
}
}
_doSend(logger, encodedMessage, cb) {
const socket = this.socket;
if (!socket) {
const error = new Error('Socket to server not available');
logger.error('TransportTemplate::_doSend', { error });
return cb(error);
}
this.callbackPipeline.push(cb);
socket.cork();
socket.write(encodedMessage);
socket.uncork();
return undefined;
}
/**
* Send an encoded message to the server
* @param {Object} logger - Werelogs logger object
* @param {Buffer} encodedMessage - the encoded message to send to the
* server
* @param {Function} cb - (err, conversation, rawResponse)
* @returns {undefined}
*/
send(logger, encodedMessage, cb) {
if (this.callbackPipeline.length >= this.pipelineDepth) {
return this.deferedRequests.push({ encodedMessage, cb });
}
assert(encodedMessage.length !== 0);
if (this.socket === null) {
return this._createConversation(logger, err => {
if (err) {
return cb(err);
}
return this._doSend(logger, encodedMessage, cb);
});
}
return this._doSend(logger, encodedMessage, cb);
}
/**
* Gracefully interrupt the conversation. If the caller keeps sending
* messages after calling this function, the conversation won't
* converge to its end.
* @returns {undefined}
*/
end() {
if (!this.socket) {
return;
}
if (this.callbackPipeline.length !== 0 ||
this.deferedRequests.length !== 0) {
this.pipelineDrainedCallback = this.socket.end.bind(this.socket);
} else {
this.socket.end();
}
}
/**
* Abruptly interrupt the conversation and cancel the outstanding and
* deferred requests
* @param {Object} conversation - the conversation to abort
* @returns {undefined}
*/
abortPipeline(conversation) {
conversation.end();
}
}
module.exports = TransportTemplate;
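For orientation, a rough usage sketch of the transport above (not from the repository: the require path, host and the payload bytes are placeholders; the callback signature follows the send() docstring):
// Hypothetical caller of TransportTemplate through its TLS specialization.
const werelogs = require('werelogs');
const TlsTransport = require('./tls'); // path assumed
const logger = new werelogs.Logger('KmipTransportExample');
const transport = new TlsTransport({
    pipelineDepth: 8,
    tls: { port: 5696, host: 'localhost' }, // KMIP server assumed reachable
});
const encodedRequest = Buffer.from('placeholder TTLV bytes');
transport.send(logger, encodedRequest, (err, conversation, rawResponse) => {
    if (err) {
        return logger.error('KMIP request failed', { error: err });
    }
    logger.info('received raw KMIP response', { length: rawResponse.length });
    return transport.end();
});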

View File

@ -0,0 +1,12 @@
'use strict'; // eslint-disable-line
const tls = require('tls');
const TransportTemplate = require('./TransportTemplate.js');
class TlsTransport extends TransportTemplate {
constructor(options) {
super(tls, options);
}
}
module.exports = TlsTransport;

View File

@ -0,0 +1,97 @@
const httpServer = require('../http/server');
const werelogs = require('werelogs');
const errors = require('../../errors');
function sendError(res, log, error, optMessage) {
res.writeHead(error.code);
let message;
if (optMessage) {
message = optMessage;
} else {
message = error.description || '';
}
log.debug('sending back error response', { httpCode: error.code,
errorType: error.message,
error: message });
res.end(`${JSON.stringify({ errorType: error.message,
errorMessage: message })}\n`);
}
function sendSuccess(res, log, msg) {
res.writeHead(200);
log.debug('replying with success');
const message = msg || 'OK';
res.end(message);
}
function constructEndpoints(ns, path) {
return `/${ns}/${path}`;
}
function checkStub(log) { // eslint-disable-line
return true;
}
class HealthProbeServer extends httpServer {
constructor(params) {
const logging = new werelogs.Logger('HealthProbeServer');
super(params.port, logging);
this.logging = logging;
this.setBindAddress(params.bindAddress || 'localhost');
this._namespace = params.namespace || '_/health';
const livenessURI = constructEndpoints(this._namespace,
params.livenessURI || 'liveness');
const readinessURI = constructEndpoints(this._namespace,
params.readinessURI || 'readiness');
// hooking our request processing function by calling the
// parent's method for that
this.onRequest(this._onRequest);
this._reqHandlers = {};
this._reqHandlers[livenessURI] = this._onLiveness.bind(this);
this._reqHandlers[readinessURI] = this._onReadiness.bind(this);
this._livenessCheck = params.livenessCheck || checkStub;
this._readinessCheck = params.readinessCheck || checkStub;
}
onLiveCheck(f) {
this._livenessCheck = f;
}
onReadyCheck(f) {
this._readinessCheck = f;
}
_onRequest(req, res) {
const log = this.logging.newRequestLogger();
log.debug('request received', { method: req.method,
url: req.url });
if (req.method !== 'GET') {
    return sendError(res, log, errors.MethodNotAllowed);
}
if (req.url.startsWith(`/${this._namespace}`) &&
req.url in this._reqHandlers) {
this._reqHandlers[req.url](req, res, log);
} else {
sendError(res, log, errors.InvalidURI);
}
}
_onLiveness(req, res, log) {
if (this._livenessCheck(log)) {
sendSuccess(res, log);
} else {
sendError(res, log, errors.ServiceUnavailable);
}
}
_onReadiness(req, res, log) {
if (this._readinessCheck(log)) {
sendSuccess(res, log);
} else {
sendError(res, log, errors.ServiceUnavailable);
}
}
}
module.exports = HealthProbeServer;
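A minimal setup sketch for the probe server above, assuming the underlying http server class exposes start(); the port, bind address and the readiness flag are illustrative:
// Expose GET /_/health/liveness and /_/health/readiness on port 8000.
const HealthProbeServer = require('./probe/HealthProbeServer'); // path assumed
let backendReady = false; // flipped once the real backend is initialized
const probeServer = new HealthProbeServer({
    port: 8000,
    bindAddress: '0.0.0.0',
    readinessCheck: log => {
        log.debug('running readiness check', { backendReady });
        return backendReady;
    },
});
probeServer.start(); // start() assumed from the parent http server
setTimeout(() => { backendReady = true; }, 5000); // simulate slow startup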

View File

@ -214,7 +214,7 @@ class RESTServer extends httpServer {
        if (req.url.startsWith(`${constants.dataFileURL}?`)) {
            const queryParam = url.parse(req.url).query;
            if (queryParam === 'diskUsage') {
-                this.dataStore.getDiskUsage((err, result) => {
+                return this.dataStore.getDiskUsage((err, result) => {
                    if (err) {
                        return sendError(res, log, err);
                    }

View File

@ -68,6 +68,31 @@ function _checkUnmodifiedSince(ifUnmodifiedSinceTime, lastModified) {
    return res;
}
+/**
+ * checks 'if-modified-since' and 'if-unmodified-since' headers if included in
+ * request against last-modified date of object
+ * @param {object} headers - headers from request object
+ * @param {string} lastModified - last modified date of object
+ * @return {object} contains modifiedSince and unmodifiedSince res objects
+ */
+function checkDateModifiedHeaders(headers, lastModified) {
+    let lastModifiedDate = new Date(lastModified);
+    lastModifiedDate.setMilliseconds(0);
+    lastModifiedDate = lastModifiedDate.getTime();
+    const ifModifiedSinceHeader = headers['if-modified-since'] ||
+        headers['x-amz-copy-source-if-modified-since'];
+    const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
+        headers['x-amz-copy-source-if-unmodified-since'];
+    const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader,
+        lastModifiedDate);
+    const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader,
+        lastModifiedDate);
+    return { modifiedSinceRes, unmodifiedSinceRes };
+}
/**
 * validateConditionalHeaders - validates 'if-modified-since',
 * 'if-unmodified-since', 'if-match' or 'if-none-match' headers if included in
@ -79,23 +104,14 @@ function _checkUnmodifiedSince(ifUnmodifiedSinceTime, lastModified) {
 * empty object if no error
 */
function validateConditionalHeaders(headers, lastModified, contentMD5) {
-    let lastModifiedDate = new Date(lastModified);
-    lastModifiedDate.setMilliseconds(0);
-    lastModifiedDate = lastModifiedDate.getTime();
    const ifMatchHeader = headers['if-match'] ||
        headers['x-amz-copy-source-if-match'];
    const ifNoneMatchHeader = headers['if-none-match'] ||
        headers['x-amz-copy-source-if-none-match'];
-    const ifModifiedSinceHeader = headers['if-modified-since'] ||
-        headers['x-amz-copy-source-if-modified-since'];
-    const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
-        headers['x-amz-copy-source-if-unmodified-since'];
    const etagMatchRes = _checkEtagMatch(ifMatchHeader, contentMD5);
    const etagNoneMatchRes = _checkEtagNoneMatch(ifNoneMatchHeader, contentMD5);
-    const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader,
-        lastModifiedDate);
-    const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader,
-        lastModifiedDate);
+    const { modifiedSinceRes, unmodifiedSinceRes } =
+        checkDateModifiedHeaders(headers, lastModified);
    // If-Unmodified-Since condition evaluates to false and If-Match
    // is not present, then return the error. Otherwise, If-Unmodified-Since is
    // silent when If-Match match, and when If-Match does not match, it's the
@ -120,5 +136,6 @@ module.exports = {
    _checkEtagNoneMatch,
    _checkModifiedSince,
    _checkUnmodifiedSince,
+    checkDateModifiedHeaders,
    validateConditionalHeaders,
};
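A quick sketch of the new helper in isolation (header values are illustrative; the require path is assumed):
const { checkDateModifiedHeaders } =
    require('./lib/s3middleware/validateConditionalHeaders'); // path assumed
const headers = {
    'x-amz-copy-source-if-modified-since': 'Fri, 26 Jul 2019 00:00:00 GMT',
};
const lastModified = 'Sat, 27 Jul 2019 12:00:00 GMT';
// The object was modified after the supplied date, so a copy guarded by this
// header may proceed; unmodifiedSinceRes carries no error because no
// if-unmodified-since header was sent.
const { modifiedSinceRes, unmodifiedSinceRes } =
    checkDateModifiedHeaders(headers, lastModified);
console.log(modifiedSinceRes, unmodifiedSinceRes);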

View File

@ -10,6 +10,8 @@ const routeOPTIONS = require('./routes/routeOPTIONS');
const routesUtils = require('./routesUtils');
const routeWebsite = require('./routes/routeWebsite');
+const { objectKeyByteLimit } = require('../constants');
const routeMap = {
    GET: routeGET,
    PUT: routePUT,
@ -55,8 +57,14 @@ function checkBucketAndKey(bucketName, objectKey, method, reqQuery,
            blacklistedPrefixes.object);
        if (!result.isValid) {
            log.debug('invalid object key', { objectKey });
-            return errors.InvalidArgument.customizeDescription('Object key ' +
-                `must not start with "${result.invalidPrefix}".`);
+            if (result.invalidPrefix) {
+                return errors.InvalidArgument.customizeDescription('Invalid ' +
+                    'prefix - object key cannot start with ' +
+                    `"${result.invalidPrefix}".`);
+            }
+            return errors.KeyTooLong.customizeDescription('Object key is too ' +
+                'long. Maximum number of bytes allowed in keys is ' +
+                `${objectKeyByteLimit}.`);
        }
    }
    if ((reqQuery.partNumber || reqQuery.uploadId)
@ -167,7 +175,8 @@ function routes(req, res, params, logger) {
        logger.newRequestLoggerFromSerializedUids(reqUids) :
        logger.newRequestLogger());
-    if (!req.url.startsWith('/_/healthcheck')) {
+    if (!req.url.startsWith('/_/healthcheck') &&
+        !req.url.startsWith('/_/report')) {
        log.info('received request', clientInfo);
    }

View File

@ -45,6 +45,13 @@ function routeDELETE(request, response, api, log, statsClient) {
            return routesUtils.responseNoBody(err, corsHeaders,
                response, 204, log);
        });
+    } else if (request.query.policy !== undefined) {
+        return api.callApiMethod('bucketDeletePolicy', request,
+            response, log, (err, corsHeaders) => {
+                routesUtils.statsReport500(err, statsClient);
+                return routesUtils.responseNoBody(err, corsHeaders,
+                    response, 204, log);
+            });
    }
    api.callApiMethod('bucketDelete', request, response, log,
        (err, corsHeaders) => {

View File

@ -71,6 +71,13 @@ function routerGET(request, response, api, log, statsClient, dataRetrievalFn) {
            return routesUtils.responseXMLBody(err, xml, response, log,
                corsHeaders);
        });
+    } else if (request.query.policy !== undefined) {
+        api.callApiMethod('bucketGetPolicy', request, response, log,
+            (err, json, corsHeaders) => {
+                routesUtils.statsReport500(err, statsClient);
+                return routesUtils.responseJSONBody(err, json, response,
+                    log, corsHeaders);
+            });
    } else {
        // GET bucket
        api.callApiMethod('bucketGet', request, response, log,

View File

@ -60,6 +60,13 @@ function routePUT(request, response, api, log, statsClient) {
            routesUtils.responseNoBody(err, corsHeaders, response, 200,
                log);
        });
+    } else if (request.query.policy !== undefined) {
+        api.callApiMethod('bucketPutPolicy', request, response, log,
+            (err, corsHeaders) => {
+                routesUtils.statsReport500(err, statsClient);
+                routesUtils.responseNoBody(err, corsHeaders, response, 200,
+                    log);
+            });
    } else {
        // PUT bucket
        return api.callApiMethod('bucketPut', request, response, log,

View File

@ -4,6 +4,8 @@ const errors = require('../errors');
const constants = require('../constants');
const { eachSeries } = require('async');
+const { objectKeyByteLimit } = require('../constants');
const responseErr = new Error();
responseErr.code = 'ResponseError';
responseErr.message = 'response closed by client request before all data sent';
@ -284,13 +286,19 @@ function retrieveData(locations, retrieveDataFn, response, log) {
        response.destroy();
        responseDestroyed = true;
    };
+    const _destroyReadable = readable => {
+        // the s3-data backend only sends a plain Readable stream, which does
+        // not implement destroy()
+        if (readable && readable.destroy) {
+            readable.destroy();
+        }
+    };
    // the S3-client might close the connection while we are processing it
    response.once('close', () => {
        log.debug('received close event before response end');
        responseDestroyed = true;
-        if (currentStream) {
-            currentStream.destroy();
-        }
+        _destroyReadable(currentStream);
    });
    return eachSeries(locations,
@ -311,7 +319,7 @@ function retrieveData(locations, retrieveDataFn, response, log) {
            if (responseDestroyed || response.isclosed) {
                log.debug(
                    'response destroyed before readable could stream');
-                readable.destroy();
+                _destroyReadable(readable);
                return next(responseErr);
            }
            // readable stream successfully consumed
@ -868,6 +876,9 @@ const routesUtils = {
        if (invalidPrefix) {
            return { isValid: false, invalidPrefix };
        }
+        if (Buffer.byteLength(objectKey, 'utf8') > objectKeyByteLimit) {
+            return { isValid: false };
+        }
        return { isValid: true };
    },

View File

@ -10,6 +10,7 @@ const errors = require('../../../errors');
const stringHash = require('../../../stringHash');
const jsutil = require('../../../jsutil');
const storageUtils = require('../../utils');
+const releasePageCacheSync = require('./utils');
// The FOLDER_HASH constant refers to the number of base directories
// used for directory hashing of stored objects.
@ -43,6 +44,8 @@ class DataFileStore {
 * sync calls that ensure files and directories are fully
 * written on the physical drive before returning an
 * answer. Used to speed up unit tests, may have other uses.
+ * @param {Boolean} [dataConfig.noCache=false] - If true, attempt
+ * to free page caches associated with the managed files
 * @param {werelogs.API} [logApi] - object providing a constructor function
 * for the Logger object
 */
@ -50,6 +53,7 @@ class DataFileStore {
    this.logger = new (logApi || werelogs).Logger('DataFileStore');
    this.dataPath = dataConfig.dataPath;
    this.noSync = dataConfig.noSync || false;
+    this.noCache = dataConfig.noCache || false;
}
/**
@ -149,10 +153,29 @@ class DataFileStore {
        return cbOnce(null, key);
    }
    if (this.noSync) {
+        /*
+         * The kernel is not guaranteed to release page caches
+         * when this.noSync is true. To ensure this behavior,
+         * set this.noSync to false.
+         */
+        if (this.noCache) {
+            releasePageCacheSync(filePath, fd, log);
+        }
        fs.closeSync(fd);
        return ok();
    }
    fs.fsync(fd, err => {
+        /*
+         * Disabling the caching of stored files is a temporary
+         * fix for
+         * https://github.com/kubernetes/kubernetes/issues/43916
+         * which causes cache memory to be accounted as RSS memory
+         * for the pod and can potentially cause the pod
+         * to be killed under memory pressure.
+         */
+        if (this.noCache) {
+            releasePageCacheSync(filePath, fd, log);
+        }
        fs.close(fd, err => {
            if (err) {
                log.error('error closing fd after write',
@ -193,6 +216,15 @@ class DataFileStore {
            return cbOnce(errors.InternalError.customizeDescription(
                `read stream error: ${err.code}`));
        });
+        dataStream.on('close', () => {
+            // this means the underlying socket has been closed
+            log.debug('Client closed socket while streaming',
+                { method: 'put', key, filePath });
+            // destroying the write stream forces a close(fd)
+            fileStream.destroy();
+            // we need to unlink the file ourselves
+            fs.unlinkSync(filePath);
+        });
        return undefined;
    });
}
@ -241,7 +273,7 @@ class DataFileStore {
        flags: 'r',
        encoding: null,
        fd: null,
-        autoClose: true,
+        autoClose: false,
    };
    if (byteRange) {
        readStreamOptions.start = byteRange[0];
@ -251,18 +283,31 @@ class DataFileStore {
        { method: 'get', key, filePath, byteRange });
    const cbOnce = jsutil.once(callback);
    const rs = fs.createReadStream(filePath, readStreamOptions)
        .on('error', err => {
            if (err.code === 'ENOENT') {
                return cbOnce(errors.ObjNotFound);
            }
            log.error('error retrieving file',
-                { method: 'get', key, filePath,
+                { method: 'DataFileStore.get', key, filePath,
                    error: err });
            return cbOnce(
                errors.InternalError.customizeDescription(
                    `filesystem read error: ${err.code}`));
        })
-        .on('open', () => { cbOnce(null, rs); });
+        .on('open', () => { cbOnce(null, rs); })
+        .on('end', () => {
+            if (this.noCache) {
+                releasePageCacheSync(filePath, rs.fd, log);
+            }
+            fs.close(rs.fd, err => {
+                if (err) {
+                    log.error('unable to close file descriptor', {
+                        method: 'DataFileStore.get', key, filePath,
+                        error: err,
+                    });
+                }
+            });
+        });
}
/**

View File

@ -0,0 +1,19 @@
const posixFadvise = require('fcntl');
/**
* Release free cached pages associated with a file
*
* @param {String} filePath - absolute path of the associated file
* @param {Int} fd - file descriptor of the associated file
* @param {werelogs.RequestLogger} log - logging object
* @return {undefined}
*/
function releasePageCacheSync(filePath, fd, log) {
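    // advice value 4 is POSIX_FADV_DONTNEED: ask the kernel to drop the
    // cached pages backing this file descriptor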
const ret = posixFadvise(fd, 0, 0, 4);
if (ret !== 0) {
log.warning(
`error fadv_dontneed ${filePath} returned ${ret}`);
}
}
module.exports = releasePageCacheSync;

View File

@ -87,6 +87,7 @@ class MetadataWrapper {
            database: params.mongodb.database,
            replicationGroupId: params.replicationGroupId,
            path: params.mongodb.path,
+            config: params.config,
            logger,
        });
        this.implName = 'mongoclient';
@ -109,7 +110,7 @@ class MetadataWrapper {
        if (this.client.setup) {
            return this.client.setup(done);
        }
-        return process.nextTick(() => done);
+        return process.nextTick(done);
    }
    createBucket(bucketName, bucketMD, log, cb) {

View File

@ -0,0 +1,421 @@
const cluster = require('cluster');
const async = require('async');
const errors = require('../../../errors');
const BucketInfo = require('../../../models/BucketInfo');
const list = require('../../../algos/list/exportAlgos');
const MetadataFileClient = require('./MetadataFileClient');
const versionSep =
require('../../../versioning/constants')
.VersioningConstants.VersionId.Separator;
const METASTORE = '__metastore';
const itemScanRefreshDelay = 1000 * 30 * 60; // 30 minutes
class BucketFileInterface {
/**
* @constructor
* @param {object} [params] - constructor params
* @param {boolean} [params.noDbOpen=false] - true to skip DB open
* (for unit tests only)
* @param {object} logger - logger
*/
constructor(params, logger) {
this.logger = logger;
const { host, port } = params.metadataClient;
this.constants = params.constants;
this.mdClient = new MetadataFileClient({ host, port });
if (params && params.noDbOpen) {
return;
}
this.lastItemScanTime = null;
this.lastItemScanResult = null;
}
setup(done) {
return this.mdClient.openDB((err, value) => {
if (err) {
throw err;
}
// the metastore sublevel is used to store bucket attributes
this.mdDB = value;
this.metastore = this.mdDB.openSub(METASTORE);
if (cluster.isMaster) {
this.setupMetadataServer(done);
}
});
}
setupMetadataServer(done) {
/* Since the bucket creation API is expecting the
usersBucket to have attributes, we pre-create the
usersBucket attributes here */
this.mdClient.logger.debug('setting up metadata server');
const usersBucketAttr = new BucketInfo(this.constants.usersBucket,
'admin', 'admin', new Date().toJSON(),
BucketInfo.currentModelVersion());
return this.metastore.put(
this.constants.usersBucket,
usersBucketAttr.serialize(), {}, err => {
if (err) {
this.logger.fatal('error writing usersBucket ' +
'attributes to metadata',
{ error: err });
throw (errors.InternalError);
}
return done();
});
}
/**
* Load DB if exists
* @param {String} bucketName - name of bucket
* @param {Object} log - logger
* @param {function} cb - callback(err, db, attr)
* @return {undefined}
*/
loadDBIfExists(bucketName, log, cb) {
this.getBucketAttributes(bucketName, log, (err, attr) => {
if (err) {
return cb(err);
}
try {
const db = this.mdDB.openSub(bucketName);
return cb(null, db, attr);
} catch (err) {
return cb(errors.InternalError);
}
});
return undefined;
}
createBucket(bucketName, bucketMD, log, cb) {
this.getBucketAttributes(bucketName, log, err => {
if (err && err !== errors.NoSuchBucket) {
return cb(err);
}
if (err === undefined) {
return cb(errors.BucketAlreadyExists);
}
this.lastItemScanTime = null;
this.putBucketAttributes(bucketName,
bucketMD,
log, cb);
return undefined;
});
}
getBucketAttributes(bucketName, log, cb) {
this.metastore
.withRequestLogger(log)
.get(bucketName, {}, (err, data) => {
if (err) {
if (err.ObjNotFound) {
return cb(errors.NoSuchBucket);
}
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error getting db attributes', logObj);
return cb(errors.InternalError);
}
return cb(null, BucketInfo.deSerialize(data));
});
return undefined;
}
getBucketAndObject(bucketName, objName, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db, bucketAttr) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log)
.get(objName, params, (err, objAttr) => {
if (err) {
if (err.ObjNotFound) {
return cb(null, {
bucket: bucketAttr.serialize(),
});
}
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error getting object', logObj);
return cb(errors.InternalError);
}
return cb(null, {
bucket: bucketAttr.serialize(),
obj: objAttr,
});
});
return undefined;
});
return undefined;
}
putBucketAttributes(bucketName, bucketMD, log, cb) {
this.metastore
.withRequestLogger(log)
.put(bucketName, bucketMD.serialize(), {}, err => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error putting db attributes', logObj);
return cb(errors.InternalError);
}
return cb();
});
return undefined;
}
deleteBucket(bucketName, log, cb) {
this.metastore
.withRequestLogger(log)
.del(bucketName, {}, err => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error deleting bucket',
logObj);
return cb(errors.InternalError);
}
this.lastItemScanTime = null;
return cb();
});
return undefined;
}
putObject(bucketName, objName, objVal, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log)
.put(objName, JSON.stringify(objVal), params, (err, data) => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error putting object', logObj);
return cb(errors.InternalError);
}
return cb(err, data);
});
return undefined;
});
}
getObject(bucketName, objName, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log).get(objName, params, (err, data) => {
if (err) {
if (err.ObjNotFound) {
return cb(errors.NoSuchKey);
}
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error getting object', logObj);
return cb(errors.InternalError);
}
return cb(null, JSON.parse(data));
});
return undefined;
});
}
deleteObject(bucketName, objName, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log).del(objName, params, err => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error deleting object', logObj);
return cb(errors.InternalError);
}
return cb();
});
return undefined;
});
}
/**
* This complex function deals with different extensions of bucket listing:
* Delimiter based search or MPU based search.
* @param {String} bucketName - The name of the bucket to list
* @param {Object} params - The params to search
* @param {Object} log - The logger object
* @param {function} cb - Callback when done
* @return {undefined}
*/
internalListObject(bucketName, params, log, cb) {
const extName = params.listingType;
const extension = new list[extName](params, log);
const requestParams = extension.genMDParams();
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
let cbDone = false;
db.withRequestLogger(log)
.createReadStream(requestParams, (err, stream) => {
if (err) {
return cb(err);
}
stream
.on('data', e => {
if (extension.filter(e) < 0) {
stream.emit('end');
stream.destroy();
}
})
.on('error', err => {
if (!cbDone) {
cbDone = true;
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error listing objects', logObj);
cb(errors.InternalError);
}
})
.on('end', () => {
if (!cbDone) {
cbDone = true;
const data = extension.result();
cb(null, data);
}
});
return undefined;
});
return undefined;
});
}
listObject(bucketName, params, log, cb) {
return this.internalListObject(bucketName, params, log, cb);
}
listMultipartUploads(bucketName, params, log, cb) {
return this.internalListObject(bucketName, params, log, cb);
}
getUUID(log, cb) {
return this.mdDB.getUUID(cb);
}
getDiskUsage(cb) {
return this.mdDB.getDiskUsage(cb);
}
countItems(log, cb) {
if (this.lastItemScanTime !== null &&
(Date.now() - this.lastItemScanTime) <= itemScanRefreshDelay) {
return process.nextTick(cb, null, this.lastItemScanResult);
}
const params = {};
const extension = new list.Basic(params, log);
const requestParams = extension.genMDParams();
const res = {
objects: 0,
versions: 0,
buckets: 0,
bucketList: [],
};
let cbDone = false;
this.mdDB.rawListKeys(requestParams, (err, stream) => {
if (err) {
return cb(err);
}
stream
.on('data', e => {
if (!e.includes(METASTORE)) {
if (e.includes(this.constants.usersBucket)) {
res.buckets++;
res.bucketList.push({
name: e.split(this.constants.splitter)[1],
});
} else if (e.includes(versionSep)) {
res.versions++;
} else if (!e.includes('..recordLogs#s3-recordlog')) {
res.objects++;
}
}
})
.on('error', err => {
if (!cbDone) {
cbDone = true;
const logObj = {
error: err,
errorMessage: err.message,
errorStack: err.stack,
};
log.error('error listing objects', logObj);
cb(errors.InternalError);
}
})
.on('end', () => {
if (!cbDone) {
cbDone = true;
async.eachSeries(res.bucketList, (bucket, cb) => {
this.getBucketAttributes(bucket.name, log,
(err, bucketInfo) => {
if (err) {
return cb(err);
}
/* eslint-disable no-param-reassign */
bucket.location =
bucketInfo.getLocationConstraint();
/* eslint-enable no-param-reassign */
return cb();
});
}, err => {
if (!err) {
this.lastItemScanTime = Date.now();
this.lastItemScanResult = res;
}
return cb(err, res);
});
}
return undefined;
});
return undefined;
});
return undefined;
}
}
module.exports = BucketFileInterface;
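A rough usage sketch of the interface above; it assumes a Metadata file server is reachable on the configured host/port and that the constants object carries the usual usersBucket/splitter values (both assumed here):
const werelogs = require('werelogs');
const BucketFileInterface = require('./BucketFileInterface'); // path assumed
const logger = new werelogs.Logger('BucketFileExample');
const log = logger.newRequestLogger();
const md = new BucketFileInterface({
    metadataClient: { host: 'localhost', port: 9990 }, // port assumed
    constants: { usersBucket: 'users..bucket', splitter: '..|..' }, // assumed
}, logger);
md.setup(() => {
    md.listObject('example-bucket',
        { listingType: 'DelimiterMaster', maxKeys: 10 }, log,
        (err, result) => {
            if (err) {
                return log.error('listing failed', { error: err });
            }
            return log.info('listing done', { keys: result.Contents.length });
        });
});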

View File

@ -58,8 +58,7 @@ class MetadataFileClient {
            logger: this.logger,
            callTimeoutMs: this.callTimeoutMs,
        });
-        dbClient.connect(done);
-        return dbClient;
+        return dbClient.connect(() => done(null, dbClient));
    }
    /**
/** /**

View File

@ -0,0 +1,32 @@
const ListResult = require('./ListResult');
class ListMultipartUploadsResult extends ListResult {
constructor() {
super();
this.Uploads = [];
this.NextKeyMarker = undefined;
this.NextUploadIdMarker = undefined;
}
addUpload(uploadInfo) {
this.Uploads.push({
key: decodeURIComponent(uploadInfo.key),
value: {
UploadId: uploadInfo.uploadId,
Initiator: {
ID: uploadInfo.initiatorID,
DisplayName: uploadInfo.initiatorDisplayName,
},
Owner: {
ID: uploadInfo.ownerID,
DisplayName: uploadInfo.ownerDisplayName,
},
StorageClass: uploadInfo.storageClass,
Initiated: uploadInfo.initiated,
},
});
this.MaxKeys += 1;
}
}
module.exports = ListMultipartUploadsResult;

View File

@ -0,0 +1,27 @@
class ListResult {
constructor() {
this.IsTruncated = false;
this.NextMarker = undefined;
this.CommonPrefixes = [];
/*
Note: this.MaxKeys will get incremented as
keys are added so that when response is returned,
this.MaxKeys will equal total keys in response
(with each CommonPrefix counting as 1 key)
*/
this.MaxKeys = 0;
}
addCommonPrefix(prefix) {
if (!this.hasCommonPrefix(prefix)) {
this.CommonPrefixes.push(prefix);
this.MaxKeys += 1;
}
}
hasCommonPrefix(prefix) {
return (this.CommonPrefixes.indexOf(prefix) !== -1);
}
}
module.exports = ListResult;

View File

@ -0,0 +1,62 @@
# bucket_mem design
## RATIONALE
The bucket API will be used for managing buckets behind the S3 interface.
We plan to have only 2 backends using this interface:
* One production backend
* One debug backend purely in memory
One important remark here is that we don't want an abstraction but a
duck-typing style interface (different classes MemoryBucket and Bucket having
the same methods putObjectMD(), getObjectMD(), etc).
Notes about the memory backend: The backend is currently a simple key/value
store in memory. The functions actually use nextTick() to emulate the future
asynchronous behavior of the production backend.
## BUCKET API
The bucket API is a very simple API with 5 functions:
- putObjectMD(): put metadata for an object in the bucket
- getObjectMD(): get metadata from the bucket
- deleteObjectMD(): delete metadata for an object from the bucket
- deleteBucketMD(): delete a bucket
- getBucketListObjects(): perform the complex bucket listing AWS search
function with various flavors. This function returns a response in a
ListBucketResult object.
getBucketListObjects(prefix, marker, delimiter, maxKeys, callback) behavior is
the following:
- prefix (not required): Limits the response to keys that begin with the
  specified prefix. You can use prefixes to separate a bucket into different
  groupings of keys. (You can think of using prefix to make groups in the same
  way you'd use a folder in a file system.)
- marker (not required): Specifies the key to start with when listing objects
  in a bucket. Amazon S3 returns object keys in alphabetical order, starting
  with key after the marker in order.
- delimiter (not required): A delimiter is a character you use to group keys.
  All keys that contain the same string between the prefix, if specified, and
  the first occurrence of the delimiter after the prefix are grouped under a
  single result element, CommonPrefixes. If you don't specify the prefix
  parameter, then the substring starts at the beginning of the key. The keys
  that are grouped under CommonPrefixes are not returned elsewhere in the
  response.
- maxKeys: Sets the maximum number of keys returned in the response body. You
  can add this to your request if you want to retrieve fewer than the default
  1000 keys. The response might contain fewer keys but will never contain
  more. If there are additional keys that satisfy the search criteria but were
  not returned because maxKeys was exceeded, the response contains an
  attribute of IsTruncated set to true and a NextMarker. To return the
  additional keys, call the function again using NextMarker as your marker
  argument in the function.

Any key that does not contain the delimiter will be returned individually in
Contents rather than in CommonPrefixes.

If there is an error, the error subfield is returned in the response.
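
A short illustration of these listing semantics (hypothetical bucket containing
photos/2019/a.jpg, photos/2019/b.jpg and index.html):

    getBucketListObjects('photos/', null, '/', 1000, (err, result) => {
        // both matching keys contain the delimiter after the prefix, so they
        // are grouped: result.CommonPrefixes === ['photos/2019/'],
        // result.Contents is empty and result.IsTruncated === false
    });
    getBucketListObjects(null, null, '/', 1000, (err, result) => {
        // index.html contains no delimiter and is returned in result.Contents;
        // the photos/ keys are grouped under the CommonPrefixes entry 'photos/'
    });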

View File

@ -0,0 +1,34 @@
function markerFilterMPU(allMarkers, array) {
const { keyMarker, uploadIdMarker } = allMarkers;
// 1. if the item key matches the keyMarker and an uploadIdMarker exists,
// find the first uploadId in the array that is alphabetically after
// uploadIdMarker
// 2. if the item key does not match the keyMarker, find the first uploadId
// in the array that is alphabetically after keyMarker
const firstUnfilteredIndex = array.findIndex(
item => (uploadIdMarker && item.key === keyMarker ?
item.uploadId > uploadIdMarker :
item.key > keyMarker));
return firstUnfilteredIndex !== -1 ? array.slice(firstUnfilteredIndex) : [];
}
function prefixFilter(prefix, array) {
for (let i = 0; i < array.length; i++) {
if (array[i].indexOf(prefix) !== 0) {
array.splice(i, 1);
i--;
}
}
return array;
}
function isKeyInContents(responseObject, key) {
return responseObject.Contents.some(val => val.key === key);
}
module.exports = {
markerFilterMPU,
prefixFilter,
isKeyInContents,
};
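A tiny illustration of the two filters above, on a hypothetical upload list:
const uploads = [
    { key: 'a.txt', uploadId: '111' },
    { key: 'a.txt', uploadId: '222' },
    { key: 'b.txt', uploadId: '333' },
];
// keep only uploads strictly after keyMarker 'a.txt' / uploadIdMarker '111'
markerFilterMPU({ keyMarker: 'a.txt', uploadIdMarker: '111' }, uploads);
// => [{ key: 'a.txt', uploadId: '222' }, { key: 'b.txt', uploadId: '333' }]
prefixFilter('a', ['a.txt', 'b.txt']); // => ['a.txt'] (array edited in place)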

View File

@ -0,0 +1,148 @@
const errors = require('../../../errors');
const { markerFilterMPU, prefixFilter } = require('./bucket_utilities');
const ListMultipartUploadsResult = require('./ListMultipartUploadsResult');
const { metadata } = require('./metadata');
const defaultMaxKeys = 1000;
function getMultipartUploadListing(bucket, params, callback) {
const { delimiter, keyMarker,
uploadIdMarker, prefix, queryPrefixLength, splitter } = params;
const splitterLen = splitter.length;
const maxKeys = params.maxKeys !== undefined ?
Number.parseInt(params.maxKeys, 10) : defaultMaxKeys;
const response = new ListMultipartUploadsResult();
const keyMap = metadata.keyMaps.get(bucket.getName());
if (prefix) {
response.Prefix = prefix;
if (typeof prefix !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (keyMarker) {
response.KeyMarker = keyMarker;
if (typeof keyMarker !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (uploadIdMarker) {
response.UploadIdMarker = uploadIdMarker;
if (typeof uploadIdMarker !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (delimiter) {
response.Delimiter = delimiter;
if (typeof delimiter !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (maxKeys && typeof maxKeys !== 'number') {
return callback(errors.InvalidArgument);
}
// Sort uploads alphabetically by objectKey and if same objectKey,
// then sort in ascending order by time initiated
let uploads = [];
keyMap.forEach((val, key) => {
uploads.push(key);
});
uploads.sort((a, b) => {
const aIndex = a.indexOf(splitter);
const bIndex = b.indexOf(splitter);
const aObjectKey = a.substring(aIndex + splitterLen);
const bObjectKey = b.substring(bIndex + splitterLen);
const aInitiated = keyMap.get(a).initiated;
const bInitiated = keyMap.get(b).initiated;
if (aObjectKey === bObjectKey) {
if (Date.parse(aInitiated) >= Date.parse(bInitiated)) {
return 1;
}
if (Date.parse(aInitiated) < Date.parse(bInitiated)) {
return -1;
}
}
return (aObjectKey < bObjectKey) ? -1 : 1;
});
// Edit the uploads array so it only
// contains keys that contain the prefix
uploads = prefixFilter(prefix, uploads);
uploads = uploads.map(stringKey => {
const index = stringKey.indexOf(splitter);
const index2 = stringKey.indexOf(splitter, index + splitterLen);
const storedMD = keyMap.get(stringKey);
return {
key: stringKey.substring(index + splitterLen, index2),
uploadId: stringKey.substring(index2 + splitterLen),
bucket: storedMD.eventualStorageBucket,
initiatorID: storedMD.initiator.ID,
initiatorDisplayName: storedMD.initiator.DisplayName,
ownerID: storedMD['owner-id'],
ownerDisplayName: storedMD['owner-display-name'],
storageClass: storedMD['x-amz-storage-class'],
initiated: storedMD.initiated,
};
});
// If keyMarker specified, edit the uploads array so it
// only contains keys that occur alphabetically after the marker.
// If there is also an uploadIdMarker specified, filter to eliminate
// any uploads that share the keyMarker and have an uploadId before
// the uploadIdMarker.
if (keyMarker) {
const allMarkers = {
keyMarker,
uploadIdMarker,
};
uploads = markerFilterMPU(allMarkers, uploads);
}
// Iterate through uploads and filter uploads
// with keys containing delimiter
// into response.CommonPrefixes and filter remaining uploads
// into response.Uploads
for (let i = 0; i < uploads.length; i++) {
const currentUpload = uploads[i];
// If hit maxKeys, stop adding keys to response
if (response.MaxKeys >= maxKeys) {
response.IsTruncated = true;
break;
}
// If a delimiter is specified, find its
// index in the current key AFTER THE OCCURRENCE OF THE PREFIX
// THAT WAS SENT IN THE QUERY (not the prefix including the splitter
// and other elements)
let delimiterIndexAfterPrefix = -1;
const currentKeyWithoutPrefix =
currentUpload.key.slice(queryPrefixLength);
let sliceEnd;
if (delimiter) {
delimiterIndexAfterPrefix = currentKeyWithoutPrefix
.indexOf(delimiter);
sliceEnd = delimiterIndexAfterPrefix + queryPrefixLength;
}
// If delimiter occurs in current key, add key to
// response.CommonPrefixes.
// Otherwise add upload to response.Uploads
if (delimiterIndexAfterPrefix > -1) {
const keySubstring = currentUpload.key.slice(0, sliceEnd + 1);
response.addCommonPrefix(keySubstring);
} else {
response.NextKeyMarker = currentUpload.key;
response.NextUploadIdMarker = currentUpload.uploadId;
response.addUpload(currentUpload);
}
}
// `response.MaxKeys` should be the value from the original `MaxUploads`
// parameter specified by the user (or else the default 1000). Redefine it
// here, so it does not equal the value of `uploads.length`.
response.MaxKeys = maxKeys;
// If `response.MaxKeys` is 0, `response.IsTruncated` should be `false`.
response.IsTruncated = maxKeys === 0 ? false : response.IsTruncated;
return callback(null, response);
}
module.exports = getMultipartUploadListing;

View File

@ -0,0 +1,8 @@
const metadata = {
buckets: new Map,
keyMaps: new Map,
};
module.exports = {
metadata,
};

View File

@ -0,0 +1,333 @@
const errors = require('../../../errors');
const list = require('../../../algos/list/exportAlgos');
const genVID =
require('../../../versioning/VersionID').generateVersionId;
const getMultipartUploadListing = require('./getMultipartUploadListing');
const { metadata } = require('./metadata');
// const genVID = versioning.VersionID.generateVersionId;
const defaultMaxKeys = 1000;
let uidCounter = 0;
function generateVersionId(replicationGroupId) {
return genVID(uidCounter++, replicationGroupId);
}
function formatVersionKey(key, versionId) {
return `${key}\0${versionId}`;
}
function inc(str) {
return str ? (str.slice(0, str.length - 1) +
String.fromCharCode(str.charCodeAt(str.length - 1) + 1)) : str;
}
const metastore = {
createBucket: (bucketName, bucketMD, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
// TODO Check whether user already owns the bucket,
// if so return "BucketAlreadyOwnedByYou"
// If not owned by user, return "BucketAlreadyExists"
if (bucket) {
return cb(errors.BucketAlreadyExists);
}
metadata.buckets.set(bucketName, bucketMD);
metadata.keyMaps.set(bucketName, new Map);
return cb();
});
});
},
putBucketAttributes: (bucketName, bucketMD, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
metadata.buckets.set(bucketName, bucketMD);
return cb();
});
});
},
getBucketAttributes: (bucketName, log, cb) => {
process.nextTick(() => {
if (!metadata.buckets.has(bucketName)) {
return cb(errors.NoSuchBucket);
}
return cb(null, metadata.buckets.get(bucketName));
});
},
deleteBucket: (bucketName, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
if (metadata.keyMaps.has(bucketName)
&& metadata.keyMaps.get(bucketName).length > 0) {
return cb(errors.BucketNotEmpty);
}
metadata.buckets.delete(bucketName);
metadata.keyMaps.delete(bucketName);
return cb(null);
});
});
},
putObject: (bucketName, objName, objVal, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
/*
valid combinations of versioning options:
- !versioning && !versionId: normal non-versioning put
- versioning && !versionId: create a new version
- versionId: update (PUT/DELETE) an existing version,
and also update master version in case the put
version is newer or same version than master.
if versionId === '' update master version
*/
if (params && params.versionId) {
objVal.versionId = params.versionId; // eslint-disable-line
const mst = metadata.keyMaps.get(bucketName).get(objName);
if (mst && mst.versionId === params.versionId || !mst) {
metadata.keyMaps.get(bucketName).set(objName, objVal);
}
// eslint-disable-next-line
objName = formatVersionKey(objName, params.versionId);
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null, `{"versionId":"${objVal.versionId}"}`);
}
if (params && params.versioning) {
const versionId = generateVersionId();
objVal.versionId = versionId; // eslint-disable-line
metadata.keyMaps.get(bucketName).set(objName, objVal);
// eslint-disable-next-line
objName = formatVersionKey(objName, versionId);
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null, `{"versionId":"${versionId}"}`);
}
if (params && params.versionId === '') {
const versionId = generateVersionId();
objVal.versionId = versionId; // eslint-disable-line
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null, `{"versionId":"${objVal.versionId}"}`);
}
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null);
});
});
},
getBucketAndObject: (bucketName, objName, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
if (err) {
return cb(err, { bucket });
}
if (params && params.versionId) {
// eslint-disable-next-line
objName = formatVersionKey(objName, params.versionId);
}
if (!metadata.keyMaps.has(bucketName)
|| !metadata.keyMaps.get(bucketName).has(objName)) {
return cb(null, { bucket: bucket.serialize() });
}
return cb(null, {
bucket: bucket.serialize(),
obj: JSON.stringify(
metadata.keyMaps.get(bucketName).get(objName)
),
});
});
});
},
getObject: (bucketName, objName, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
if (params && params.versionId) {
// eslint-disable-next-line
objName = formatVersionKey(objName, params.versionId);
}
if (!metadata.keyMaps.has(bucketName)
|| !metadata.keyMaps.get(bucketName).has(objName)) {
return cb(errors.NoSuchKey);
}
return cb(null, metadata.keyMaps.get(bucketName).get(objName));
});
});
},
deleteObject: (bucketName, objName, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
if (!metadata.keyMaps.get(bucketName).has(objName)) {
return cb(errors.NoSuchKey);
}
if (params && params.versionId) {
const baseKey = inc(formatVersionKey(objName, ''));
const vobjName = formatVersionKey(objName,
params.versionId);
metadata.keyMaps.get(bucketName).delete(vobjName);
const mst = metadata.keyMaps.get(bucketName).get(objName);
if (mst.versionId === params.versionId) {
const keys = [];
metadata.keyMaps.get(bucketName).forEach((val, key) => {
if (key < baseKey && key > vobjName) {
keys.push(key);
}
});
if (keys.length === 0) {
metadata.keyMaps.get(bucketName).delete(objName);
return cb();
}
const key = keys.sort()[0];
const value = metadata.keyMaps.get(bucketName).get(key);
metadata.keyMaps.get(bucketName).set(objName, value);
}
return cb();
}
metadata.keyMaps.get(bucketName).delete(objName);
return cb();
});
});
},
_hasDeleteMarker(key, keyMap) {
const objectMD = keyMap.get(key);
if (objectMD['x-amz-delete-marker'] !== undefined) {
return (objectMD['x-amz-delete-marker'] === true);
}
return false;
},
listObject(bucketName, params, log, cb) {
process.nextTick(() => {
const {
prefix,
marker,
delimiter,
maxKeys,
continuationToken,
startAfter,
} = params;
if (prefix && typeof prefix !== 'string') {
return cb(errors.InvalidArgument);
}
if (marker && typeof marker !== 'string') {
return cb(errors.InvalidArgument);
}
if (delimiter && typeof delimiter !== 'string') {
return cb(errors.InvalidArgument);
}
if (maxKeys && typeof maxKeys !== 'number') {
return cb(errors.InvalidArgument);
}
if (continuationToken && typeof continuationToken !== 'string') {
return cb(errors.InvalidArgument);
}
if (startAfter && typeof startAfter !== 'string') {
return cb(errors.InvalidArgument);
}
// If paramMaxKeys is undefined, the default parameter will set it.
// However, if it is null, the default parameter will not set it.
let numKeys = maxKeys;
if (numKeys === null) {
numKeys = defaultMaxKeys;
}
if (!metadata.keyMaps.has(bucketName)) {
return cb(errors.NoSuchBucket);
}
// If marker specified, edit the keys array so it
// only contains keys that occur alphabetically after the marker
const listingType = params.listingType;
const extension = new list[listingType](params, log);
const listingParams = extension.genMDParams();
const keys = [];
metadata.keyMaps.get(bucketName).forEach((val, key) => {
if (listingParams.gt && listingParams.gt >= key) {
return null;
}
if (listingParams.gte && listingParams.gte > key) {
return null;
}
if (listingParams.lt && key >= listingParams.lt) {
return null;
}
if (listingParams.lte && key > listingParams.lte) {
return null;
}
return keys.push(key);
});
keys.sort();
// Iterate through keys array and filter keys containing
// delimiter into response.CommonPrefixes and filter remaining
// keys into response.Contents
for (let i = 0; i < keys.length; ++i) {
const currentKey = keys[i];
// Do not list object with delete markers
if (this._hasDeleteMarker(currentKey,
metadata.keyMaps.get(bucketName))) {
continue;
}
const objMD = metadata.keyMaps.get(bucketName).get(currentKey);
const value = JSON.stringify(objMD);
const obj = {
key: currentKey,
value,
};
// calling extension.filter(obj) adds obj to the listing result if it
// is not filtered out; filter() returns a negative value once the
// listing has hit max keys, which is the signal to stop.
if (extension.filter(obj) < 0) {
break;
}
}
return cb(null, extension.result());
});
},
listMultipartUploads(bucketName, listingParams, log, cb) {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
if (bucket === undefined) {
// no ongoing multipart uploads, return an empty listing
return cb(null, {
IsTruncated: false,
NextMarker: undefined,
MaxKeys: 0,
});
}
return getMultipartUploadListing(bucket, listingParams, cb);
});
});
},
};
module.exports = metastore;
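A sketch of the versioning combinations documented in putObject above, run against the in-memory metastore (the bucket metadata and the log object are illustrative; BucketInfo is constructed the same way as in BucketFileInterface):
const BucketInfo = require('../../../models/BucketInfo');
const log = {}; // a werelogs request logger in real callers
const bucketMD = new BucketInfo('example-bucket', 'owner-id', 'owner-name',
    new Date().toJSON(), BucketInfo.currentModelVersion());
metastore.createBucket('example-bucket', bucketMD, log, () => {
    // versioning && !versionId: create a new version and update the master
    metastore.putObject('example-bucket', 'key1', { 'content-length': 1 },
        { versioning: true }, log, (err, data) => {
            // data is a JSON string such as '{"versionId":"<generated id>"}'
        });
    // !versioning && !versionId: plain, non-versioned overwrite of the master
    metastore.putObject('example-bucket', 'key1', { 'content-length': 2 },
        {}, log, () => {});
});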

View File

@ -0,0 +1,274 @@
const NEW_OBJ = 0;
const NEW_VER = 1;
const UPDATE_VER = 2;
const UPDATE_MST = 3;
const RESTORE = 4;
const DEL_VER = 0;
const DEL_MST = 1;
const CURR = 'curr';
const PREV = 'prev';
function deepCopyObject(obj) {
return JSON.parse(JSON.stringify(obj));
}
class DataCounter {
/**
* DataCounter - class for keeping track of the ItemCount metrics
* @return {DataCounter} DataCounter object
*/
constructor() {
this.objects = 0;
this.versions = 0;
this.dataManaged = {
total: { curr: 0, prev: 0 },
byLocation: {},
};
this.stalled = 0;
this.populated = false;
this.transientList = {};
}
/**
* updateTransientList - update data counter list of transient locations
* @param {Object} newLocations - list of locations constraint details
* @return {undefined}
*/
updateTransientList(newLocations) {
if (newLocations && Object.keys(newLocations).length > 0) {
const tempList = {};
Object.keys(newLocations).forEach(loc => {
tempList[loc] = newLocations[loc].isTransient;
});
this.transientList = tempList;
}
}
/**
* set - set DataCounter values
* @param {Object} setVal - object containing values to be used for setting
* DataCounter
* @param {number} setVal.objects - number of master objects
* @param {number} setVal.versions - number of versioned objects
* @param {Object} setVal.dataManaged - object containing information about
* all the data managed
* @param {Object} setVal.total - object containing the total byte count of
* data managed
* @param {number} setVal.total.curr - the total byte count of master
* objects
* @param {number} setVal.total.prev - the total byte count of versioned
* objects
* @param {Object} setVal.byLocaton - object containing the information
* about data managed on each location
* @return {undefined}
*/
set(setVal) {
if (setVal) {
this.objects = setVal.objects;
this.versions = setVal.versions;
this.dataManaged = deepCopyObject(setVal.dataManaged);
this.populated = true;
this.stalled = setVal.stalled;
}
}
/**
* results - creates a deep copy of the current DataCounter values
* @return {Object} - object containing the current DataCounter values
*/
results() {
const obj = {
objects: this.objects,
versions: this.versions,
dataManaged: this.dataManaged,
stalled: this.stalled,
};
return deepCopyObject(obj);
}
/**
* addObjectFn - performing add operations
* @param {ObjectMD} currMD - new master version metadata
* @param {ObjectMD} prevMD - old master version metadata
* @param {number} type - index of the current type of add operation
* @return {undefined}
*/
addObject(currMD, prevMD, type) {
if (type !== undefined && type !== null && this.populated) {
switch (type) {
case NEW_OBJ: // add new object, replace master if needed
if (prevMD) {
this._delValue(prevMD, CURR);
this._addValue(currMD, CURR);
} else {
++this.objects;
this._addValue(currMD, CURR);
}
break;
case NEW_VER: // add new object, archive master
++this.versions;
this._delValue(prevMD, CURR);
this._addValue(prevMD, PREV);
this._addValue(currMD, CURR);
break;
case UPDATE_VER: // update archived object, replication info
this._updateObject(currMD, prevMD, PREV);
break;
case UPDATE_MST: // update master object, replication info
this._updateObject(currMD, prevMD, CURR);
break;
case RESTORE:
--this.versions;
this._delValue(currMD, PREV);
++this.objects;
this._addValue(currMD, CURR);
break;
default:
// should throw error, noop
break;
}
}
}
/**
* delObjectFn - performing del operations
* @param {ObjectMD} currMD - object metadata
* @param {number} type - index of the current type of delete operation
* @return {undefined}
*/
delObject(currMD, type) {
if (type !== undefined && type !== null && this.populated) {
switch (type) {
case DEL_VER:
--this.versions;
this._delValue(currMD, PREV);
break;
case DEL_MST:
--this.objects;
this._delValue(currMD, CURR);
break;
default:
// should throw error, noop
break;
}
}
}
_addLocation(site, size, type) {
this.dataManaged.total[type] += size;
if (!this.dataManaged.byLocation[site]) {
this.dataManaged.byLocation[site] = {
curr: 0,
prev: 0,
};
}
this.dataManaged.byLocation[site][type] += size;
}
/**
* _addValue - helper function for handling put object updates
* @param {ObjectMD} objMD - object metadata
* @param {string} type - string with value either 'curr' or 'prev'
* @return {undefined}
*/
_addValue(objMD, type) {
if (objMD) {
const { replicationInfo, 'content-length': size } = objMD;
const { backends } = replicationInfo || {};
this._addLocation(objMD.dataStoreName, size, type);
if (backends && Array.isArray(backends)) {
backends.forEach(loc => {
const { site, status } = loc;
if (status === 'COMPLETED') {
this._addLocation(site, size, type);
}
});
}
}
}
/**
* _updateObject - helper function for handling updates from replication
* info changes
* @param {ObjectMD} currMD - new object metadata
* @param {ObjectMD} prevMD - old object metadata
* @param {string} type - string with value either 'curr' or 'prev'
* @return {undefined}
*/
_updateObject(currMD, prevMD, type) {
const transientList = Object.assign({}, this.transientList);
if (currMD && prevMD) {
// check for changes in replication
const { replicationInfo: currLocs,
'content-length': size, dataStoreName } = currMD;
const { replicationInfo: prevLocs } = prevMD;
const { backends: prevBackends } = prevLocs || {};
const { backends: currBackends } = currLocs || {};
const oldLocs = {};
if (prevBackends && Array.isArray(prevBackends)) {
prevBackends.forEach(loc => {
const { site, status } = loc;
oldLocs[site] = status;
});
}
if (currBackends && Array.isArray(currBackends)) {
currBackends.forEach(loc => {
const { site, status } = loc;
if (site in oldLocs && status === 'COMPLETED' &&
oldLocs[site] !== status) {
this._addLocation(site, size, type);
}
});
}
if (currLocs.status === 'COMPLETED' &&
transientList[dataStoreName]) {
this._delLocation(dataStoreName, size, type);
}
}
}
_delLocation(site, size, type) {
if (this.dataManaged.byLocation[site]) {
this.dataManaged.total[type] -= size;
this.dataManaged.total[type] =
Math.max(0, this.dataManaged.total[type]);
this.dataManaged.byLocation[site][type] -= size;
this.dataManaged.byLocation[site][type] =
Math.max(0, this.dataManaged.byLocation[site][type]);
}
}
/**
* _delValue - helper function for handling delete object operations
* @param {ObjectMD} objMD - object metadata
* @param {string} type - string with value either 'curr' or 'prev'
* @return {undefined}
*/
_delValue(objMD, type) {
if (objMD) {
const { replicationInfo, 'content-length': size } = objMD;
const { backends } = replicationInfo || {};
this._delLocation(objMD.dataStoreName, size, type);
if (backends && Array.isArray(backends)) {
backends.forEach(loc => {
const { site, status } = loc;
if (status === 'COMPLETED') {
this._delLocation(site, size, type);
}
});
}
}
}
}
module.exports = {
NEW_OBJ,
NEW_VER,
UPDATE_VER,
UPDATE_MST,
RESTORE,
DEL_VER,
DEL_MST,
DataCounter,
};
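A minimal sketch of the counter's bookkeeping (metadata objects are trimmed to the fields the counter reads; sizes and location names are illustrative):
const counter = new DataCounter();
// addObject()/delObject() are no-ops until a baseline has been set
counter.set({
    objects: 0, versions: 0, stalled: 0,
    dataManaged: { total: { curr: 0, prev: 0 }, byLocation: {} },
});
const md = { 'content-length': 100, dataStoreName: 'us-east-1',
    replicationInfo: { backends: [] } };
counter.addObject(md, null, NEW_OBJ); // brand new master object
counter.addObject(md, md, NEW_VER);   // overwrite that archives the master
// counter.results() now reports 1 object, 1 version, and 100 bytes in both
// the 'curr' and 'prev' buckets for location 'us-east-1'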

View File

@ -0,0 +1,172 @@
const stream = require('stream');
/**
* @class ListRecordStream
* @classdesc Filter and stream records returned from a mongodb query
* cursor
*/
class ListRecordStream extends stream.Readable {
/**
* @constructor
* @param {mongodb.Cursor} mongoCursor - cursor returned by a
* mongodb query to the oplog (see
* http://mongodb.github.io/node-mongodb-native/2.0/api/Cursor.html)
* @param {werelogs.Logger} logger - logger object
* @param {string} lastSavedID - unique ID that has been persisted
* of the most recently processed entry in the oplog
* @param {string} latestOplogID - unique ID of the most recently
* added entry in the oplog
*/
constructor(mongoCursor, logger, lastSavedID, latestOplogID) {
super({ objectMode: true });
this._cursor = mongoCursor;
this._logger = logger;
this._lastSavedID = lastSavedID;
this._latestOplogID = latestOplogID;
this._lastConsumedID = null;
// this._unpublishedListing is true once we pass the oplog
// record that has the same uniqID 'h' as the last saved one. If
// we don't find it (e.g. the log rolled over before the populator
// could process its oldest entries), we will restart from the
// latest record of the oplog.
this._unpublishedListing = false;
// cf. this.getSkipCount()
this._skipCount = 0;
}
_read() {
// MongoDB cursors provide a stream interface. We choose not
// to use it though because errors may not be emitted by the
// stream when there is an issue with the connection to
// MongoDB (especially when pause()/resume() are used).
//
// Instead we use the async cursor.next() call directly to
// fetch records one at a time, errors are then forwarded in
// the callback.
this._cursor.next((err, item) => {
if (err) {
this._logger.error('mongodb cursor error', {
method: 'mongoclient.ListRecordStream._read()',
error: err.message,
});
this.emit('error', err);
return undefined;
}
if (this._processItem(item)) {
return process.nextTick(this._read.bind(this));
}
// wait until _read() gets called again
return undefined;
});
}
_processItem(itemObj) {
// always update to most recent uniqID
this._lastConsumedID = itemObj.h.toString();
// only push unpublished entries to the stream
if (!this._lastSavedID) {
// process from the first entry
this._unpublishedListing = true;
} else if (!this._unpublishedListing) {
// Once we find the oplog entry whose unique ID matches the
// stored log offset, every entry AFTER it is unpublished.
if (this._lastSavedID === this._lastConsumedID) {
this._unpublishedListing = true;
} else if (this._latestOplogID === this._lastConsumedID) {
this._logger.warn(
'did not encounter the last saved offset in oplog, ' +
'resuming processing right after the latest record ' +
'to date; some entries may have been skipped', {
lastSavedID: this._lastSavedID,
latestRecordID: this._latestOplogID,
});
this._unpublishedListing = true;
}
++this._skipCount;
return true; // read next record
}
const dbName = itemObj.ns.slice(itemObj.ns.indexOf('.') + 1);
let entry;
if (itemObj.op === 'i' &&
itemObj.o && itemObj.o._id) {
entry = {
type: 'put',
key: itemObj.o._id,
// value is given as-is for inserts
value: JSON.stringify(itemObj.o.value),
};
} else if (itemObj.op === 'u' &&
itemObj.o && itemObj.o2 && itemObj.o2._id) {
entry = {
type: 'put', // updates overwrite the whole metadata,
// so they are considered as puts
key: itemObj.o2._id,
// updated value may be either stored directly in 'o'
// attribute or in '$set' attribute (supposedly when
// the object pre-exists it will be in '$set')
value: JSON.stringify(
(itemObj.o.$set ? itemObj.o.$set : itemObj.o).value),
};
} else if (itemObj.op === 'd' &&
itemObj.o && itemObj.o._id) {
entry = {
type: 'delete',
key: itemObj.o._id,
// deletion yields no value
};
} else {
// skip other entry types as we don't need them for now
// ('c', ...?)
++this._skipCount;
return true; // read next record
}
const streamObject = {
timestamp: new Date((itemObj.ts ?
itemObj.ts.toNumber() * 1000 : 0)),
db: dbName,
entries: [entry],
};
// push object to the stream, then return false to wait until
// _read() is called again (because we are in an asynchronous
// context already)
this.push(streamObject);
return false;
}
/**
* Get an opaque JSON blob containing the latest consumed offset
* from MongoDB oplog.
*
* @return {string} opaque JSON blob
*/
getOffset() {
return JSON.stringify({
uniqID: this._lastConsumedID,
});
}
/**
* Get the number of entries that have been read and skipped from
* MongoDB oplog since the ListRecordStream instance was created.
*
* @return {integer} number of skipped entries
*/
getSkipCount() {
return this._skipCount;
}
/**
* Get whether the stream reached yet-unpublished records
* (i.e. after we reached either the saved unique ID, or the tip
* of the oplog)
*
* @return {boolean} true if we are now returning unpublished records
*/
reachedUnpublishedListing() {
return this._unpublishedListing;
}
}
module.exports = ListRecordStream;

View File

@ -0,0 +1,121 @@
'use strict'; // eslint-disable-line
const MongoClient = require('mongodb').MongoClient;
const ListRecordStream = require('./ListRecordStream');
/**
* @class
* @classdesc Class to consume mongo oplog
*/
class LogConsumer {
/**
* @constructor
*
* @param {object} mongoConfig - object with the mongo configuration
* @param {werelogs.Logger} logger - logger object
*/
constructor(mongoConfig, logger) {
const { replicaSetHosts, database } = mongoConfig;
this._mongoUrl = `mongodb://${replicaSetHosts}/local`;
this._logger = logger;
this._oplogNsRegExp = new RegExp(`^${database}\\.`);
// oplog collection
this._coll = null;
}
/**
* Connect to MongoDB using the Node.js driver to access the database
* and its oplog (operation log)
*
* @param {function} done - callback function, called with an error object
* or null and an object as 2nd parameter
* @return {undefined}
*/
connectMongo(done) {
MongoClient.connect(this._mongoUrl, {
replicaSet: 'rs0',
useNewUrlParser: true,
},
(err, client) => {
if (err) {
this._logger.error('Unable to connect to MongoDB',
{ error: err });
return done(err);
}
this._logger.info('connected to mongodb');
// 'local' is the database where MongoDB has oplog.rs
// capped collection
const db = client.db('local', {
ignoreUndefined: true,
});
this._coll = db.collection('oplog.rs');
return done();
});
}
/**
* Open a tailable cursor to mongo oplog and retrieve a stream of
* records to read
*
* @param {Object} [params] - params object
* @param {String} [params.startSeq] - fetch starting from this
* opaque offset returned previously by mongo ListRecordStream
* in an 'info' event
* @param {function} cb - callback function, called with an error
* object or null and an object as 2nd parameter
*
* @return {undefined}
*/
readRecords(params, cb) {
let startSeq = {};
if (params.startSeq) {
try {
// parse the opaque JSON string passed through from a
// previous 'info' event
startSeq = JSON.parse(params.startSeq);
} catch (err) {
this._logger.error('malformed startSeq', {
startSeq: params.startSeq,
});
// start over if malformed
}
}
this._readLatestOplogID((err, latestOplogID) => {
if (err) {
return cb(err);
}
return this._coll.find({
ns: this._oplogNsRegExp,
}, {
tailable: true,
awaitData: true,
noCursorTimeout: true,
numberOfRetries: Number.MAX_VALUE,
}, (err, cursor) => {
const recordStream = new ListRecordStream(
cursor, this._logger, startSeq.uniqID, latestOplogID);
return cb(null, { log: recordStream, tailable: true });
});
});
}
_readLatestOplogID(cb) {
this._coll.find({
ns: this._oplogNsRegExp,
}, {
ts: 1,
}).sort({
$natural: -1,
}).limit(1).toArray((err, data) => {
if (err) {
return cb(err);
}
const latestOplogID = data[0].h.toString();
this._logger.debug('latest oplog ID read', { latestOplogID });
return cb(null, latestOplogID);
});
}
}
module.exports = LogConsumer;
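
A minimal usage sketch (not part of the changeset) of how a consumer could tail the oplog with LogConsumer and the record stream it returns; the require path, connection settings and logger names below are illustrative assumptions:

```js
const werelogs = require('werelogs');
const LogConsumer = require('./LogConsumer'); // path is an assumption

const logger = new werelogs.Logger('OplogTailer', 'info', 'info');
const consumer = new LogConsumer({
    replicaSetHosts: 'localhost:27017', // illustrative replica set host
    database: 'metadata',               // illustrative database name
}, logger);

consumer.connectMongo(err => {
    if (err) {
        return logger.error('could not connect to MongoDB', { error: err });
    }
    // startSeq is the opaque blob returned by a previous
    // ListRecordStream.getOffset() call, if one was persisted
    return consumer.readRecords({ startSeq: undefined }, (err, res) => {
        if (err) {
            return logger.error('readRecords failed', { error: err });
        }
        res.log.on('data', rec => logger.info('oplog batch',
            { db: rec.db, entries: rec.entries.length }));
        res.log.on('error', e => logger.error('oplog stream error',
            { error: e.message }));
        return undefined;
    });
});
```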

File diff suppressed because it is too large

View File

@ -0,0 +1,173 @@
# Mongoclient
We introduce a new metadata backend called *mongoclient* for
[MongoDB](https://www.mongodb.com). This backend takes advantage of
MongoDB being a document store to store the metadata (bucket and
object attributes) as JSON objects.
## Overall Design
The mongoclient backend strictly follows the metadata interface for
storing bucket and object attributes, which consists of the methods
createBucket(), getBucketAttributes(), getBucketAndObject()
(attributes), putBucketAttributes(), deleteBucket(), putObject(),
getObject(), deleteObject(), listObject(), listMultipartUploads() and
the management methods getUUID(), getDiskUsage() and countItems(). The
mongoclient backend also knows how to deal with versioning, and it is
compatible with the various listing algorithms implemented in Arsenal.
FIXME: There should be a document describing the metadata (currently
duck-typing) interface.
### Why Use MongoDB for Storing Bucket and Object Attributes
We chose MongoDB for various reasons:
- MongoDB supports replication, especially through the Raft protocol.
- MongoDB supports a basic replication scheme called 'Replica Set' and
more advanced sharding schemes if required.
- MongoDB is open source and an enterprise standard.
- MongoDB is a document store (natively supports JSON) and supports a
very flexible search interface.
### Choice of Mongo Client Library
We chose to use the official MongoDB driver for NodeJS:
[https://github.com/mongodb/node-mongodb-native](https://github.com/mongodb/node-mongodb-native)
### Granularity for Buckets
We chose a one-to-one mapping between buckets and collections. First,
because in the simple replication mode called 'replica set' it works
out of the box; then, if one or more buckets grow too big, it is
possible to move to more advanced schemes such as sharding. MongoDB
supports a mix of sharded and non-sharded collections.
### Storing Database Information
We need a special collection called the *Infostore* (stored under the
name `__infostore`, which is impossible to create through the S3 bucket
naming scheme) to store specific database properties such as the
unique *uuid* for Orbit.
### Storing Bucket Attributes
We need to use a special collection called the *Metastore* (stored
under the name `__metastore`, which is impossible to create through the
S3 bucket naming scheme).
### Versioning Format
We chose to keep the same versioning format that we use in some other
Scality products in order to facilitate the compatibility between the
different products.
FIXME: Document the versioning internals in the upper layers and
document the versioning format
### Dealing with Concurrency
We chose not to use transactions (see
https://docs.mongodb.com/manual/tutorial/perform-two-phase-commits/)
because they carry a known overhead, and we saw no real need for them
since we can leverage MongoDB's ordered-operation guarantees and
atomic writes.
Examples of corner cases:
#### CreateBucket()
Since it is not possible to create a collection AND register the
bucket in the Metastore at the same time, we chose to only update the
Metastore. A non-existing collection (NamespaceNotFound error in
Mongo) is one possible normal state for an empty bucket.
#### DeleteBucket()
In this case the bucket is *locked* by the upper layers (through a
transient delete flag), so we don't have to worry about concurrent
access, nor about checking that the bucket is empty (which is also
done by the upper layers).
We first drop() the collection and then asynchronously delete the
bucket name entry from the Metastore (the removal from the Metastore
is atomic, which is not absolutely necessary in this case but is more
robust in terms of design).
If we fail in between we still have an entry in the metastore which is
good because we need to manage the delete flag. For the upper layers
the operation has not completed until this flag is removed. The upper
layers will restart the deleteBucket() which is fine because we manage
the case where the collection does not exist.
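
A hypothetical sketch of this two-step sequence (collection drop, then removal of the Metastore entry); the function name, collection handles and the error check for a missing collection are illustrative assumptions, not the actual Arsenal code:

```js
// Illustrative only: drop the bucket collection, then remove the bucket
// entry from the __metastore collection; the metastore removal is atomic
// by itself.
function deleteBucket(db, metastore, bucketName, log, cb) {
    return db.collection(bucketName).drop(err => {
        // a missing collection (empty bucket) is a normal state;
        // the exact error check is driver-dependent and illustrative here
        if (err && err.codeName !== 'NamespaceNotFound') {
            return cb(err);
        }
        return metastore.deleteOne({ _id: bucketName }, {}, err => {
            if (err) {
                log.error('error removing metastore entry', { bucketName });
                return cb(err);
            }
            return cb();
        });
    });
}
```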
#### PutObject() with a Version
We need to store the versioned object then update the master object
(the latest version). For this we use the
[BulkWrite](http://mongodb.github.io/node-mongodb-native/3.0/api/Collection.html#bulkWrite)
method. This is not a transaction, but it guarantees that the 2
operations will appear sequentially in the MongoDB oplog. Indeed, if
the BulkWrite() fails in between, we would end up creating an orphan
(which is not critical), but if the operation succeeds then we are
sure that the master always points to the right object. If there is
concurrency between 2 clients, then we are sure that the 2 groups of
operations will be clearly ordered in the oplog (the last writer
wins).
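
A hypothetical sketch of that ordered bulkWrite() with the Node.js driver; the versioned-key layout (`key\0versionId`), the document field names and the helper name are assumptions made for illustration, not the exact Arsenal code:

```js
// Illustrative only: write the specific version, then repoint the master
// key, in a single ordered bulkWrite() so both operations appear
// sequentially in the oplog.
function putObjectVer(collection, objectKey, versionId, objVal, cb) {
    const versionedKey = `${objectKey}\0${versionId}`; // assumed key layout
    collection.bulkWrite([
        // 1. write the specific version
        { updateOne: {
            filter: { _id: versionedKey },
            update: { $set: { _id: versionedKey, value: objVal } },
            upsert: true,
        } },
        // 2. update the master to point to this (latest) version
        { updateOne: {
            filter: { _id: objectKey },
            update: { $set: { _id: objectKey, value: objVal } },
            upsert: true,
        } },
    ], { ordered: true }, err => cb(err));
}
```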
#### DeleteObject()
This is probably the most complex case to manage because it involves a
lot of different cases:
##### Deleting an Object when Versioning is not Enabled
This case is a straightforward atomic delete. Atomicity is not really
required because we assume version IDs are random enough but it is
more robust to do so.
##### Deleting an Object when Versioning is Enabled
This case is more complex since we have to deal with the 2 cases:
Case 1: The caller asks for a deletion of a version which is not a master:
This case is a straight-forward atomic delete.
Case 2: The caller asks for a deletion of a version which is the master: In
this case we need to create a special flag called PHD (for PlaceHolDer)
that indicates the master is no longer valid (with a new unique
virtual version ID). We force the ordering of operations in a
bulkWrite() to first replace the master with the PHD flag and then
physically delete the version. If the call fails in between, we will be
left with a master carrying a PHD flag. If the call succeeds, we check
whether the master with the PHD flag is left alone: in that case we
delete it, otherwise we trigger an asynchronous repair that spawns
after ASYNC_REPAIR_TIMEOUT=15s and reassigns the master to the
latest version.
In all cases the physical deletion or the repair of the master is
checked against the PHD flag AND the actual unique virtual version
ID. We do this to guard against potential concurrent deletions,
repairs or updates. Only the last writer/deleter has the right to
physically perform the operation; otherwise it is superseded by other
operations.
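
The same ordered-bulkWrite() idea, sketched for the master-deletion case; the PHD field names, key layout, `repairMaster` callback and timeout handling are illustrative assumptions, not the actual Arsenal implementation:

```js
// Illustrative only: flag the master as a placeholder (PHD), then delete
// the targeted version; an asynchronous repair is scheduled in case the
// placeholder is not cleaned up synchronously afterwards.
const ASYNC_REPAIR_TIMEOUT = 15000; // 15s, as described above

function deleteMasterVersion(collection, objectKey, versionId, phdVersionId,
    repairMaster, cb) {
    const versionedKey = `${objectKey}\0${versionId}`; // assumed key layout
    collection.bulkWrite([
        // 1. mark the master as a placeholder with a fresh virtual version
        { updateOne: {
            filter: { _id: objectKey },
            update: { $set: { 'value.isPHD': true,
                'value.versionId': phdVersionId } },
        } },
        // 2. physically delete the targeted version
        { deleteOne: { filter: { _id: versionedKey } } },
    ], { ordered: true }, err => {
        if (err) {
            return cb(err);
        }
        // repair reassigns the master to the latest remaining version,
        // conditioned on the PHD flag and the virtual version ID
        setTimeout(() => repairMaster(objectKey, phdVersionId),
            ASYNC_REPAIR_TIMEOUT);
        return cb();
    });
}
```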
##### Getting an object with a PHD flag
If the caller is asking for the latest version of an object and the
PHD flag is set we perform a search on the bucket to find the latest
version and we return it.
#### Listing Objects
The mongoclient backend implements a readable key/value stream called
*MongoReadStream* that follows the LevelDB duck typing interface used
in Arsenal/lib/algos listing algorithms. Note it does not require any
LevelDB package.
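
For illustration, a hypothetical consumer of MongoReadStream (shown elsewhere in this changeset) driven with LevelDB-style range options; the require path, the collection handle and the end-of-prefix bound are assumptions:

```js
const MongoReadStream = require('./readStream'); // path is an assumption

// List up to `limit` entries whose keys start with `prefix`.
function listRange(collection, prefix, limit) {
    const stream = new MongoReadStream(collection, {
        gte: prefix,
        lt: `${prefix}\xFF`, // crude end-of-prefix bound, for illustration
        limit,
    }, null);
    stream.on('data', ({ key, value }) =>
        console.log('entry', key, value.length));
    stream.on('error', err => console.error('listing error', err));
    stream.on('end', () => console.log('listing done'));
}
```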
#### Generating the UUID
To avoid race conditions we always (try to) generate a new UUID and we
condition the insertion on the non-existence of the document.
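
A hypothetical sketch of that conditional insertion (an upsert with $setOnInsert, so only the first writer's UUID survives); the uuid dependency, document id and collection handle are assumptions:

```js
const uuidv4 = require('uuid/v4'); // assumed to be available

function getOrCreateUUID(infostore, cb) {
    infostore.updateOne(
        { _id: 'uuid' },
        // only takes effect if no 'uuid' document exists yet
        { $setOnInsert: { _id: 'uuid', value: uuidv4() } },
        { upsert: true },
        err => {
            if (err) {
                return cb(err);
            }
            // read back whichever value won the race
            return infostore.findOne({ _id: 'uuid' }, (err, doc) =>
                cb(err, doc && doc.value));
        });
}
```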

View File

@ -0,0 +1,137 @@
const Readable = require('stream').Readable;
const MongoUtils = require('./utils');
class MongoReadStream extends Readable {
constructor(c, options, searchOptions) {
super({
objectMode: true,
highWaterMark: 0,
});
if (options.limit === 0) {
return;
}
const query = {
_id: {},
};
if (options.reverse) {
if (options.start) {
query._id.$lte = options.start;
}
if (options.end) {
query._id.$gte = options.end;
}
if (options.gt) {
query._id.$lt = options.gt;
}
if (options.gte) {
query._id.$lte = options.gte;
}
if (options.lt) {
query._id.$gt = options.lt;
}
if (options.lte) {
query._id.$gte = options.lte;
}
} else {
if (options.start) {
query._id.$gte = options.start;
}
if (options.end) {
query._id.$lte = options.end;
}
if (options.gt) {
query._id.$gt = options.gt;
}
if (options.gte) {
query._id.$gte = options.gte;
}
if (options.lt) {
query._id.$lt = options.lt;
}
if (options.lte) {
query._id.$lte = options.lte;
}
}
if (!Object.keys(query._id).length) {
delete query._id;
}
if (searchOptions) {
Object.assign(query, searchOptions);
}
this._cursor = c.find(query).sort({
_id: options.reverse ? -1 : 1,
});
if (options.limit && options.limit !== -1) {
this._cursor = this._cursor.limit(options.limit);
}
this._options = options;
this._destroyed = false;
this.on('end', this._cleanup.bind(this));
}
_read() {
if (this._destroyed) {
return;
}
this._cursor.next((err, doc) => {
if (this._destroyed) {
return;
}
if (err) {
this.emit('error', err);
return;
}
let key = undefined;
let value = undefined;
if (doc) {
key = doc._id;
MongoUtils.unserialize(doc.value);
value = JSON.stringify(doc.value);
}
if (key === undefined && value === undefined) {
this.push(null);
} else if (this._options.keys !== false &&
this._options.values === false) {
this.push(key);
} else if (this._options.keys === false &&
this._options.values !== false) {
this.push(value);
} else {
this.push({
key,
value,
});
}
});
}
_cleanup() {
if (this._destroyed) {
return;
}
this._destroyed = true;
this._cursor.close(err => {
if (err) {
this.emit('error', err);
return;
}
this.emit('close');
});
}
destroy() {
return this._cleanup();
}
}
module.exports = MongoReadStream;

View File

@ -0,0 +1,30 @@
function escape(obj) {
return JSON.parse(JSON.stringify(obj).
replace(/\$/g, '\uFF04').
replace(/\./g, '\uFF0E'));
}
function unescape(obj) {
return JSON.parse(JSON.stringify(obj).
replace(/\uFF04/g, '$').
replace(/\uFF0E/g, '.'));
}
function serialize(objMD) {
// Tags require special handling since dot and dollar are accepted
if (objMD.tags) {
// eslint-disable-next-line
objMD.tags = escape(objMD.tags);
}
}
function unserialize(objMD) {
// Tags require special handling
if (objMD.tags) {
// eslint-disable-next-line
objMD.tags = unescape(objMD.tags);
}
}
module.exports = { escape, unescape, serialize, unserialize };

View File

@ -0,0 +1,375 @@
'use strict'; // eslint-disable-line strict
const errors = require('../../../errors');
const BucketInfo = require('../../../models/BucketInfo');
const { getURIComponents, getRequestBody, sendResponse } = require('./utils');
class BucketdRoutes {
/**
* Create a new Bucketd routes instance
* This class implements the bucketd Metadata protocol and is used by
* the Metadata Proxy Server to expose this protocol on top of
* various metadata backends.
*
* Implementation note: the adaptations performed in the methods of
* the class MetadataWrapper are not required in this context.
* For this reason, the methods of the `client' instance are directly
* called from this class, somewhat defeating the encapsulation of the
* wrapper.
*
* @param {Arsenal.storage.metadata.MetadataWrapper} metadataWrapper - to
* be used as a translation target for the bucketd protocol.
* @param {werelogs.Logger} logger - werelogs logger object
*/
constructor(metadataWrapper, logger) {
this._metadataWrapper = metadataWrapper;
this._logger = logger;
}
// Metadata Wrapper's wrapper
// `attributes' context methods
_getBucketAttributes(req, res, bucketName, logger) {
return this._metadataWrapper.client.getBucketAttributes(
bucketName, logger, (err, data) => {
if (err) {
logger.error('Failed to get bucket attributes',
{ bucket: bucketName, error: err });
return sendResponse(req, res, logger, err);
}
if (data === undefined) {
return sendResponse(req, res, logger,
errors.NoSuchBucket);
}
return sendResponse(req, res, logger, null,
BucketInfo.fromObj(data).serialize());
});
}
_putBucketAttributes(req, res, bucketName, data, logger) {
return this._metadataWrapper.client.putBucketAttributes(
bucketName, BucketInfo.deSerialize(data), logger, err =>
sendResponse(req, res, logger, err));
}
// `bucket' context methods
_createBucket(req, res, bucketName, data, logger) {
return this._metadataWrapper.client.createBucket(
bucketName, BucketInfo.deSerialize(data), logger, err =>
sendResponse(req, res, logger, err));
}
_deleteBucket(req, res, bucketName, logger) {
return this._metadataWrapper.client.deleteBucket(
bucketName, logger, err =>
sendResponse(req, res, logger, err));
}
_putObject(req, res, bucketName, objectName, objectValue, params, logger) {
let parsedValue;
try {
parsedValue = JSON.parse(objectValue);
} catch (err) {
logger.error('Malformed JSON value', { value: objectValue });
return sendResponse(req, res, logger, errors.BadRequest);
}
return this._metadataWrapper.client.putObject(
bucketName, objectName, parsedValue,
params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
_getObject(req, res, bucketName, objectName, params, logger) {
return this._metadataWrapper.client.getObject(
bucketName, objectName, params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
_deleteObject(req, res, bucketName, objectName, params, logger) {
return this._metadataWrapper.client.deleteObject(
bucketName, objectName, params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
_listObject(req, res, bucketName, params, logger) {
const listingParameters = params || {};
if (listingParameters.listingType === undefined) {
listingParameters.listingType = 'Delimiter';
}
if (listingParameters.maxKeys) {
listingParameters.maxKeys = Number.parseInt(params.maxKeys, 10);
}
return this._metadataWrapper.client.listObject(
bucketName, listingParameters, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
// `admin' context methods
_checkHealth(req, res, logger) {
return this._metadataWrapper.checkHealth(logger, (err, resp) => {
if (err) {
logger.error('Failed the health check',
{ error: err, method: '_checkHealth' });
return sendResponse(req, res, logger, err);
}
return sendResponse(req, res, logger, undefined, resp);
});
}
_createRequestLogger(req) {
const uids = req.headers['x-scal-request-uids'];
const logger = uids === undefined ?
this._logger.newRequestLogger() :
this._logger.newRequestLoggerFromSerializedUids(uids);
logger.trace('new request', { method: req.method, url: req.url });
return logger;
}
// `parallel' context methods
_getBucketAndObjectMD(req, res, bucketName, objectName, params, logger) {
return this._metadataWrapper.client.getBucketAndObject(
bucketName, objectName, params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
// Internal routes
/**
* Handle routes related to operations on bucket attributes
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_attributesRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for attributes route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._getBucketAttributes(
req, res,
uriComponents.bucketName, logger, (err, attrs) =>
sendResponse(req, res, logger, err, attrs));
case 'POST':
return getRequestBody(logger, req, (err, body) => {
if (err) {
return sendResponse(req, res, logger, err);
}
return this._putBucketAttributes(
req, res,
uriComponents.bucketName, body, logger, err =>
sendResponse(req, res, logger, err));
});
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle routes related to operations on buckets
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {string} uriComponents.namespace - Select the control plane with
* `_' or the data plane with
* `default'.
* @param {string} uriComponents.context - Targets the bucket itself with
* `attributes' or the content of
* the bucket with `bucket'.
* @param {string} uriComponents.bucketName - The name of the bucket
* @param {string} uriComponents.objectName - the key of the object in the
* bucket
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_bucketRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for bucket route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._listObject(req, res,
uriComponents.bucketName,
uriComponents.options,
logger);
case 'DELETE':
return this._deleteBucket(req, res,
uriComponents.bucketName, logger);
case 'POST':
return getRequestBody(logger, req, (err, body) => {
if (err) {
return sendResponse(req, res, logger, err);
}
return this._createBucket(req, res,
uriComponents.bucketName,
body, logger);
});
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle routes related to operations on objects
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_objectRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for object route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._getObject(req, res,
uriComponents.bucketName,
uriComponents.objectName,
uriComponents.options,
logger);
case 'DELETE':
return this._deleteObject(req, res,
uriComponents.bucketName,
uriComponents.objectName,
uriComponents.options,
logger);
case 'POST':
return getRequestBody(logger, req, (err, body) =>
this._putObject(req, res,
uriComponents.bucketName,
uriComponents.objectName,
body,
uriComponents.options,
logger));
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle routes related to operations on both objects and buckets
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_parallelRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for parallel route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._getBucketAndObjectMD(req, res,
uriComponents.bucketName,
uriComponents.objectName,
uriComponents.options,
logger);
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle default routes. e.g. URI starting with /default/
* (or anything except an underscore)
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_defaultRoutes(req, res, uriComponents, logger) {
switch (uriComponents.context) {
case 'leader':
case 'informations':
logger.trace(`${uriComponents.context} operation`);
return sendResponse(req, res, logger, errors.NotImplemented);
case 'metadataInformation':
return sendResponse(req, res, logger, undefined,
'{"metadataVersion":2}');
case 'parallel':
logger.trace(`${uriComponents.context} operation`);
if (uriComponents.objectName) {
return this._parallelRoutes(req, res, uriComponents, logger);
}
return sendResponse(req, res, logger, errors.RouteNotFound);
case 'bucket':
logger.trace(`${uriComponents.context} operation`);
if (uriComponents.objectName) {
return this._objectRoutes(req, res, uriComponents, logger);
}
return this._bucketRoutes(req, res, uriComponents, logger);
case 'attributes':
logger.trace(`${uriComponents.context} operation`);
return this._attributesRoutes(req, res, uriComponents, logger);
default:
logger.error('invalid URI', { uriComponents });
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle admin routes. e.g. URI starting with /_/
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_adminRoutes(req, res, uriComponents, logger) {
switch (uriComponents.context) {
case 'healthcheck':
return this._checkHealth(req, res, logger);
default:
return sendResponse(req, res, logger, errors.NotImplemented);
}
}
// The route dispatching method
/**
* dispatch the HTTP request to the appropriate handling function.
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @return {undefined}
*/
dispatch(req, res) {
const adminNamespace = '_';
const logger = this._createRequestLogger(req);
const uriComponents = getURIComponents(req.url, logger);
if (!uriComponents) {
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (uriComponents.namespace) {
case adminNamespace:
return this._adminRoutes(req, res, uriComponents, logger);
default: // coincidentally matches the `default' literal namespace as well
return this._defaultRoutes(req, res, uriComponents, logger);
}
}
}
module.exports = BucketdRoutes;

View File

@ -0,0 +1,33 @@
# Metadata Proxy Server
## Design goals
## Design choices
## Implementation details
## How to run the proxy server
```js
const werelogs = require('werelogs');
const MetadataWrapper = require('arsenal')
.storage.metadata.MetadataWrapper;
const Server = require('arsenal')
.storage.metadata.proxy.Server;
const logger = new werelogs.Logger('MetadataProxyServer',
'debug', 'debug');
const metadataWrapper = new MetadataWrapper('mem', {},
null, logger);
const server = new Server(metadataWrapper,
{
port: 9001,
workers: 1,
},
logger);
server.start(() => {
logger.info('Metadata Proxy Server successfully started. ' +
`Using the ${metadataWrapper.implName} backend`);
});
```
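
Once the server is listening, bucketd-style requests can be issued over plain HTTP. For example, fetching the attributes of a (hypothetical) bucket named `example-bucket` from the server started above:

```js
const http = require('http');

http.get('http://localhost:9001/default/attributes/example-bucket', res => {
    let body = '';
    res.on('data', chunk => { body += chunk; });
    res.on('end', () => console.log(res.statusCode, body));
}).on('error', err => console.error(err));
```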

View File

@ -0,0 +1,105 @@
'use strict'; // eslint-disable-line strict
const cluster = require('cluster');
const HttpServer = require('../../../network/http/server');
const BucketdRoutes = require('./BucketdRoutes');
const requiresOneWorker = {
// in memory kvs storage is not shared across processes
memorybucket: true,
};
class Server {
/**
* Create a new Metadata Proxy Server instance
*
* The Metadata Proxy Server is an HTTP server that translates
* requests of the bucketd sub-protocol into function calls to
* a properly configured MetadataWrapper instance. Such instance
* can use any of the available metadata backends available.
*
* @param {arsenal.storage.metadata.MetadataWrapper} metadataWrapper -
* @param {Object} configuration -
* @param {number} configuration.port -
* @param {number} configuration.workers -
* @param {werelogs.Logger} logger -
*/
constructor(metadataWrapper, configuration, logger) {
this._configuration = configuration;
if (requiresOneWorker[metadataWrapper.implName] &&
this._configuration.workers !== 1) {
logger.warn('This metadata backend requires only one worker',
{ metadataBackend: metadataWrapper.implName });
this._configuration.workers = 1;
}
this._logger = logger;
this._metadataWrapper = metadataWrapper;
this._proxyRoutes = new BucketdRoutes(metadataWrapper, this._logger);
this._httpServer = null;
this._installSignalHandlers();
}
_cleanup() {
if (cluster.isWorker) {
this._logger.info('Server worker shutting down...');
this._httpServer.stop();
} else {
this._logger.info('Server shutting down...');
}
return process.exit(0);
}
_installSignalHandlers() {
process.on('SIGINT', () => { this._cleanup(); });
process.on('SIGHUP', () => { this._cleanup(); });
process.on('SIGQUIT', () => { this._cleanup(); });
process.on('SIGTERM', () => { this._cleanup(); });
process.on('SIGPIPE', () => {});
}
/**
* Start the Metadata Proxy Server instance
*
* @param {Function} cb - called with no argument when the onListening event
* is triggered
* @return {undefined}
*/
start(cb) {
if (cluster.isMaster) {
for (let i = 0; i < this._configuration.workers; ++i) {
cluster.fork();
}
cluster.on('disconnect', worker => {
this._logger
.info(`worker ${worker.process.pid} exited, respawning.`);
cluster.fork();
});
} else {
this._httpServer = new HttpServer(this._configuration.port,
this._logger);
if (this._configuration.bindAddress) {
this._httpServer.setBindAddress(
this._configuration.bindAddress);
}
this._httpServer
.onRequest((req, res) => this._proxyRoutes.dispatch(req, res))
.onListening(() => {
this._logger.info(
'Metadata Proxy Server now listening on' +
` port ${this._configuration.port}`);
if (cb) {
return this._metadataWrapper.setup(cb);
}
return this._metadataWrapper.setup(() => {
this._logger.info('MetadataWrapper setup complete.');
});
})
.start();
}
}
}
module.exports = Server;

View File

@ -0,0 +1,183 @@
const url = require('url');
const querystring = require('querystring');
const errors = require('../../../errors');
/**
* Extracts components from URI.
* @param {string} uri - uri part of the received request
* @param {werelogs.Logger} logger -
* @return {object} ret - URI breakdown of the request to process
* @return {string} ret.namespace - targeted plane, control plane is targeted
* with `_' and the data plane with `default'.
* @return {string} ret.context - Targets the bucket itself with
* `attributes' or the content of
* the bucket with `bucket'.
* @return {string} ret.bucketName - The name of the bucket
* @return {string} ret.objectName - the key of the object in the bucket
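*
* @example
* // getURIComponents('/default/bucket/mybucket/some/key?maxKeys=5', log)
* // returns (illustrative):
* // { namespace: 'default', context: 'bucket', bucketName: 'mybucket',
* //   objectName: 'some/key', options: { maxKeys: '5' } }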
*/
function getURIComponents(uri, logger) {
try {
if (uri.charAt(0) !== '/') {
return {};
}
const { pathname, query } = url.parse(uri);
const options = query ? querystring.parse(query) : {};
const typeIndex = pathname.indexOf('/', 1);
const bucketIndex = pathname.indexOf('/', typeIndex + 1);
const objectIndex = pathname.indexOf('/', bucketIndex + 1);
if (typeIndex === -1 || typeIndex === pathname.length - 1) {
return {};
}
if (bucketIndex === -1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1),
};
}
if (bucketIndex === pathname.length - 1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
};
}
if (objectIndex === -1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
bucketName: pathname.substring(bucketIndex + 1),
options,
};
}
if (objectIndex === pathname.length - 1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
bucketName: pathname.substring(bucketIndex + 1, objectIndex),
options,
};
}
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
bucketName: pathname.substring(bucketIndex + 1, objectIndex),
objectName: decodeURIComponent(pathname.substring(objectIndex + 1)),
options,
};
} catch (ex) {
logger.error('Invalid URI: failed to parse',
{ uri, error: ex, errorStack: ex.stack,
message: ex.message });
return null;
}
}
/**
* Extracts the body of the request through a callback
* @param {werelogs.Logger} logger - instance of the logger that will emit the
* log entry
* @param {http.IncomingMessage} request - request received from bucketclient
* @param {Function} cb - function which has an interest in the request body.
* The first parameter is err and may be falsey
* The second parameter is the body of the request
* @return {undefined}
*/
function getRequestBody(logger, request, cb) {
const body = [];
let bodyLen = 0;
request.on('data', data => {
body.push(data);
bodyLen += data.length;
}).on('error', cb).on('end', () => {
cb(null, Buffer.concat(body, bodyLen).toString());
}).on('close', () => {
logger.error('Connection closed by remote peer');
/* Execution will eventually reach the sendResponse code which will
* trigger the proper cleanup as the remote peer already hung up and
* nobody is on the line to get the message */
cb(errors.BadRequest);
});
}
/**
* Emit a log entry corresponding to the end of the request
*
* @param {werelogs.Logger} logger - instance of the logger that will emit the
* log entry
* @param {http.IncomingMessage} req - request being processed
* @param {object} statusCode - HTTP status code sent back to the client
* @param {object} statusMessage - HTTP status message sent back to the client
* @return {undefined}
*/
function _logRequestEnd(logger, req, statusCode, statusMessage) {
const info = {
clientIp: req.socket.remoteAddress,
clientPort: req.socket.remotePort,
httpMethod: req.method,
httpURL: req.url,
httpCode: statusCode,
httpMessage: statusMessage,
};
logger.end('finished handling request', info);
}
/**
* Request processing exit point, sends back to the client the specified data
* and/or error code
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {werelogs.Logger} log - instance of the logger to use
* @param {Arsenal.Error} err - if not null, defines the HTTP status
* code and message
* @param {string} data - if not null, used as the response body. If `data'
* isn't a string, it's considered as a JSON object and
* its content gets serialized before being sent.
* @return {undefined}
*/
function sendResponse(req, res, log, err, data) {
let statusCode;
let statusMessage;
if (err) {
statusCode = err.code;
statusMessage = err.message;
} else {
statusCode = errors.ok.code;
statusMessage = errors.ok.message;
}
if (data) {
let resData = data;
if (typeof resData === 'object') {
resData = JSON.stringify(data);
} else if (typeof resData === 'number') {
resData = resData.toString();
}
/*
* Encoding data to binary provides a hot path to write data
* directly to the socket, without node.js trying to encode the data
* over and over again.
*/
const rawData = Buffer.from(resData, 'utf8');
/*
* Using Buffer.byteLength is not required here because data is binary
* encoded; data.length gives us the exact byte length
*/
res.writeHead(statusCode, statusMessage, {
'content-length': rawData.length,
});
res.write(rawData);
} else {
res.writeHead(statusCode, statusMessage, { 'content-length': 0 });
}
return res.end(() => {
_logRequestEnd(log, req, statusCode, statusMessage);
});
}
module.exports = {
getURIComponents,
getRequestBody,
sendResponse,
};

package-lock.json (generated file, 2725 lines)

File diff suppressed because it is too large

View File

@ -1,16 +1,16 @@
{
"name": "arsenal",
"engines": {
- "node": ">=6.9.5"
+ "node": ">=8"
},
- "version": "7.4.3",
+ "version": "8.0.6",
"description": "Common utilities for the S3 project components",
"main": "index.js",
"repository": {
"type": "git",
"url": "git+https://github.com/scality/Arsenal.git"
},
- "author": "Giorgio Regni",
+ "author": "Scality Inc.",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/scality/Arsenal/issues"
@ -20,13 +20,16 @@
"JSONStream": "^1.0.0",
"ajv": "4.10.0",
"async": "~2.1.5",
+ "bson": "2.0.4",
"debug": "~2.3.3",
"diskusage": "^1.1.1",
+ "fcntl": "github:scality/node-fcntl",
"ioredis": "4.9.5",
"ipaddr.js": "1.2.0",
"joi": "^10.6",
"level": "~5.0.1",
"level-sublevel": "~6.6.5",
+ "mongodb": "^3.0.1",
"node-forge": "^0.7.1",
"simple-glob": "^0.1",
"socket.io": "~1.7.3",

View File

@ -0,0 +1,105 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const RedisClient = require('../../../lib/metrics/RedisClient');
const { backbeat } = require('../../../');
// expirations
const EXPIRY = 86400; // 24 hours
const THROUGHPUT_EXPIRY = 900; // 15 minutes
// setup redis client
const config = {
host: '127.0.0.1',
port: 6379,
enableOfflineQueue: false,
};
const fakeLogger = {
trace: () => {},
error: () => {},
};
const redisClient = new RedisClient(config, fakeLogger);
// setup stats model
const sites = ['site1', 'site2'];
const metrics = new backbeat.Metrics({
redisConfig: config,
validSites: ['site1', 'site2', 'all'],
internalStart: Date.now() - (EXPIRY * 1000), // 24 hours ago.
}, fakeLogger);
// Since many methods were overwritten, these tests should validate the changes
// made to the original methods
describe('Metrics class', () => {
afterEach(() => redisClient.clear(() => {}));
it('should not crash on empty results', done => {
const redisKeys = {
ops: 'bb:crr:ops',
bytes: 'bb:crr:bytes',
opsDone: 'bb:crr:opsdone',
bytesDone: 'bb:crr:bytesdone',
bytesFail: 'bb:crr:bytesfail',
opsFail: 'bb:crr:opsfail',
failedCRR: 'bb:crr:failed',
opsPending: 'bb:crr:bytespending',
bytesPending: 'bb:crr:opspending',
};
const routes = backbeat.routes(redisKeys, sites);
const details = routes.find(route =>
route.category === 'metrics' && route.type === 'all');
details.site = 'all';
metrics.getAllMetrics(details, (err, res) => {
assert.ifError(err);
const expected = {
pending: {
description: 'Number of pending replication operations ' +
'(count) and bytes (size)',
results: {
count: 0,
size: 0,
},
},
backlog: {
description: 'Number of incomplete replication operations' +
' (count) and number of incomplete bytes transferred' +
' (size)',
results: {
count: 0,
size: 0,
},
},
completions: {
description: 'Number of completed replication operations' +
' (count) and number of bytes transferred (size) in ' +
`the last ${EXPIRY} seconds`,
results: {
count: 0,
size: 0,
},
},
failures: {
description: 'Number of failed replication operations ' +
`(count) and bytes (size) in the last ${EXPIRY} ` +
'seconds',
results: {
count: 0,
size: 0,
},
},
throughput: {
description: 'Current throughput for replication' +
' operations in ops/sec (count) and bytes/sec (size) ' +
`in the last ${THROUGHPUT_EXPIRY} seconds`,
results: {
count: '0.00',
size: '0.00',
},
},
};
assert.deepStrictEqual(res, expected);
done();
});
});
});

View File

@ -0,0 +1,68 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const crypto = require('crypto');
const async = require('async');
const TTLVCodec = require('../../../lib/network/kmip/codec/ttlv.js');
const LoopbackServerChannel =
require('../../utils/kmip/LoopbackServerChannel.js');
const TransportTemplate =
require('../../../lib/network/kmip/transport/TransportTemplate.js');
const KMIP = require('../../../lib/network/kmip');
const KMIPClient = require('../../../lib/network/kmip/Client.js');
const {
logger,
} = require('../../utils/kmip/ersatz.js');
class LoopbackServerTransport extends TransportTemplate {
constructor(options) {
super(new LoopbackServerChannel(KMIP, TTLVCodec), options);
}
}
describe('KMIP High Level Driver', () => {
[null, 'dummyAttributeName'].forEach(bucketNameAttributeName => {
[false, true].forEach(compoundCreateActivate => {
const options = {
kmip: {
client: {
bucketNameAttributeName,
compoundCreateActivate,
},
codec: {},
transport: {
pipelineDepth: 8,
tls: {
port: 5696,
},
},
},
};
it('should work with' +
` x-name attribute: ${!!bucketNameAttributeName},` +
` compound creation: ${compoundCreateActivate}`,
done => {
const kmipClient = new KMIPClient(options, TTLVCodec,
LoopbackServerTransport);
const plaintext = Buffer.from(crypto.randomBytes(32));
async.waterfall([
next => kmipClient.createBucketKey('plop', logger, next),
(id, next) =>
kmipClient.cipherDataKey(1, id, plaintext,
logger, (err, ciphered) => {
next(err, id, ciphered);
}),
(id, ciphered, next) =>
kmipClient.decipherDataKey(
1, id, ciphered, logger, (err, deciphered) => {
assert(plaintext
.compare(deciphered) === 0);
next(err, id);
}),
(id, next) =>
kmipClient.destroyBucketKey(id, logger, next),
], done);
});
});
});
});

View File

@ -0,0 +1,52 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const TTLVCodec = require('../../../lib/network/kmip/codec/ttlv.js');
const TransportTemplate =
require('../../../lib/network/kmip/transport/TransportTemplate.js');
const KMIP = require('../../../lib/network/kmip');
const {
logger,
MirrorChannel,
} = require('../../utils/kmip/ersatz.js');
const lowlevelFixtures = require('../../utils/kmip/lowlevelFixtures.js');
class MirrorTransport extends TransportTemplate {
constructor(options) {
super(new MirrorChannel(KMIP, TTLVCodec), options);
}
}
const options = {
kmip: {
codec: {},
transport: {
pipelineDepth: 8,
tls: {
port: 5696,
},
},
},
};
describe('KMIP Low Level Driver', () => {
lowlevelFixtures.forEach((fixture, n) => {
it(`should work with fixture #${n}`, done => {
const kmip = new KMIP(TTLVCodec, MirrorTransport, options);
const requestPayload = fixture.payload(kmip);
kmip.request(logger, fixture.operation,
requestPayload, (err, response) => {
if (err) {
return done(err);
}
const responsePayload = response.lookup(
'Response Message/Batch Item/Response Payload'
)[0];
assert.deepStrictEqual(responsePayload,
requestPayload);
return done();
});
});
});
});

View File

@ -0,0 +1,82 @@
'use strict'; // eslint-disable-line
const async = require('async');
const assert = require('assert');
const TransportTemplate =
require('../../../lib/network/kmip/transport/TransportTemplate.js');
const { logger, EchoChannel } = require('../../utils/kmip/ersatz.js');
describe('KMIP Transport Template Class', () => {
const pipelineDepths = [1, 2, 4, 8, 16, 32];
const requestNumbers = [1, 37, 1021, 8191];
pipelineDepths.forEach(pipelineDepth => {
requestNumbers.forEach(iterations => {
it(`should survive ${iterations} iterations` +
` with ${pipelineDepth}-way pipeline`,
done => {
const transport = new TransportTemplate(
new EchoChannel,
{
pipelineDepth,
tls: {
port: 5696,
},
});
const request = Buffer.alloc(10).fill(6);
async.times(iterations, (n, next) => {
transport.send(logger, request,
(err, conversation, response) => {
if (err) {
return next(err);
}
if (request.compare(response) !== 0) {
return next(Error('arg'));
}
return next();
});
}, err => {
transport.end();
done(err);
});
});
[true, false].forEach(doEmit => {
it('should report errors to outstanding requests.' +
` w:${pipelineDepth}, i:${iterations}, e:${doEmit}`,
done => {
const echoChannel = new EchoChannel;
echoChannel.clog();
const transport = new TransportTemplate(
echoChannel,
{
pipelineDepth,
tls: {
port: 5696,
},
});
const request = Buffer.alloc(10).fill(6);
/* By using a for loop here instead of anything
* asynchronous, the callbacks get stuck in
* the conversation queue and are unwound with
* an error. That is the purpose of this test */
for (let i = 0; i < iterations; ++i) {
transport.send(
logger, request,
(err, conversation, response) => {
assert(err);
assert(!response);
});
}
if (doEmit) {
echoChannel.emit('error', new Error('awesome'));
} else {
transport.abortPipeline(echoChannel);
}
transport.end();
done();
});
});
});
});
});

View File

@ -0,0 +1,319 @@
'use strict'; // eslint-disable-line strict
const werelogs = require('werelogs');
const assert = require('assert');
const async = require('async');
const logger = new werelogs.Logger('MetadataProxyServer', 'debug', 'debug');
const MetadataWrapper =
require('../../../lib/storage/metadata/MetadataWrapper');
const BucketRoutes =
require('../../../lib/storage/metadata/proxy/BucketdRoutes');
const metadataWrapper = new MetadataWrapper('mem', {}, null, logger);
const { RequestDispatcher } = require('../../utils/mdProxyUtils');
const routes = new BucketRoutes(metadataWrapper, logger);
const dispatcher = new RequestDispatcher(routes);
const Bucket = 'test';
const bucketInfo = {
acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
name: Bucket,
owner: '9d8fe19a78974c56dceb2ea4a8f01ed0f5fecb9d29f80e9e3b84104e4a3ea520',
ownerDisplayName: 'anonymousCoward',
creationDate: '2018-06-04T17:45:42.592Z',
mdBucketModelVersion: 8,
transient: false,
deleted: false,
serverSideEncryption: null,
versioningConfiguration: null,
locationConstraint: 'us-east-1',
readLocationConstraint: 'us-east-1',
cors: null,
replicationConfiguration: null,
lifecycleConfiguration: null,
uid: 'fea97818-6a9a-11e8-9777-e311618cc5d4',
isNFS: null,
};
const objects = [
'aaa',
'bbb/xaa',
'bbb/xbb',
'bbb/xcc',
'ccc',
'ddd',
];
function _getExpectedListing(prefix, objects) {
const filtered = objects.map(key => {
const deprefixed = key.slice(prefix.length);
return deprefixed.replace(/[/].*/, '/');
});
const keySet = {};
return filtered.filter(key => {
if (keySet[key]) {
return false;
}
if (key === '') {
return false;
}
keySet[key] = true;
return true;
});
}
function _listingURL(prefix, marker) {
const reSlash = /[/]/g;
const escapedPrefix = prefix.replace(reSlash, '%2F');
const escapedMarker = marker.replace(reSlash, '%2F');
return `/default/bucket/${Bucket}?delimiter=%2F&prefix=` +
`${escapedPrefix}&maxKeys=1&marker=${escapedMarker}`;
}
function _listObjects(prefix, objects, cb) {
const keys = _getExpectedListing(prefix, objects);
const markers = keys.slice(0);
markers.unshift(undefined);
const lastKey = keys[keys.length - 1];
const listing = keys.map((key, index) => ({
key,
marker: markers[index],
NextMarker: markers[index + 1],
IsTruncated: key !== lastKey,
isPrefix: key.endsWith('/'),
}));
async.mapLimit(listing, 5, (obj, next) => {
const currentMarker = obj.marker === undefined ? '' : obj.marker;
dispatcher.get(_listingURL(prefix, prefix + currentMarker),
(err, response, body) => {
if (err) {
return next(err);
}
if (obj.isPrefix) {
assert.strictEqual(body.Contents.length, 0);
assert.strictEqual(body.CommonPrefixes.length,
1);
assert.strictEqual(body.CommonPrefixes[0],
prefix + obj.key);
} else {
assert.strictEqual(body.Contents.length, 1);
assert.strictEqual(body.CommonPrefixes.length,
0);
assert.strictEqual(body.Contents[0].key,
prefix + obj.key);
}
assert.strictEqual(body.IsTruncated,
obj.IsTruncated);
if (body.IsTruncated) {
assert.strictEqual(body.NextMarker,
prefix + obj.NextMarker);
}
return next();
});
}, err => cb(err));
}
function _createObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.post(`/default/bucket/${Bucket}/${key}`,
{ key }, next);
}, err => {
cb(err);
});
}
function _readObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.get(`/default/bucket/${Bucket}/${key}`,
(err, response, body) => {
assert.deepStrictEqual(body.key, key);
next(err);
});
}, err => {
cb(err);
});
}
function _deleteObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.delete(`/default/bucket/${Bucket}/${key}`,
err => next(err));
}, err => {
cb(err);
});
}
describe('Basic Metadata Proxy Server test',
function bindToThis() {
this.timeout(10000);
it('Should get the metadataInformation', done => {
dispatcher.get('/default/metadataInformation',
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(
body, { metadataVersion: 2 });
return done();
});
});
});
describe('Basic Metadata Proxy Server CRUD test', function bindToThis() {
this.timeout(10000);
beforeEach(done => {
dispatcher.post(`/default/bucket/${Bucket}`, bucketInfo,
done);
});
afterEach(done => {
dispatcher.delete(`/default/bucket/${Bucket}`, done);
});
it('Should get the bucket attributes', done => {
dispatcher.get(`/default/attributes/${Bucket}`,
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(body.name,
bucketInfo.name);
return done();
});
});
it('Should crud an object', done => {
async.waterfall([
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'gabu' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo,
'gabu');
}
next(err);
}),
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'zome' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo,
'zome');
}
next(err);
}),
next => dispatcher.delete(`/default/bucket/${Bucket}/test1`,
err => next(err)),
], err => done(err));
});
it('Should list objects', done => {
async.waterfall([
next => _createObjects(objects, next),
next => _readObjects(objects, next),
next => _listObjects('', objects, next),
next => _listObjects('bbb/', objects, next),
next => _deleteObjects(objects, next),
], err => {
done(err);
});
});
it('Should update bucket properties', done => {
dispatcher.get(
`/default/attributes/${Bucket}`, (err, response, body) => {
assert.strictEqual(err, null);
const bucketInfo = body;
const newOwnerDisplayName = 'divertedfrom';
bucketInfo.ownerDisplayName = newOwnerDisplayName;
dispatcher.post(
`/default/attributes/${Bucket}`, bucketInfo, err => {
assert.strictEqual(err, null);
dispatcher.get(
`/default/attributes/${Bucket}`,
(err, response, body) => {
assert.strictEqual(err, null);
const newBucketInfo = body;
assert.strictEqual(
newBucketInfo.ownerDisplayName,
newOwnerDisplayName);
done(null);
});
});
});
});
it('Should fail to list a non existing bucket', done => {
dispatcher.get('/default/bucket/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('Should fail to get attributes from a non existing bucket', done => {
dispatcher.get('/default/attributes/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('should succeed a health check', done => {
dispatcher.get('/_/healthcheck', (err, response, body) => {
if (err) {
return done(err);
}
const expectedResponse = {
memorybucket: {
code: 200,
message: 'OK',
},
};
assert.strictEqual(response.responseHead.statusCode, 200);
assert.deepStrictEqual(body, expectedResponse);
return done(err);
});
});
it('should work with parallel route', done => {
const objectName = 'theObj';
async.waterfall([
next => _createObjects([objectName], next),
next => {
dispatcher.get(
`/default/parallel/${Bucket}/${objectName}`,
(err, response, body) => {
if (err) {
return next(err);
}
assert.strictEqual(response.responseHead.statusCode,
200);
const bucketMD = JSON.parse(body.bucket);
const objectMD = JSON.parse(body.obj);
const expectedObjectMD = { key: objectName };
assert.deepStrictEqual(bucketMD.name,
bucketInfo.name);
assert.deepStrictEqual(objectMD, expectedObjectMD);
return next(err);
});
},
next => _deleteObjects([objectName], next),
], done);
});
});

View File

@ -0,0 +1,318 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const async = require('async');
const RedisClient = require('../../../lib/metrics/RedisClient');
const StatsModel = require('../../../lib/metrics/StatsModel');
// setup redis client
const config = {
host: '127.0.0.1',
port: 6379,
enableOfflineQueue: false,
};
const fakeLogger = {
trace: () => {},
error: () => {},
};
const redisClient = new RedisClient(config, fakeLogger);
// setup stats model
const STATS_INTERVAL = 300; // 5 minutes
const STATS_EXPIRY = 86400; // 24 hours
const statsModel = new StatsModel(redisClient, STATS_INTERVAL, STATS_EXPIRY);
function setExpectedStats(expected) {
return expected.concat(
Array((STATS_EXPIRY / STATS_INTERVAL) - expected.length).fill(0));
}
// Since many methods were overwritten, these tests should validate the changes
// made to the original methods
describe('StatsModel class', () => {
const id = 'arsenal-test';
const id2 = 'test-2';
const id3 = 'test-3';
afterEach(() => redisClient.clear(() => {}));
it('should convert a 2d array columns into rows and vice versa using _zip',
() => {
const arrays = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
];
const res = statsModel._zip(arrays);
const expected = [
[1, 4, 7],
[2, 5, 8],
[3, 6, 9],
];
assert.deepStrictEqual(res, expected);
});
it('_zip should return an empty array if given an invalid array', () => {
const arrays = [];
const res = statsModel._zip(arrays);
assert.deepStrictEqual(res, []);
});
it('_getCount should return an array of all valid integer values',
() => {
const res = statsModel._getCount([
[null, '1'],
[null, '2'],
[null, null],
]);
assert.deepStrictEqual(res, setExpectedStats([1, 2, 0]));
});
it('should correctly record a new request by default one increment',
done => {
async.series([
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 2], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should record new requests by defined amount increments', done => {
function noop() {}
async.series([
next => {
statsModel.reportNewRequest(id, 9);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([9]));
next();
});
},
next => {
statsModel.reportNewRequest(id);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([10]));
next();
});
},
next => {
statsModel.reportNewRequest(id, noop);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([11]));
next();
});
},
], done);
});
it('should correctly record a 500 on the server', done => {
statsModel.report500(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
done();
});
});
it('should respond back with total requests as an array', done => {
async.series([
next => {
statsModel.reportNewRequest(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.report500(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([1]),
'500s': setExpectedStats([1]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should not crash on empty results', done => {
async.series([
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should return a zero-filled array if no ids are passed to getAllStats',
done => {
statsModel.getAllStats(fakeLogger, [], (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([]));
assert.deepStrictEqual(res['500s'], setExpectedStats([]));
done();
});
});
it('should get accurately reported data for a given id from getAllStats',
done => {
statsModel.reportNewRequest(id, 9);
statsModel.reportNewRequest(id2, 2);
statsModel.reportNewRequest(id3, 3);
statsModel.report500(id);
async.series([
next => {
statsModel.getAllStats(fakeLogger, [id], (err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 9);
assert.equal(res['500s'][0], 1);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, [id, id2, id3],
(err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 14);
assert.deepStrictEqual(res.requests,
setExpectedStats([14]));
next();
});
},
], done);
});
it('should normalize a date to the start of the hour using normalizeTimestampByHour',
() => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = new Date(statsModel.normalizeTimestampByHour(date));
assert.strictEqual(date.getHours(), newDate.getHours());
assert.strictEqual(newDate.getMinutes(), 0);
assert.strictEqual(newDate.getSeconds(), 0);
assert.strictEqual(newDate.getMilliseconds(), 0);
});
it('should get previous hour using _getDatePreviousHour', () => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = statsModel._getDatePreviousHour(new Date(date));
const millisecondsInOneHour = 3600000;
assert.strictEqual(date - newDate, millisecondsInOneHour);
});
it('should get an array of hourly timestamps using getSortedSetHours',
() => {
const epoch = 1536882476501;
const millisecondsInOneHour = 3600000;
const expected = [];
let dateInMilliseconds = statsModel.normalizeTimestampByHour(
new Date(epoch));
for (let i = 0; i < 24; i++) {
expected.push(dateInMilliseconds);
dateInMilliseconds -= millisecondsInOneHour;
}
const res = statsModel.getSortedSetHours(epoch);
assert.deepStrictEqual(res, expected);
});
it('should apply TTL on a new sorted set using addToSortedSet', done => {
const key = 'a-test-key';
const score = 100;
const value = 'a-value';
const now = Date.now();
const nearestHour = statsModel.normalizeTimestampByHour(new Date(now));
statsModel.addToSortedSet(key, score, value, (err, res) => {
assert.ifError(err);
// check both a "zadd" and "expire" occurred
assert.equal(res, 1);
redisClient.ttl(key, (err, res) => {
assert.ifError(err);
// assert this new set has a ttl applied
assert(res > 0);
const adjustmentSecs = now - nearestHour;
const msInADay = 24 * 60 * 60 * 1000;
const msInAnHour = 60 * 60 * 1000;
const upperLimitSecs =
Math.ceil((msInADay - adjustmentSecs) / 1000);
const lowerLimitSecs =
Math.floor((msInADay - adjustmentSecs - msInAnHour) / 1000);
// assert new ttl is between 23 and 24 hours adjusted by time
// elapsed since normalized hourly time
assert(res >= lowerLimitSecs);
assert(res <= upperLimitSecs);
done();
});
});
});
});


@ -269,4 +269,33 @@ describe('v4 headerAuthCheck', () => {
assert.strictEqual(res.params.version, 4);
done();
});
it('should not return error if proxy_path header is added', done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454962445000);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({
proxy_path: 'proxy/1234' }, 'headers', request, headers);
/* eslint-enable camelcase */
const res = headerAuthCheck(alteredRequest, log);
clock.uninstall();
assert.strictEqual(res.err, null);
done();
});
it('should return InvalidRequest error if proxy_path header is invalid',
done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454962445000);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({
proxy_path: 'absc%2proxy/1234' }, 'headers', request, headers);
/* eslint-enable camelcase */
const res = headerAuthCheck(alteredRequest, log);
clock.uninstall();
assert.deepStrictEqual(res.err,
errors.InvalidArgument.customizeDescription(
'invalid proxy_path header'));
done();
});
});


@ -225,4 +225,34 @@ describe('v4 queryAuthCheck', () => {
assert.strictEqual(res.params.version, 4);
done();
});
it('should successfully return no error if proxy_path header is added',
done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454974984001);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({ proxy_path:
'proxy/1234' }, 'headers', request, query);
/* eslint-enable camelcase */
const res = queryAuthCheck(alteredRequest, log, alteredRequest.query);
clock.uninstall();
assert.deepStrictEqual(res.err, null);
done();
});
it('should return InvalidRequest error if proxy_path header is invalid',
done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454974984001);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({ proxy_path:
'absc%2proxy/1234' }, 'headers', request, query);
/* eslint-enable camelcase */
const res = queryAuthCheck(alteredRequest, log, alteredRequest.query);
clock.uninstall();
assert.deepStrictEqual(res.err,
errors.InvalidArgument.customizeDescription(
'invalid proxy_path header'));
done();
});
});


@ -0,0 +1,172 @@
'use strict'; // eslint-disable-line strict
/* eslint new-cap: "off" */
const assert = require('assert');
const TTLVCodec = require('../../../lib/network/kmip/codec/ttlv.js');
const KMIP = require('../../../lib/network/kmip');
const ttlvFixtures = require('../../utils/kmip/ttlvFixtures');
const badTtlvFixtures = require('../../utils/kmip/badTtlvFixtures');
const messageFixtures = require('../../utils/kmip/messageFixtures');
const { logger } = require('../../utils/kmip/ersatz.js');
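// Build a KMIP instance wired to the TTLV codec; the transport is a dummy
// class because these tests only exercise message encoding and decoding.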
function newKMIP() {
return new KMIP(TTLVCodec,
class DummyTransport {},
{ kmip: {} }, () => {});
}
describe('KMIP TTLV Codec', () => {
it('should map, encode and decode an extension', done => {
const kmip = newKMIP();
kmip.mapExtension('Dummy Extension', 0x54a000);
const msg = KMIP.Message([
KMIP.TextString('Dummy Extension', 'beautiful'),
]);
const encodedMsg = kmip._encodeMessage(msg);
const decodedMsg = kmip._decodeMessage(logger, encodedMsg);
assert.deepStrictEqual(msg, decodedMsg);
done();
});
ttlvFixtures.forEach((item, idx) => {
['request', 'response'].forEach(fixture => {
it(`should decode the TTLV ${fixture} fixture[${idx}]`, done => {
const kmip = newKMIP();
const msg = kmip._decodeMessage(logger, item[fixture]);
if (!item.degenerated) {
const encodedMsg = kmip._encodeMessage(msg);
assert(encodedMsg.compare(item[fixture]) === 0);
}
done();
});
});
});
it('should validate supported operations', done => {
const kmip = newKMIP();
const msg = kmip._decodeMessage(logger, ttlvFixtures[1].response);
const supportedOperations =
msg.lookup('Response Message/Batch Item/' +
'Response Payload/Operation');
const supportedObjectTypes =
msg.lookup('Response Message/Batch Item/' +
'Response Payload/Object Type');
const protocolVersionMajor =
msg.lookup('Response Message/Response Header/' +
'Protocol Version/Protocol Version Major');
const protocolVersionMinor =
msg.lookup('Response Message/Response Header/' +
'Protocol Version/Protocol Version Minor');
assert(supportedOperations.includes('Encrypt'));
assert(supportedOperations.includes('Decrypt'));
assert(supportedOperations.includes('Create'));
assert(supportedOperations.includes('Destroy'));
assert(supportedOperations.includes('Query'));
assert(supportedObjectTypes.includes('Symmetric Key'));
assert(protocolVersionMajor[0] >= 2 ||
(protocolVersionMajor[0] === 1 &&
protocolVersionMinor[0] >= 2));
done();
});
it('should detect unsupported operations', done => {
const kmip = newKMIP();
const msg = kmip._decodeMessage(logger, ttlvFixtures[2].response);
const supportedOperations =
msg.lookup('Response Message/Batch Item/' +
'Response Payload/Operation');
assert(!supportedOperations.includes('Encrypt'));
assert(!supportedOperations.includes('Decrypt'));
done();
});
it('should support non canonical search path', done => {
const kmip = newKMIP();
const msg = kmip._decodeMessage(logger, ttlvFixtures[1].response);
const supportedOperations =
msg.lookup('/Response Message/Batch Item/' +
'Response Payload/Operation');
const supportedObjectTypes =
msg.lookup('Response Message/Batch Item/' +
'Response Payload/Object Type/');
const protocolVersionMajor =
msg.lookup('Response Message//Response Header///' +
'Protocol Version////Protocol Version Major');
const protocolVersionMinor =
msg.lookup('/Response Message////Response Header///' +
'Protocol Version//Protocol Version Minor/');
assert(supportedOperations.includes('Encrypt'));
assert(supportedOperations.includes('Decrypt'));
assert(supportedOperations.includes('Create'));
assert(supportedOperations.includes('Destroy'));
assert(supportedOperations.includes('Query'));
assert(supportedObjectTypes.includes('Symmetric Key'));
assert(protocolVersionMajor[0] >= 2 ||
(protocolVersionMajor[0] === 1 &&
protocolVersionMinor[0] >= 2));
done();
});
it('should return nothing with an empty search path', done => {
const kmip = newKMIP();
const msg = kmip._decodeMessage(logger, ttlvFixtures[2].response);
const empty1 = msg.lookup('');
const empty2 = msg.lookup('////');
assert(empty1.length === 0);
assert(empty2.length === 0);
done();
});
it('should encode/decode a bit mask', done => {
const kmip = newKMIP();
const usageMask = ['Encrypt', 'Decrypt', 'Export'];
const decodedMask =
kmip.decodeMask('Cryptographic Usage Mask',
kmip.encodeMask('Cryptographic Usage Mask',
usageMask));
assert.deepStrictEqual(usageMask.sort(), decodedMask.sort());
done();
});
it('should detect invalid bit name', done => {
const kmip = newKMIP();
const usageMask = ['Encrypt', 'Decrypt', 'Exprot'];
try {
kmip.encodeMask('Cryptographic Usage Mask', usageMask);
done(Error('Must not succeed'));
} catch (e) {
done();
}
});
messageFixtures.forEach((item, idx) => {
it(`should encode the KMIP message fixture[${idx}]`, done => {
const kmip = newKMIP();
const encodedMessage = kmip._encodeMessage(item);
const decodedMessage = kmip._decodeMessage(logger, encodedMessage);
assert.deepStrictEqual(item.content, decodedMessage.content);
done();
});
});
badTtlvFixtures.forEach((rawMessage, idx) => {
it(`should fail to parse invalid TTLV message fixture[${idx}]`,
done => {
const kmip = newKMIP();
try {
kmip._decodeMessage(logger, rawMessage);
done(Error('Must not succeed'));
} catch (e) {
done();
}
});
});
});


@ -59,6 +59,7 @@ const testWebsiteConfiguration = new WebsiteConfiguration({
});
const testLocationConstraint = 'us-west-1';
const testReadLocationConstraint = 'us-west-2';
const testCorsConfiguration = [
{ id: 'test',
@ -115,6 +116,8 @@ const testLifecycleConfiguration = {
},
],
};
const testUid = '99ae3446-7082-4c17-ac97-52965dc004ec';
// create a dummy bucket to test getters and setters
Object.keys(acl).forEach(
@ -132,7 +135,8 @@ Object.keys(acl).forEach(
testWebsiteConfiguration,
testCorsConfiguration,
testReplicationConfiguration,
testLifecycleConfiguration,
testUid, undefined, true);
describe('serialize/deSerialize on BucketInfo class', () => {
const serialized = dummyBucket.serialize();
@ -151,6 +155,7 @@ Object.keys(acl).forEach(
versioningConfiguration:
dummyBucket._versioningConfiguration,
locationConstraint: dummyBucket._locationConstraint,
readLocationConstraint: dummyBucket._readLocationConstraint,
websiteConfiguration: dummyBucket._websiteConfiguration
.getConfig(),
cors: dummyBucket._cors,
@ -158,6 +163,8 @@ Object.keys(acl).forEach(
dummyBucket._replicationConfiguration,
lifecycleConfiguration:
dummyBucket._lifecycleConfiguration,
uid: dummyBucket._uid,
isNFS: dummyBucket._isNFS,
};
assert.strictEqual(serialized, JSON.stringify(bucketInfos));
done();
@ -182,6 +189,7 @@ Object.keys(acl).forEach(
'string');
assert.strictEqual(typeof dummyBucket.getCreationDate(),
'string');
assert.strictEqual(typeof dummyBucket.getUid(), 'string');
});
it('this should have the right acl\'s types', () => {
assert.strictEqual(typeof dummyBucket.getAcl(), 'object');
@ -249,6 +257,18 @@ Object.keys(acl).forEach(
assert.deepStrictEqual(dummyBucket.getLocationConstraint(),
testLocationConstraint);
});
it('getReadLocationConstraint should return locationConstraint ' +
'if readLocationConstraint hasn\'t been set', () => {
assert.deepStrictEqual(dummyBucket.getReadLocationConstraint(),
testLocationConstraint);
});
it('getReadLocationConstraint should return readLocationConstraint',
() => {
dummyBucket._readLocationConstraint =
testReadLocationConstraint;
assert.deepStrictEqual(dummyBucket.getReadLocationConstraint(),
testReadLocationConstraint);
});
it('getCors should return CORS configuration', () => {
assert.deepStrictEqual(dummyBucket.getCors(),
testCorsConfiguration);
@ -257,6 +277,16 @@ Object.keys(acl).forEach(
assert.deepStrictEqual(dummyBucket.getLifecycleConfiguration(),
testLifecycleConfiguration);
});
it('getUid should return unique id of bucket', () => {
assert.deepStrictEqual(dummyBucket.getUid(), testUid);
});
it('isNFS should return whether bucket is on NFS', () => {
assert.deepStrictEqual(dummyBucket.isNFS(), true);
});
it('setIsNFS should set whether bucket is on NFS', () => {
dummyBucket.setIsNFS(false);
assert.deepStrictEqual(dummyBucket.isNFS(), false);
});
});
describe('setters on BucketInfo class', () => {
@ -328,8 +358,7 @@ Object.keys(acl).forEach(
protocol: 'https',
},
};
dummyBucket.setWebsiteConfiguration(newWebsiteConfiguration);
assert.deepStrictEqual(dummyBucket.getWebsiteConfiguration(),
newWebsiteConfiguration);
});
@ -381,3 +410,26 @@ Object.keys(acl).forEach(
});
})
);
describe('uid default', () => {
it('should set uid if none is specified by constructor params', () => {
const dummyBucket = new BucketInfo(
bucketName, owner, ownerDisplayName, testDate,
BucketInfo.currentModelVersion(), acl[emptyAcl],
false, false, {
cryptoScheme: 1,
algorithm: 'sha1',
masterKeyId: 'somekey',
mandatory: true,
}, testVersioningConfiguration,
testLocationConstraint,
testWebsiteConfiguration,
testCorsConfiguration,
testReplicationConfiguration,
testLifecycleConfiguration);
const defaultUid = dummyBucket.getUid();
assert(defaultUid);
assert.strictEqual(defaultUid.length, 36);
});
});


@ -175,6 +175,10 @@ function generateFilter(errorTag, tagObj) {
middleTags = '<Prefix>foo</Prefix><Prefix>bar</Prefix>' +
`<Prefix>${tagObj.lastPrefix}</Prefix>`;
}
if (tagObj.label === 'mult-tags') {
middleTags = '<And><Tag><Key>color</Key><Value>blue</Value></Tag>' +
'<Tag><Key>shape</Key><Value>circle</Value></Tag></And>';
}
Filter = `<Filter>${middleTags}</Filter>`;
if (tagObj.label === 'also-prefix') {
Filter = '<Filter></Filter><Prefix></Prefix>';
@ -349,4 +353,16 @@ describe('LifecycleConfiguration class getLifecycleConfiguration', () => {
done();
});
});
it('should apply all unique Key tags if multiple tags included', done => {
tagObj.label = 'mult-tags';
generateParsedXml('Filter', tagObj, parsedXml => {
const lcConfig = new LifecycleConfiguration(parsedXml).
getLifecycleConfiguration();
const expected = [{ key: 'color', val: 'blue' },
{ key: 'shape', val: 'circle' }];
assert.deepStrictEqual(expected, lcConfig.rules[0].filter.tags);
done();
});
});
});


@ -0,0 +1,74 @@
const assert = require('assert');
const { parseString } = require('xml2js');
const werelogs = require('werelogs');
const ReplicationConfiguration =
require('../../../lib/models/ReplicationConfiguration');
const logger = new werelogs.Logger('test:ReplicationConfiguration');
const mockedConfig = {
replicationEndpoints: [{
type: 'scality',
site: 'ring',
default: true,
}, {
type: 'aws_s3',
site: 'awsbackend',
}, {
type: 'gcp',
site: 'gcpbackend',
}, {
type: 'azure',
site: 'azurebackend',
}],
};
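// Build a replication configuration XML body; when hasPreferredRead is true,
// the gcpbackend storage class is tagged with ':preferred_read'.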
function getXMLConfig(hasPreferredRead) {
return `
<ReplicationConfiguration>
<Role>arn:aws:iam::root:role/s3-replication-role</Role>
<Rule>
<ID>Replication-Rule-1</ID>
<Status>Enabled</Status>
<Prefix>someprefix/</Prefix>
<Destination>
<Bucket>arn:aws:s3:::destbucket</Bucket>
<StorageClass>awsbackend,` +
`gcpbackend${hasPreferredRead ? ':preferred_read' : ''},azurebackend` +
`</StorageClass>
</Destination>
</Rule>
</ReplicationConfiguration>
`;
}
describe('ReplicationConfiguration class', () => {
it('should parse replication config XML without preferred read', done => {
const repConfigXML = getXMLConfig(false);
parseString(repConfigXML, (err, parsedXml) => {
assert.ifError(err);
const repConf = new ReplicationConfiguration(
parsedXml, logger, mockedConfig);
const repConfErr = repConf.parseConfiguration();
assert.ifError(repConfErr);
assert.strictEqual(repConf.getPreferredReadLocation(), null);
done();
});
});
it('should parse replication config XML with preferred read', done => {
const repConfigXML = getXMLConfig(true);
parseString(repConfigXML, (err, parsedXml) => {
assert.ifError(err);
const repConf = new ReplicationConfiguration(
parsedXml, logger, mockedConfig);
const repConfErr = repConf.parseConfiguration();
assert.ifError(repConfErr);
assert.strictEqual(repConf.getPreferredReadLocation(),
'gcpbackend');
done();
});
});
});


@ -82,6 +82,7 @@ describe('ObjectMD class setters/getters', () => {
role: '',
storageType: '',
dataStoreVersionId: '',
isNFS: null,
}],
['ReplicationInfo', {
status: 'PENDING',
@ -97,8 +98,11 @@ describe('ObjectMD class setters/getters', () => {
'arn:aws:iam::account-id:role/dest-resource',
storageType: 'aws_s3',
dataStoreVersionId: '',
isNFS: null,
}],
['DataStoreName', null, ''],
['ReplicationIsNFS', null, null],
['ReplicationIsNFS', true],
].forEach(test => {
const property = test[0];
const testValue = test[1];
@ -192,6 +196,15 @@ describe('ObjectMD class setters/getters', () => {
assert.strictEqual(
md.getReplicationSiteDataStoreVersionId('zenko'), 'a');
});
it('ObjectMd::isMultipartUpload', () => {
md.setContentMd5('68b329da9893e34099c7d8ad5cb9c940');
assert.strictEqual(md.isMultipartUpload(), false);
md.setContentMd5('741e0f4bad5b093044dc54a74d911094-1');
assert.strictEqual(md.isMultipartUpload(), true);
md.setContentMd5('bda0c0bed89c8bdb9e409df7ae7073c5-9876');
assert.strictEqual(md.isMultipartUpload(), true);
});
});
describe('ObjectMD import from stored blob', () => {


@ -0,0 +1,170 @@
const assert = require('assert');
const HealthProbeServer =
require('../../../../lib/network/probe/HealthProbeServer');
const http = require('http');
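// Build (but do not send) an HTTP request to the probe server on port 4042;
// callers attach 'response'/'error' handlers and then call .end().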
function makeRequest(meth, uri) {
const params = {
hostname: 'localhost',
port: 4042,
method: meth,
path: uri,
};
const req = http.request(params);
req.setNoDelay(true);
return req;
}
const endpoints = [
'/_/health/liveness',
'/_/health/readiness',
];
const badEndpoints = [
'/_/health/liveness_thisiswrong',
'/_/health/readiness_thisiswrong',
];
describe('network.probe.HealthProbeServer', () => {
describe('service is "up"', () => {
let server;
function setup(done) {
server = new HealthProbeServer({ port: 4042 });
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
endpoints.forEach(ep => {
it('should perform a GET and ' +
'return 200 OK', done => {
makeRequest('GET', ep)
.on('response', res => {
assert(res.statusCode === 200);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
describe('service is "down"', () => {
let server;
function setup(done) {
function falseStub() {
return false;
}
server = new HealthProbeServer({
port: 4042,
livenessCheck: falseStub,
readinessCheck: falseStub,
});
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
endpoints.forEach(ep => {
it('should perform a GET and ' +
'return 503 ServiceUnavailable', done => {
makeRequest('GET', ep)
.on('response', res => {
assert(res.statusCode === 503);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
describe('Invalid Methods', () => {
let server;
function setup(done) {
server = new HealthProbeServer({
port: 4042,
});
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
endpoints.forEach(ep => {
it('should perform a POST and ' +
'return 405 MethodNotAllowed', done => {
makeRequest('POST', ep)
.on('response', res => {
assert(res.statusCode === 405);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
describe('Invalid URI', () => {
let server;
function setup(done) {
server = new HealthProbeServer({
port: 4042,
});
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
badEndpoints.forEach(ep => {
it('should perform a GET and ' +
'return 400 InvalidURI', done => {
makeRequest('GET', ep)
.on('response', res => {
assert(res.statusCode === 400);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
});


@ -7,6 +7,7 @@ const {
_checkEtagNoneMatch,
_checkModifiedSince,
_checkUnmodifiedSince,
checkDateModifiedHeaders,
validateConditionalHeaders,
} = require('../../../lib/s3middleware/validateConditionalHeaders');
@ -172,6 +173,59 @@ describe('validateConditionalHeaders util function ::', () => {
});
});
describe('checkDateModifiedHeaders util function: ', () => {
const expectedSuccess = {
present: true,
error: null,
};
const expectedAbsence = {
present: false,
error: null,
};
it('should return NotModified error for \'if-modified-since\' header',
() => {
const header = {};
header['if-modified-since'] = afterLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(modifiedSinceRes.error, errors.NotModified);
assert.deepStrictEqual(unmodifiedSinceRes, expectedAbsence);
});
it('should return PreconditionFailed error for \'if-unmodified-since\' ' +
'header', () => {
const header = {};
header['if-unmodified-since'] = beforeLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(unmodifiedSinceRes.error,
errors.PreconditionFailed);
assert.deepStrictEqual(modifiedSinceRes, expectedAbsence);
});
it('should succeed if \'if-modified-since\' header value is earlier ' +
'than last modified', () => {
const header = {};
header['if-modified-since'] = beforeLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(modifiedSinceRes, expectedSuccess);
assert.deepStrictEqual(unmodifiedSinceRes, expectedAbsence);
});
it('should succeed if \'if-unmodified-since\' header value is later ' +
'than last modified', () => {
const header = {};
header['if-unmodified-since'] = afterLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(unmodifiedSinceRes, expectedSuccess);
assert.deepStrictEqual(modifiedSinceRes, expectedAbsence);
});
});
describe('_checkEtagMatch function :', () => {
const expectedSuccess = {
present: true,


@ -0,0 +1,36 @@
const assert = require('assert');
const routesUtils = require('../../../../lib/s3routes/routesUtils.js');
const bannedStr = 'banned';
const prefixBlacklist = [];
// byte size of 915
const keyutf8 = '%EA%9D%8B崰㈌㒈保轖䳷䀰⺩ቆ楪秲ⴝ㿅鼎퇬枅࿷염곞召㸾⌙ꪊᆐ庍뉆䌗幐鸆䛃➟녩' +
'ˍ뙪臅⠙≼绒벊냂詴 끴鹲萯⇂㭢䈊퉉楝舳㷖족痴䧫㾵᏷ำꎆ꼵껪멷㄀誕㳓腜쒃컹㑻鳃삚舿췈孨੦⮀NJ곓⵪꺼꜈' +
'嗼뫘悕錸瑺⁤륒㜓垻ㆩꝿ詀펉ᆙ舑䜾힑藪碙ꀎꂰ췊Ᏻ 㘺幽醛잯ද汧Ꟑꛒⶨ쪸숞헹㭔ꡔᘼ뺓ᡆ᡾ᑟ䅅퀭耓弧⢠⇙' +
'폪ް蛧⃪Ἔ돫ꕢ븥ヲ캂䝄쟐颺ᓾ둾Ұ껗礞ᾰ瘹蒯硳풛瞋襎奺熝妒컚쉴⿂㽝㝳駵鈚䄖戭䌸᫲ᇁ䙪鸮ᐴ稫ⶭ뀟ھ⦿' +
'䴳稉ꉕ捈袿놾띐✯伤䃫⸧ꠏ瘌틳藔ˋ㫣敀䔩㭘식↴⧵佶痊牌ꪌ搒꾛æᤈべ쉴挜敗羥誜嘳ֶꫜ걵ࣀ묟ኋ拃秷膤䨸菥' +
'䟆곘縧멀煣卲챸⧃⏶혣ਔ뙞밺㊑ک씌촃Ȅ頰ᖅ懚ホῐ꠷㯢먈㝹୥밷㮇䘖桲阥黾噘烻ᓧ鈠ᴥ徰穆ꘛ蹕綻表鯍裊' +
'鮕漨踒ꠍ픸Ä☶莒浏钸목탬툖氭ˠٸ൪㤌ᶟ訧ᜒೳ揪Ⴛ摖㸣᳑⹞걀ꢢ䏹ῖ"';
describe('routesUtils.isValidObjectKey', () => {
it('should return isValid false if object key name starts with a ' +
'blacklisted prefix', () => {
const result = routesUtils.isValidObjectKey('bannedkey', [bannedStr]);
// return { isValid: false, invalidPrefix };
assert.strictEqual(result.isValid, false);
assert.strictEqual(result.invalidPrefix, bannedStr);
});
it('should return isValid false if object key name exceeds length of 915',
() => {
const key = 'a'.repeat(916);
const result = routesUtils.isValidObjectKey(key, prefixBlacklist);
assert.strictEqual(result.isValid, false);
});
it('should return isValid true for a utf8 string of byte size 915', () => {
const result = routesUtils.isValidObjectKey(keyutf8, prefixBlacklist);
assert.strictEqual(result.isValid, true);
});
});


@ -0,0 +1,76 @@
'use strict'; //eslint-disable-line
const assert = require('assert');
const { markerFilterMPU } =
require('../../../../../lib/storage/metadata/in_memory/bucket_utilities');
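// Copy each MPU entry so every test case filters its own array.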
function dupeArray(arr) {
const dupe = [];
arr.forEach(i => {
dupe.push(Object.assign({}, i));
});
return dupe;
}
describe('bucket utility methods for in_memory backend', () => {
it('should return an array of multipart uploads starting with the item ' +
'right after the specified keyMarker and uploadIdMarker', () => {
const mpus = [
{
key: 'key-1',
uploadId: '2624ca6080c841d48a2481941df868a9',
},
{
key: 'key-1',
uploadId: '4ffeca96b0c24ea9b538b8f0b60cede3',
},
{
key: 'key-1',
uploadId: '52e5b94474894990a2b94330bb3c8fa9',
},
{
key: 'key-1',
uploadId: '54e530c5d4c741898c8e161d426591cb',
},
{
key: 'key-1',
uploadId: '6cc59f9d29254e81ab6cb6332fb46314',
},
{
key: 'key-1',
uploadId: 'fe9dd10776c9476697632d0b55960a05',
},
{
key: 'key-2',
uploadId: '68e24ccb96c14beea79bf01fc130fdf5',
},
];
[
{
keyMarker: 'key-1',
uploadIdMarker: '54e530c5d4c741898c8e161d426591cb',
expected: 3,
},
{
keyMarker: 'key-2',
uploadIdMarker: '68e24ccb96c14beea79bf01fc130fdf5',
expected: 0,
},
{
keyMarker: 'key-1',
uploadIdMarker: '2624ca6080c841d48a2481941df868a9',
expected: 6,
},
].forEach(item => {
const res = markerFilterMPU(item, dupeArray(mpus));
assert.equal(res.length, item.expected);
const expected = mpus.slice(mpus.length - res.length);
assert.deepStrictEqual(res, expected);
});
});
});


@ -0,0 +1,507 @@
const assert = require('assert');
const {
NEW_OBJ,
NEW_VER,
UPDATE_VER,
UPDATE_MST,
RESTORE,
DEL_VER,
DEL_MST,
DataCounter,
} = require('../../../../../lib/storage/metadata/mongoclient/DataCounter');
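// Reference snapshots of DataCounter.results() used as initial states and
// expectations throughout the tests below.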
const refZeroObj = {
objects: 0,
versions: 0,
dataManaged: {
total: { curr: 0, prev: 0 },
byLocation: {},
},
stalled: 0,
};
const refSingleObj = {
objects: 2,
versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
},
},
stalled: 0,
};
const refSingleObjVer = {
objects: 1,
versions: 1,
dataManaged: {
total: { curr: 100, prev: 100 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
},
},
stalled: 0,
};
const refMultiObjVer = {
objects: 1,
versions: 1,
dataManaged: {
total: { curr: 200, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
locationTwo: { curr: 100, prev: 100 },
},
},
stalled: 0,
};
const refMultiObj = {
objects: 2,
versions: 0,
dataManaged: {
total: { curr: 400, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
locationTwo: { curr: 200, prev: 0 },
},
},
stalled: 0,
};
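// Minimal object metadata factories used as DataCounter input: object size,
// primary data store name and replication backends.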
const singleSite = size => ({
'content-length': size,
'dataStoreName': 'locationOne',
'replicationInfo': {
backends: [],
},
});
const multiSite = (size, isComplete) => ({
'content-length': size,
'dataStoreName': 'locationOne',
'replicationInfo': {
backends: [{
site: 'locationTwo',
status: isComplete ? 'COMPLETED' : 'PENDING',
}],
},
});
const transientSite = (size, status, backends) => ({
'content-length': size,
'dataStoreName': 'locationOne',
'replicationInfo': { status, backends },
});
const locationConstraints = {
locationOne: { isTransient: true },
locationTwo: { isTransient: false },
};
const dataCounter = new DataCounter();
describe('DataCounter Class', () => {
it('should create a zero object', () => {
dataCounter.set(refZeroObj);
assert.deepStrictEqual(dataCounter.results(), refZeroObj);
});
it('should skip dataCounter methods if initial values are not set', () => {
const testCounter = new DataCounter();
testCounter.addObject(singleSite(100), null, NEW_OBJ);
assert.deepStrictEqual(testCounter.results(), refZeroObj);
});
});
describe('DataCounter::updateTransientList', () => {
afterEach(() => dataCounter.updateTransientList({}));
it('should set transient list', () => {
assert.deepStrictEqual(dataCounter.transientList, {});
dataCounter.updateTransientList(locationConstraints);
const expectedRes = { locationOne: true, locationTwo: false };
assert.deepStrictEqual(dataCounter.transientList, expectedRes);
});
});
describe('DataCounter::addObject', () => {
const tests = [
{
it: 'should correctly update DataCounter, new object one site',
init: refZeroObj,
input: [singleSite(100), null, NEW_OBJ],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 100, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, new object multi site',
init: refZeroObj,
input: [multiSite(100, true), null, NEW_OBJ],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, overwrite single site',
init: refSingleObj,
input: [singleSite(100), singleSite(50), NEW_OBJ],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 250, prev: 0 },
byLocation: {
locationOne: { curr: 250, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, overwrite multi site',
init: refMultiObj,
input: [multiSite(100, true), multiSite(50, true), NEW_OBJ],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 500, prev: 0 },
byLocation: {
locationOne: { curr: 250, prev: 0 },
locationTwo: { curr: 250, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, new version single site',
init: refSingleObj,
input: [singleSite(100), singleSite(50), NEW_VER],
expectedRes: {
objects: 2, versions: 1,
dataManaged: {
total: { curr: 250, prev: 50 },
byLocation: {
locationOne: { curr: 250, prev: 50 },
},
},
},
},
{
it: 'should correctly update DataCounter, new version multi site',
init: refMultiObj,
input: [multiSite(100, true), multiSite(50, true), NEW_VER],
expectedRes: {
objects: 2, versions: 1,
dataManaged: {
total: { curr: 500, prev: 100 },
byLocation: {
locationOne: { curr: 250, prev: 50 },
locationTwo: { curr: 250, prev: 50 },
},
},
},
},
{
it: 'should correctly ignore pending status, multi site',
init: refZeroObj,
input: [multiSite(100, false), null, NEW_OBJ],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 100, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'replication completion update in master object',
init: refSingleObj,
input: [multiSite(100, true), multiSite(100, false), UPDATE_MST],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 300, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'replication completion update in versioned object',
init: refSingleObjVer,
input: [multiSite(100, true), multiSite(100, false), UPDATE_VER],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 100, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
locationTwo: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'restoring versioned object as master',
init: refMultiObjVer,
input: [multiSite(100, true), multiSite(100, true), RESTORE],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 400, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
locationTwo: { curr: 200, prev: 0 },
},
},
},
},
];
tests.forEach(test => it(test.it, () => {
const { expectedRes, input, init } = test;
dataCounter.set(init);
dataCounter.addObject(...input);
const testResults = dataCounter.results();
Object.keys(expectedRes).forEach(key => {
if (typeof expectedRes[key] === 'object') {
assert.deepStrictEqual(testResults[key], expectedRes[key]);
} else {
assert.strictEqual(testResults[key], expectedRes[key]);
}
});
}));
});
describe('DataCounter, update with transient location', () => {
before(() => dataCounter.updateTransientList(locationConstraints));
after(() => dataCounter.updateTransientList({}));
const pCurrMD = transientSite(100, 'PENDING', [
{ site: 'site1', status: 'PENDING' },
{ site: 'site2', status: 'COMPLETED' },
]);
const cCurrMD = transientSite(100, 'COMPLETED', [
{ site: 'site1', status: 'COMPLETED' },
{ site: 'site2', status: 'COMPLETED' },
]);
const prevMD = transientSite(100, 'PENDING', [
{ site: 'site1', status: 'PENDING' },
{ site: 'site2', status: 'PENDING' },
]);
const transientTest = [
{
it: 'should correctly update DataCounter, ' +
'version object, replication status = PENDING',
init: refSingleObjVer,
input: [pCurrMD, prevMD, UPDATE_VER],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 100, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
site2: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'version object, replication status = COMPLETED',
init: refSingleObjVer,
input: [cCurrMD, prevMD, UPDATE_VER],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 100, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
site1: { curr: 0, prev: 100 },
site2: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'master object, replication status = PENDING',
init: refSingleObjVer,
input: [pCurrMD, prevMD, UPDATE_MST],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 200, prev: 100 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
site2: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'master object, replication status = COMPLETED',
init: refSingleObjVer,
input: [cCurrMD, prevMD, UPDATE_MST],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 200, prev: 100 },
byLocation: {
locationOne: { curr: 0, prev: 100 },
site1: { curr: 100, prev: 0 },
site2: { curr: 100, prev: 0 },
},
},
},
},
];
transientTest.forEach(test => it(test.it, () => {
const { expectedRes, input, init } = test;
dataCounter.set(init);
dataCounter.addObject(...input);
const testResults = dataCounter.results();
Object.keys(expectedRes).forEach(key => {
if (typeof expectedRes[key] === 'object') {
assert.deepStrictEqual(testResults[key], expectedRes[key]);
} else {
assert.strictEqual(testResults[key], expectedRes[key]);
}
});
}));
});
describe('DataCounter::delObject', () => {
const tests = [
{
it: 'should correctly update DataCounter, ' +
'delete master object single site',
init: refMultiObj,
input: [singleSite(100), DEL_MST],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 300, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 200, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'delete master object multi site',
init: refMultiObj,
input: [multiSite(100, true), DEL_MST],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'delete versioned object single site',
init: refMultiObjVer,
input: [singleSite(100), DEL_VER],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 100 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'delete versioned object multi site',
init: refMultiObjVer,
input: [multiSite(100, true), DEL_VER],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should clamp negative values to 0, master object',
init: refMultiObjVer,
input: [multiSite(300, true), DEL_MST],
expectedRes: {
objects: 0, versions: 1,
dataManaged: {
total: { curr: 0, prev: 200 },
byLocation: {
locationOne: { curr: 0, prev: 100 },
locationTwo: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should clamp negative values to 0, versioned object',
init: refMultiObjVer,
input: [multiSite(300, true), DEL_VER],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
];
tests.forEach(test => it(test.it, () => {
const { expectedRes, input, init } = test;
dataCounter.set(init);
dataCounter.delObject(...input);
const testResults = dataCounter.results();
Object.keys(expectedRes).forEach(key => {
if (typeof expectedRes[key] === 'object') {
assert.deepStrictEqual(testResults[key], expectedRes[key]);
} else {
assert.strictEqual(testResults[key], expectedRes[key]);
}
});
}));
});


@ -0,0 +1,367 @@
const assert = require('assert');
const { Timestamp } = require('bson');
const ListRecordStream = require(
'../../../../../lib/storage/metadata/mongoclient/ListRecordStream');
const DummyRequestLogger = require('./utils/DummyRequestLogger');
const logger = new DummyRequestLogger();
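// Raw oplog entries (insert/update/delete on 'metadata.*' namespaces) that
// ListRecordStream is expected to translate into the stream entries below.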
const mongoProcessedLogEntries = {
insert: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'i',
ns: 'metadata.replicated-bucket',
o: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
value: {
someField: 'someValue',
},
},
},
updateObject: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'u',
ns: 'metadata.replicated-bucket',
o2: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
},
o: {
$set: {
value: {
someField: 'someUpdatedValue',
},
},
},
},
deleteObject: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'd',
ns: 'metadata.replicated-bucket',
o: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
},
},
putBucketAttributes: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'u',
ns: 'metadata.__metastore',
o2: {
_id: 'new-bucket',
}, o: {
_id: 'new-bucket',
value: {
someField: 'someValue',
},
},
},
deleteBucket: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'd',
ns: 'metadata.__metastore',
o: {
_id: 'new-bucket',
},
},
};
const mongoIgnoredLogEntries = {
createBucket: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'c',
ns: 'metadata.$cmd',
o: {
create: 'new-bucket',
idIndex: {
v: 2,
key: { _id: 1 },
name: '_id_',
ns: 'metadata.new-bucket',
},
},
},
dropBucketDb: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'c',
ns: 'metadata.$cmd',
o: {
drop: 'new-bucket',
},
},
};
const expectedStreamEntries = {
insert: {
db: 'replicated-bucket',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'put',
value: '{"someField":"someValue"}',
},
],
timestamp: new Date(42000),
},
updateObject: {
db: 'replicated-bucket',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'put',
value: '{"someField":"someUpdatedValue"}',
},
],
timestamp: new Date(42000),
},
deleteObject: {
db: 'replicated-bucket',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'delete',
},
],
timestamp: new Date(42000),
},
putBucketAttributes: {
db: '__metastore',
entries: [
{
key: 'new-bucket',
type: 'put',
value: '{"someField":"someValue"}',
},
],
timestamp: new Date(42000),
},
deleteBucket: {
db: '__metastore',
entries: [
{
key: 'new-bucket',
type: 'delete',
},
],
timestamp: new Date(42000),
},
dropBucketDb: {
h: -42,
op: 'c',
ns: 'metadata.$cmd',
o: {
drop: 'new-bucket',
},
},
};
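// Emulates a tailable oplog cursor: yields the queued items one at a time,
// optionally fails at errorAtPos, and otherwise never calls back again.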
class MongoCursorMock {
constructor(itemsToYield, errorAtPos) {
this.itemsToYield = itemsToYield;
this.pos = 0;
this.errorAtPos = errorAtPos;
}
next(cb) {
// if there's no more item, just hang out there waiting for
// items that will never come (this is how the real mongo
// tailable cursor would behave)
if (this.pos === this.errorAtPos) {
return process.nextTick(() => cb(new Error('boo')));
}
if (!this.hasSentAllItems()) {
const pos = this.pos;
this.pos += 1;
return process.nextTick(() => cb(null, this.itemsToYield[pos]));
}
return undefined;
}
hasSentAllItems() {
return this.pos === this.itemsToYield.length;
}
}
describe('mongoclient.ListRecordStream', () => {
const lastEndIDEntry = {
h: -43,
ts: Timestamp.fromNumber(42),
};
Object.keys(mongoProcessedLogEntries).forEach(entryType => {
it(`should transform ${entryType}`, done => {
// first write will be ignored by ListRecordStream because
// of the last end ID (-42), it's needed though to bootstrap it
const cursor = new MongoCursorMock([
lastEndIDEntry,
mongoProcessedLogEntries[entryType],
]);
const lrs = new ListRecordStream(cursor, logger,
lastEndIDEntry.h.toString());
let hasReceivedData = false;
lrs.on('data', entry => {
assert.strictEqual(hasReceivedData, false);
hasReceivedData = true;
assert.deepStrictEqual(entry, expectedStreamEntries[entryType]);
if (cursor.hasSentAllItems()) {
assert.strictEqual(hasReceivedData, true);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '-42' });
done();
}
});
});
});
it('should ignore other entry types', done => {
// first write will be ignored by ListRecordStream because
// of the last end ID (-43), it's needed though to bootstrap it
const logEntries = [lastEndIDEntry];
Object.keys(mongoIgnoredLogEntries).forEach(entryType => {
logEntries.push(mongoIgnoredLogEntries[entryType]);
});
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger,
lastEndIDEntry.h.toString());
lrs.on('data', entry => {
assert(false, `ListRecordStream did not ignore entry ${entry}`);
});
setTimeout(() => {
assert.strictEqual(cursor.hasSentAllItems(), true);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '-42' });
done();
}, 200);
});
it('should skip entries until uniqID is encountered', done => {
const logEntries = [
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 1234, ts: Timestamp.fromNumber(45) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 5678, ts: Timestamp.fromNumber(44) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: -1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 2345, ts: Timestamp.fromNumber(42) }),
];
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger, '5678');
assert.strictEqual(lrs.reachedUnpublishedListing(), false);
let nbReceivedEntries = 0;
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedStreamEntries.insert);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
++nbReceivedEntries;
if (cursor.hasSentAllItems()) {
assert.strictEqual(nbReceivedEntries, 2);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '2345' });
assert.strictEqual(lrs.getSkipCount(), 2);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
done();
}
});
});
it('should start after latest entry if uniqID is not encountered', done => {
const logEntries = [
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 1234, ts: Timestamp.fromNumber(45) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 5678, ts: Timestamp.fromNumber(44) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: -1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 2345, ts: Timestamp.fromNumber(42) }),
];
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger, '4242', '-1234');
let nbReceivedEntries = 0;
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedStreamEntries.insert);
++nbReceivedEntries;
if (cursor.hasSentAllItems()) {
assert.strictEqual(nbReceivedEntries, 1);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '2345' });
assert.strictEqual(lrs.getSkipCount(), 3);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
done();
}
});
});
it('should consume from the first entry if there is no saved ID', done => {
const logEntries = [
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 5678, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: -1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 2345, ts: Timestamp.fromNumber(42) }),
];
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger, undefined, '-1234');
let nbReceivedEntries = 0;
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedStreamEntries.insert);
++nbReceivedEntries;
if (cursor.hasSentAllItems()) {
assert.strictEqual(nbReceivedEntries, 4);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '2345' });
assert.strictEqual(lrs.getSkipCount(), 0);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
done();
}
});
});
it('should emit an error event when cursor returns an error', done => {
const cursor = new MongoCursorMock([], 0);
const lrs = new ListRecordStream(cursor, logger, '4242', '-1234');
lrs.on('data', () => {
assert(false, 'did not expect data');
});
lrs.on('error', () => done());
});
it('should support bucket names with dots', done => {
const logEntry = {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'i',
ns: 'metadata.some.bucket.with.dots',
o: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
value: {
someField: 'someValue',
},
},
};
const expectedLogEntry = {
db: 'some.bucket.with.dots',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'put',
value: '{"someField":"someValue"}',
},
],
timestamp: new Date(42000),
};
const cursor = new MongoCursorMock([
lastEndIDEntry,
logEntry,
]);
const lrs = new ListRecordStream(cursor, logger,
lastEndIDEntry.h.toString());
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedLogEntry);
done();
});
});
});


@ -0,0 +1,238 @@
const assert = require('assert');
const MongoClientInterface = require(
'../../../../../lib/storage/metadata/mongoclient/MongoClientInterface');
const DummyMongoDB = require('./utils/DummyMongoDB');
const DummyConfigObject = require('./utils/DummyConfigObject');
const DummyRequestLogger = require('./utils/DummyRequestLogger');
const log = new DummyRequestLogger();
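// Use an empty config and swap in a dummy db so _handleResults/_handleMongo
// can be unit-tested without a live MongoDB.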
const mongoTestClient = new MongoClientInterface({});
mongoTestClient.db = new DummyMongoDB();
describe('MongoClientInterface, init behavior', () => {
let s3ConfigObj;
const locationConstraints = {
locationOne: { isTransient: true },
locationTwo: { isTransient: false },
};
beforeEach(() => {
s3ConfigObj = new DummyConfigObject();
});
it('should set DataCounter transientList when declaring a ' +
'new MongoClientInterface object', () => {
s3ConfigObj.setLocationConstraints(locationConstraints);
const mongoClient = new MongoClientInterface({ config: s3ConfigObj });
const expectedRes = { locationOne: true, locationTwo: false };
assert.deepStrictEqual(
mongoClient.dataCount.transientList, expectedRes);
});
it('should update DataCounter transientList if location constraints ' +
'are updated', done => {
const mongoClient = new MongoClientInterface({ config: s3ConfigObj });
assert.deepStrictEqual(mongoClient.dataCount.transientList, {});
const expectedRes = { locationOne: true, locationTwo: false };
s3ConfigObj.once('MongoClientTestDone', () => {
assert.deepStrictEqual(
mongoClient.dataCount.transientList, expectedRes);
return done();
});
s3ConfigObj.setLocationConstraints(locationConstraints);
});
});
describe('MongoClientInterface::_handleResults', () => {
it('should return zero-result', () => {
const testInput = {
masterCount: 0, masterData: {},
nullCount: 0, nullData: {},
versionCount: 0, versionData: {},
};
const testResults = mongoTestClient._handleResults(testInput, true);
const expectedRes = {
versions: 0, objects: 0,
dataManaged: {
total: { curr: 0, prev: 0 },
locations: {},
},
};
assert.deepStrictEqual(testResults, expectedRes);
});
it('should return correct value if isVer is false', () => {
const testInput = {
masterCount: 2, masterData: { test1: 10, test2: 10 },
nullCount: 2, nullData: { test1: 10, test2: 10 },
versionCount: 2, versionData: { test1: 20, test2: 20 },
};
const testResults = mongoTestClient._handleResults(testInput, false);
const expectedRes = {
versions: 0, objects: 4,
dataManaged: {
total: { curr: 40, prev: 0 },
locations: {
test1: { curr: 20, prev: 0 },
test2: { curr: 20, prev: 0 },
},
},
};
assert.deepStrictEqual(testResults, expectedRes);
});
it('should return correct value if isVer is true', () => {
const testInput = {
masterCount: 2, masterData: { test1: 10, test2: 10 },
nullCount: 2, nullData: { test1: 10, test2: 10 },
versionCount: 4, versionData: { test1: 20, test2: 20 },
};
const testResults = mongoTestClient._handleResults(testInput, true);
const expectedRes = {
versions: 2, objects: 4,
dataManaged: {
total: { curr: 40, prev: 20 },
locations: {
test1: { curr: 20, prev: 10 },
test2: { curr: 20, prev: 10 },
},
},
};
assert.deepStrictEqual(testResults, expectedRes);
});
});
describe('MongoClientInterface::_handleMongo', () => {
beforeEach(() => mongoTestClient.db.reset());
it('should return error if mongo aggregate fails', done => {
const retValues = [new Error('testError')];
mongoTestClient.db.setReturnValues(retValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log, err => {
assert(err, 'Expected error, but got success');
return done();
});
});
it('should return empty object if mongo aggregate has no results', done => {
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {});
return done();
});
});
it('should return empty object if mongo aggregate has missing results',
done => {
const retValues = [[{
count: undefined,
data: undefined,
repData: undefined,
}]];
mongoTestClient.db.setReturnValues(retValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {});
return done();
});
});
const testRetValues = [[{
count: [{ _id: null, count: 100 }],
data: [
{ _id: 'locationone', bytes: 1000 },
{ _id: 'locationtwo', bytes: 1000 },
],
repData: [
{ _id: 'awsbackend', bytes: 500 },
{ _id: 'azurebackend', bytes: 500 },
{ _id: 'gcpbackend', bytes: 500 },
],
compData: [
{ _id: 'locationone', bytes: 500 },
{ _id: 'locationtwo', bytes: 500 },
],
}]];
it('should return correct results, transient false', done => {
mongoTestClient.db.setReturnValues(testRetValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {
count: 100,
data: {
locationone: 1000,
locationtwo: 1000,
awsbackend: 500,
azurebackend: 500,
gcpbackend: 500,
},
});
return done();
});
});
it('should return correct results, transient true', done => {
mongoTestClient.db.setReturnValues(testRetValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, true, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {
count: 100,
data: {
locationone: 500,
locationtwo: 500,
awsbackend: 500,
azurebackend: 500,
gcpbackend: 500,
},
});
return done();
});
});
const testRetValuesNeg = [[{
count: [{ _id: null, count: 100 }],
data: [
{ _id: 'locationone', bytes: 100 },
{ _id: 'locationtwo', bytes: 100 },
],
repData: [
{ _id: 'awsbackend', bytes: 500 },
{ _id: 'azurebackend', bytes: 500 },
{ _id: 'gcpbackend', bytes: 500 },
],
compData: [
{ _id: 'locationone', bytes: 500 },
{ _id: 'locationtwo', bytes: 500 },
],
}]];
it('should clamp negative values to 0', done => {
mongoTestClient.db.setReturnValues(testRetValuesNeg);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, true, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {
count: 100,
data: {
locationone: 0,
locationtwo: 0,
awsbackend: 500,
azurebackend: 500,
gcpbackend: 500,
},
});
return done();
});
});
});


@ -0,0 +1,16 @@
const { EventEmitter } = require('events');
class DummyConfigObject extends EventEmitter {
constructor() {
super();
this.locationConstraints = null;
this.isTest = true;
}
setLocationConstraints(locationConstraints) {
this.locationConstraints = locationConstraints;
this.emit('location-constraints-update');
}
}
module.exports = DummyConfigObject;


@ -0,0 +1,103 @@
const testError = new Error('test error');
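// Mimics the subset of the mongodb driver used by MongoClientInterface:
// aggregate/find/findOne return queued values (or errors) in FIFO order,
// while bulkWrite/update succeed unless the collection is flagged to fail.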
class DummyCollection {
constructor(name, isFail) {
this.s = {
name,
};
this.fail = isFail;
this.retQueue = [];
}
setReturnValues(retArray) {
this.retQueue.push(...retArray);
}
aggregate() {
return {
toArray: cb => {
if (this.retQueue.length <= 0) {
return cb(null, []);
}
const retVal = this.retQueue[0];
this.retQueue = this.retQueue.slice(1);
if (retVal instanceof Error) {
return cb(retVal);
}
return cb(null, retVal);
},
};
}
bulkWrite(cmds, opt, cb) {
process.stdout.write('mock mongodb.bulkWrite call\n');
if (this.fail) {
return cb(testError);
}
return cb();
}
update(target, doc, opt, cb) {
process.stdout.write('mock mongodb.update call\n');
if (this.fail) {
return cb(testError);
}
return cb();
}
find() {
return {
toArray: cb => {
if (this.retQueue.length <= 0) {
return cb(null, []);
}
const retVal = this.retQueue[0];
this.retQueue = this.retQueue.slice(1);
if (retVal instanceof Error) {
return cb(retVal);
}
return cb(null, retVal);
},
};
}
findOne(query, opt, cb) {
if (typeof opt === 'function' && cb === undefined) {
// eslint-disable-next-line no-param-reassign
cb = opt;
}
if (this.retQueue.length <= 0) {
return cb(null);
}
const retVal = this.retQueue[0];
this.retQueue = this.retQueue.slice(1);
if (retVal instanceof Error) {
return cb(retVal);
}
return cb(null, retVal);
}
}
class DummyMongoDB {
constructor() {
this.fail = false;
this.returnQueue = [];
}
reset() {
this.fail = false;
this.returnQueue = [];
}
setReturnValues(retValues) {
this.returnQueue.push(...retValues);
}
collection(name) {
const c = new DummyCollection(name, this.fail);
c.setReturnValues(this.returnQueue);
return c;
}
}
module.exports = DummyMongoDB;
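A hedged sketch of how the mock's return queue flows into a collection call (require path is an assumption; the queued value mirrors the testRetValues shape used in the tests above):

const DummyMongoDB = require('./dummyMongoDB'); // path assumed
const db = new DummyMongoDB();
db.setReturnValues([[{ count: [{ _id: null, count: 1 }], data: [] }]]);
db.collection('test').aggregate().toArray((err, res) => {
    // res === [{ count: [...], data: [] }]; an Error queued instead would arrive as err
});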

View File

@ -0,0 +1,58 @@
class DummyRequestLogger {
constructor() {
this.ops = [];
this.counts = {
trace: 0,
debug: 0,
info: 0,
warn: 0,
error: 0,
fatal: 0,
};
this.defaultFields = {};
}
trace(msg) {
this.ops.push(['trace', [msg]]);
this.counts.trace += 1;
}
debug(msg) {
this.ops.push(['debug', [msg]]);
this.counts.debug += 1;
}
info(msg) {
this.ops.push(['info', [msg]]);
this.counts.info += 1;
}
warn(msg) {
this.ops.push(['warn', [msg]]);
this.counts.warn += 1;
}
error(msg) {
this.ops.push(['error', [msg]]);
this.counts.error += 1;
}
fatal(msg) {
this.ops.push(['fatal', [msg]]);
this.counts.fatal += 1;
}
getSerializedUids() { // eslint-disable-line class-methods-use-this
return 'dummy:Serialized:Uids';
}
addDefaultFields(fields) {
Object.assign(this.defaultFields, fields);
}
end() {
return this;
}
}
module.exports = DummyRequestLogger;
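A brief sketch of the assertions a test can make against this logger (require path is an assumption):

const assert = require('assert');
const DummyRequestLogger = require('./DummyRequestLogger'); // path assumed
const log = new DummyRequestLogger();
log.error('boom');
assert.strictEqual(log.counts.error, 1);
assert.deepStrictEqual(log.ops[0], ['error', ['boom']]);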

View File

@ -0,0 +1,24 @@
const basicMD = {
'content-length': 0,
'key': '',
'versionId': '',
'replicationInfo': {
backends: [], // site, status
},
'dataStoreName': 'mongotest',
};
function generateMD(objKey, size, versionId, repBackends) {
const retMD = JSON.parse(JSON.stringify(basicMD));
retMD.key = objKey;
retMD['content-length'] = size;
retMD.versionId = versionId;
if (repBackends && Array.isArray(repBackends)) {
retMD.replicationInfo.backends.push(...repBackends);
}
return retMD;
}
module.exports = {
generateMD,
};
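A hedged example of the metadata this helper produces (module path assumed; the backend entry shape follows the `// site, status` hint above):

const { generateMD } = require('./utils'); // path assumed
const md = generateMD('test-object', 1024, 'v1', [
    { site: 'awsbackend', status: 'PENDING' },
]);
// md.key === 'test-object', md['content-length'] === 1024,
// md.replicationInfo.backends.length === 1, md.dataStoreName === 'mongotest'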

View File

@ -28,9 +28,7 @@ describe('test generating versionIds', () => {
 // nodejs 10 no longer returns error for non-hex string versionIds
 it.skip('should return error decoding non-hex string versionIds', () => {
-    const encoded = vids.map(vid => VID.encode(vid));
-    const decoded = encoded.map(vid => VID.decode(`${vid}foo`));
-    decoded.forEach(result => assert(result instanceof Error));
+    assert(VID.decode('foo') instanceof Error);
 });
 it('should encode and decode versionIds', () => {

View File

@ -0,0 +1,372 @@
'use strict'; // eslint-disable-line strict
/* eslint new-cap: "off" */
/* eslint dot-notation: "off" */
const assert = require('assert');
const crypto = require('crypto');
const uuidv4 = require('uuid/v4');
const {
EchoChannel,
logger,
} = require('./ersatz.js');
const expectedObjectType = 'Symmetric Key';
const expectedAlgorithm = 'AES';
const expectedLength = 256;
const expectedBlockCipherMode = 'CBC';
const expectedPaddingMethod = 'PKCS5';
const expectedIV = Buffer.alloc(16).fill(0);
const versionMajor = 1;
const versionMinor = 4;
const vendorIdentification = 'Scality Loopback KMIP Server';
const serverExtensions = [
{
name: 'Security Level',
tag: 0x541976,
type: 7,
value: 'Gangsta Grade',
},
{
name: 'Prefered Algorithm',
tag: 0x542008,
type: 7,
value: 'Kevin Bacon',
},
{
name: 'Yo Dawg',
tag: 0x542011,
type: 7,
value: 'I heard you like kmip, so I put a server in your client ' +
'so you can do both ends of the conversation while you are ' +
'talking about server side encryption',
},
];
class DummyServerTransport {
registerHandshakeFunction() {
throw new Error('DummyServerTransport::registerHandshakeFunction: ' +
'Client side operations not implemented');
}
send() {
throw new Error('DummyServerTransport::send: ' +
'Client side operations not implemented');
}
}
class LoopbackServerChannel extends EchoChannel {
constructor(KMIPClass, Codec, options) {
super();
this.options = options || {
kmip: {
codec: {},
transport: {},
},
};
this.KMIP = KMIPClass;
this.kmip = new KMIPClass(Codec, DummyServerTransport,
this.options);
serverExtensions.forEach(extension => {
this.kmip.mapExtension(extension.name, extension.tag);
});
this.managedKeys = {};
}
write(data) {
const request = this.kmip._decodeMessage(logger, data);
const requestOperation = request.lookup(
'Request Message/Batch Item/Operation')[0];
this.routeRequest(
requestOperation, request, (err, responsePayload) => {
const uniqueBatchItemID = request.lookup(
'Request Message/Batch Item/Unique Batch Item ID')[0];
const requestProtocolVersionMinor = request.lookup(
'Request Message/Request Header/Protocol Version/' +
'Protocol Version Minor')[0];
const requestProtocolVersionMajor = request.lookup(
'Request Message/Request Header/Protocol Version/' +
'Protocol Version Major')[0];
let result;
if (err) {
logger.error('Request processing failed', { error: err });
result = err;
} else {
result = [
this.KMIP.Enumeration('Result Status', 'Success'),
this.KMIP.Structure('Response Payload',
responsePayload),
];
}
const response = this.KMIP.Message([
this.KMIP.Structure('Response Message', [
this.KMIP.Structure('Response Header', [
this.KMIP.Structure('Protocol Version', [
this.KMIP.Integer('Protocol Version Major',
requestProtocolVersionMajor),
this.KMIP.Integer('Protocol Version Minor',
requestProtocolVersionMinor),
]),
this.KMIP.DateTime('Time Stamp', new Date),
this.KMIP.Integer('Batch Count', 1),
]),
this.KMIP.Structure('Batch Item', [
this.KMIP.Enumeration('Operation',
requestOperation),
this.KMIP.ByteString('Unique Batch Item ID',
uniqueBatchItemID),
...result,
]),
]),
]);
super.write(this.kmip._encodeMessage(response));
});
return this;
}
errorResponse(reason, message) {
return [
this.KMIP.Enumeration('Result Status', 'Operation Failed'),
this.KMIP.Enumeration('Result Reason', reason),
this.KMIP.Enumeration('Result Message', message),
];
}
routeRequest(operation, request, cb) {
switch (operation) {
case 'Query': return this.routeQuery(request, cb);
case 'Discover Versions':
return this.routeDiscoverVersions(request, cb);
case 'Create': return this.routeCreate(request, cb);
case 'Activate': return this.routeActivate(request, cb);
case 'Encrypt': return this.routeEncrypt(request, cb);
case 'Decrypt': return this.routeDecrypt(request, cb);
case 'Revoke': return this.routeRevoke(request, cb);
case 'Destroy': return this.routeDestroy(request, cb);
default: return cb(new Error(`Unknown Operation: ${operation}`));
}
}
routeQuery(request, cb) {
const queryFunctions = request.lookup(
'Request Message/Batch Item/Request Payload/Query Function');
const response = [];
if (queryFunctions.includes('Query Operations')) {
response.push(
this.KMIP.Enumeration('Operation', 'Query'),
this.KMIP.Enumeration('Operation', 'Discover Versions'),
this.KMIP.Enumeration('Operation', 'Create'),
this.KMIP.Enumeration('Operation', 'Activate'),
this.KMIP.Enumeration('Operation', 'Encrypt'),
this.KMIP.Enumeration('Operation', 'Decrypt'),
this.KMIP.Enumeration('Operation', 'Revoke'),
this.KMIP.Enumeration('Operation', 'Destroy'));
}
if (queryFunctions.includes('Query Objects')) {
response.push(
this.KMIP.Enumeration('Object Type', 'Symmetric Key'));
}
if (queryFunctions.includes('Query Server Information')) {
response.push(
this.KMIP.TextString('Vendor Identification',
vendorIdentification),
this.KMIP.Structure('Server Information',
serverExtensions.map(extension =>
this.KMIP.TextString(
extension.name,
extension.value)
)));
}
if (queryFunctions.includes('Query Extension Map')) {
serverExtensions.forEach(extension => {
response.push(
this.KMIP.Structure('Extension Information', [
this.KMIP.TextString('Extension Name', extension.name),
this.KMIP.Integer('Extension Tag', extension.tag),
/* 7 is KMIP TextString; it is not really used in
* this implementation and could be anything
* without changing the behavior of the client code. */
this.KMIP.Integer('Extension Type', 7),
]));
});
}
return cb(null, response);
}
routeDiscoverVersions(request, cb) {
const response = [
this.KMIP.Structure('Protocol Version', [
this.KMIP.Integer('Protocol Version Major', versionMajor),
this.KMIP.Integer('Protocol Version Minor', versionMinor),
]),
];
return cb(null, response);
}
routeCreate(request, cb) {
let cryptographicAlgorithm;
let cryptographicLength;
let cryptographicUsageMask;
let activationDate;
const attributes = request.lookup(
'Request Message/Batch Item/Request Payload/Template-Attribute')[0];
attributes.forEach(item => {
const attribute = item['Attribute'];
const attributeValue = attribute.value[1]['Attribute Value'];
const diversion = attributeValue.diversion;
const value = attributeValue.value;
switch (diversion) {
case 'Cryptographic Algorithm':
assert(!cryptographicAlgorithm);
cryptographicAlgorithm = value;
break;
case 'Cryptographic Length':
assert(!cryptographicLength);
cryptographicLength = value;
break;
case 'Cryptographic Usage Mask':
assert(!cryptographicUsageMask);
cryptographicUsageMask = value;
break;
case 'Activation Date':
assert(!activationDate);
activationDate = value;
break;
default:
}
});
const decodedUsageMask =
this.kmip.decodeMask('Cryptographic Usage Mask',
cryptographicUsageMask);
assert(cryptographicAlgorithm === expectedAlgorithm);
assert(cryptographicLength === expectedLength);
assert(decodedUsageMask.includes('Encrypt'));
assert(decodedUsageMask.includes('Decrypt'));
const key = Buffer.from(crypto.randomBytes(cryptographicLength / 8));
const keyIdentifier = uuidv4();
this.managedKeys[keyIdentifier] = {
key,
activationDate,
};
const response = [
this.KMIP.Enumeration('Object Type', expectedObjectType),
this.KMIP.TextString('Unique Identifier', keyIdentifier),
];
return cb(null, response);
}
routeActivate(request, cb) {
const keyIdentifier = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Unique Identifier') || [undefined])[0];
this.managedKeys[keyIdentifier].activationDate =
new Date;
const response = [
this.KMIP.TextString('Unique Identifier', keyIdentifier),
];
return cb(null, response);
}
_getIvCounterNonce(request) {
/* Having '/' in the attribute name ('IV/Counter/Nonce') clashes with
* the path separator, so Message::lookup() cannot be used directly to
* extract the IV, hence this helper function. */
const requestPayload = (
request.lookup(
'Request Message/Batch Item/Request Payload')
|| [undefined])[0];
let ivCounterNonce;
requestPayload.forEach(attr => {
const ivCounterNonceAttr = attr['IV/Counter/Nonce'];
if (ivCounterNonceAttr) {
ivCounterNonce = ivCounterNonceAttr.value;
}
});
return ivCounterNonce;
}
_transform(cipherFunc, request, cb) {
const keyIdentifier = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Unique Identifier') || [undefined])[0];
const blockCipherMode = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Cryptographic Parameters/Block Cipher Mode')
|| [undefined])[0];
const paddingMethod = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Cryptographic Parameters/Padding Method')
|| [undefined])[0];
const cryptographicAlgorithm = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Cryptographic Parameters/Cryptographic Algorithm')
|| [undefined])[0];
const ivCounterNonce = this._getIvCounterNonce(request);
const data = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Data')
|| [undefined])[0];
assert(blockCipherMode === expectedBlockCipherMode);
assert(paddingMethod === expectedPaddingMethod);
assert(cryptographicAlgorithm === expectedAlgorithm);
assert(expectedIV.compare(ivCounterNonce) === 0);
const keySpec = this.managedKeys[keyIdentifier];
const now = new Date;
assert(keySpec.activationDate && keySpec.activationDate <= now);
const cipher = cipherFunc('aes-256-cbc', keySpec.key, ivCounterNonce);
let cipheredData = cipher.update(data);
const final = cipher.final();
if (final.length !== 0) {
cipheredData = Buffer.concat([cipheredData, final]);
}
const response = [
this.KMIP.TextString('Unique Identifier', keyIdentifier),
this.KMIP.ByteString('Data', cipheredData),
];
return cb(null, response);
}
routeEncrypt(request, cb) {
return this._transform(crypto.createCipheriv, request, cb);
}
routeDecrypt(request, cb) {
return this._transform(crypto.createDecipheriv, request, cb);
}
routeRevoke(request, cb) {
const keyIdentifier = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Unique Identifier') || [undefined])[0];
this.managedKeys[keyIdentifier].activationDate = null;
const response = [
this.KMIP.TextString('Unique Identifier', keyIdentifier),
];
return cb(null, response);
}
routeDestroy(request, cb) {
const keyIdentifier = (
request.lookup(
'Request Message/Batch Item/Request Payload/' +
'Unique Identifier') || [undefined])[0];
assert(!this.managedKeys[keyIdentifier].activationDate);
this.managedKeys[keyIdentifier] = null;
const response = [
this.KMIP.TextString('Unique Identifier', keyIdentifier),
];
return cb(null, response);
}
}
module.exports = LoopbackServerChannel;
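To put the class in context, a hedged sketch of constructing the loopback channel so both ends of the KMIP conversation run in-process (require paths are assumptions; the client-side wiring that would normally write to this channel is omitted):

const KMIP = require('../../../lib/network/kmip');
const TTLVCodec = require('../../../lib/network/kmip/codec/ttlv.js');
const LoopbackServerChannel = require('./LoopbackServerChannel'); // path assumed
const channel = new LoopbackServerChannel(KMIP, TTLVCodec);
channel.on('data', encodedResponse => {
    // the encoded KMIP response produced by routeRequest() arrives here
});
// channel.write(encodedRequest) decodes, routes, and answers the request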

View File

@ -0,0 +1,127 @@
'use strict'; // eslint-disable-line strict
module.exports = [
/* Invalid type */
Buffer.from('2100000000000000', 'hex'),
Buffer.from('2100000b00000000', 'hex'),
/* Structure */
// too short
Buffer.from('42', 'hex'),
Buffer.from('4200', 'hex'),
Buffer.from('420078', 'hex'),
Buffer.from('42007801', 'hex'),
Buffer.from('4200780100', 'hex'),
Buffer.from('420078010000', 'hex'),
Buffer.from('4200780100000001', 'hex'),
Buffer.from('420078010000000100', 'hex'),
Buffer.from('4200780100000008', 'hex'),
Buffer.from('420078010000000800', 'hex'),
Buffer.from('42007801000000080000', 'hex'),
Buffer.from('4200780100000008000000', 'hex'),
Buffer.from('420078010000000800000000', 'hex'),
Buffer.from('4200780100000010', 'hex'),
/* Integer */
// too short
Buffer.from('4200780200000004', 'hex'),
Buffer.from('420078020000000400', 'hex'),
Buffer.from('42007802000000040000', 'hex'),
Buffer.from('4200780200000004000000', 'hex'),
// invalid length for the type
Buffer.from('42007802000000080000000000000000', 'hex'),
Buffer.from('42007802000000020000000000000000', 'hex'),
Buffer.from('42007802000000000000000000000000', 'hex'),
/* Long Integer */
// too short
Buffer.from('4200780300000008', 'hex'),
Buffer.from('420078030000000810', 'hex'),
Buffer.from('42007803000000081000', 'hex'),
Buffer.from('4200780300000008100000', 'hex'),
Buffer.from('420078030000000810000000', 'hex'),
Buffer.from('42007803000000081000000000', 'hex'),
Buffer.from('4200780300000008100000000000', 'hex'),
Buffer.from('420078030000000810000000000000', 'hex'),
// 53bit overflow
Buffer.from('42007803000000081000000000000000', 'hex'),
Buffer.from('4200780300000008ffffffffffffffff', 'hex'),
// invalid length for the type
Buffer.from('420078030000000400000001', 'hex'),
Buffer.from('42007803000000100000000000000000100000000000000000', 'hex'),
/* Big Integer */
// too short
Buffer.from('4200780400000001', 'hex'),
Buffer.from('420078040000000200', 'hex'),
/* Enumeration */
// too short
Buffer.from('4200740500000004', 'hex'),
Buffer.from('4200740500000004000000', 'hex'),
// invalid length for the type
Buffer.from('42007405000000020000', 'hex'),
Buffer.from('4200740500000006000000000000', 'hex'),
// non existing tag and enum value with invalid length
Buffer.from('45007405000000020000', 'hex'),
Buffer.from('4500740500000006000000000000', 'hex'),
/* Boolean */
// too short
Buffer.from('4200740600000008', 'hex'),
Buffer.from('420074060000000800', 'hex'),
Buffer.from('42007406000000080000', 'hex'),
Buffer.from('4200740600000008000000', 'hex'),
Buffer.from('420074060000000800000000', 'hex'),
Buffer.from('42007406000000080000000000', 'hex'),
Buffer.from('4200740600000008000000000000', 'hex'),
// invalid length
Buffer.from('420074060000000400000000', 'hex'),
Buffer.from('420074060000001000000000000000000000000000000000', 'hex'),
/* TextString */
// too short
Buffer.from('4200740700000008', 'hex'),
Buffer.from('420074070000000800', 'hex'),
Buffer.from('42007407000000080000', 'hex'),
Buffer.from('4200740700000008000000', 'hex'),
Buffer.from('420074070000000800000000', 'hex'),
Buffer.from('42007407000000080000000000', 'hex'),
Buffer.from('4200740700000008000000000000', 'hex'),
/* ByteString */
// too short
Buffer.from('4200740800000008', 'hex'),
Buffer.from('420074080000000800', 'hex'),
Buffer.from('42007408000000080000', 'hex'),
Buffer.from('4200740800000008000000', 'hex'),
Buffer.from('420074080000000800000000', 'hex'),
Buffer.from('42007408000000080000000000', 'hex'),
Buffer.from('4200740800000008000000000000', 'hex'),
/* Date-Time */
// too short
Buffer.from('4200740900000008', 'hex'),
Buffer.from('420074090000000800', 'hex'),
Buffer.from('42007409000000080000', 'hex'),
Buffer.from('4200740900000008000000', 'hex'),
Buffer.from('420074090000000800000000', 'hex'),
Buffer.from('42007409000000080000000000', 'hex'),
Buffer.from('4200740900000008000000000000', 'hex'),
// invalid length
Buffer.from('420074090000000400000000', 'hex'),
Buffer.from('420074090000001000000000000000000000000000000000', 'hex'),
/* Interval */
// too short
Buffer.from('4200780a00000004', 'hex'),
Buffer.from('4200780a0000000400', 'hex'),
Buffer.from('4200780a000000040000', 'hex'),
Buffer.from('4200780a00000004000000', 'hex'),
// invalid length for the type
Buffer.from('4200780a000000080000000000000000', 'hex'),
Buffer.from('4200780a000000020000000000000000', 'hex'),
Buffer.from('4200780a000000000000000000000000', 'hex'),
];
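A hedged sketch of how these malformed buffers might be exercised; it assumes the TTLV codec throws on invalid input, which the actual test may verify differently, and the fixture filename is an assumption:

const assert = require('assert');
const TTLVCodec = require('../../../lib/network/kmip/codec/ttlv.js');
const { logger } = require('./ersatz.js');
const badTlvData = require('./lowlevelFixtures'); // filename assumed
const codec = new TTLVCodec({});
badTlvData.forEach(buffer => {
    assert.throws(() => codec.decode(logger, buffer));
});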

125
tests/utils/kmip/ersatz.js Normal file
View File

@ -0,0 +1,125 @@
'use strict'; // eslint-disable-line
/* eslint new-cap: "off" */
const logger = {
info: () => {},
debug: () => {},
warn: () => {},
error: () => {},
};
/* Fake tls AND socket objects, duck type */
class EchoChannel {
constructor() {
this.clogged = false;
this.eventHandler = {};
this.deferedSignal = {};
}
/* tls object members substitutes */
connect(port, options, cb) {
process.nextTick(cb);
return this;
}
on(event, cb) {
this.eventHandler[event] = cb;
if (this.deferedSignal[event] &&
this.deferedSignal[event].length > 0) {
this.deferedSignal[event].forEach(this.eventHandler[event]);
this.deferedSignal[event] = undefined;
}
return this;
}
/* socket object members substitutes */
cork() {
return this;
}
uncork() {
return this;
}
write(data) {
if (!this.clogged) {
return this.emit('data', data);
}
return this;
}
end() {
return this.emit('end');
}
/* Instrumentation member functions */
emit(event, data) {
if (this.eventHandler[event]) {
this.eventHandler[event](data);
} else {
if (!this.deferedSignal[event]) {
this.deferedSignal[event] = [];
}
this.deferedSignal[event].push(data);
}
return this;
}
clog() {
this.clogged = true;
return this;
}
}
class MirrorChannel extends EchoChannel {
constructor(KMIPClass, Codec) {
super();
this.codec = new Codec({});
this.KMIP = KMIPClass;
}
write(data) {
const request = this.codec.decode(logger, data);
const uniqueBatchItemID = request.lookup(
'Request Message/Batch Item/Unique Batch Item ID')[0];
const requestPayload = request.lookup(
'Request Message/Batch Item/Request Payload')[0];
const requestProtocolVersionMinor = request.lookup(
'Request Message/Request Header/Protocol Version/' +
'Protocol Version Minor')[0];
const requestProtocolVersionMajor = request.lookup(
'Request Message/Request Header/Protocol Version/' +
'Protocol Version Major')[0];
const requestOperation = request.lookup(
'Request Message/Batch Item/Operation')[0];
const response = this.KMIP.Message([
this.KMIP.Structure('Response Message', [
this.KMIP.Structure('Response Header', [
this.KMIP.Structure('Protocol Version', [
this.KMIP.Integer('Protocol Version Major',
requestProtocolVersionMajor),
this.KMIP.Integer('Protocol Version Minor',
requestProtocolVersionMinor),
]),
this.KMIP.DateTime('Time Stamp', new Date),
this.KMIP.Integer('Batch Count', 1),
]),
this.KMIP.Structure('Batch Item', [
this.KMIP.Enumeration('Operation', requestOperation),
this.KMIP.ByteString('Unique Batch Item ID',
uniqueBatchItemID),
this.KMIP.Enumeration('Result Status', 'Success'),
this.KMIP.Structure('Response Payload', requestPayload),
]),
]),
]);
super.write(this.codec.encode(response));
return this;
}
}
module.exports = { logger, EchoChannel, MirrorChannel };
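One behavior worth noting, shown as a small sketch: EchoChannel buffers emitted signals until a handler is registered, so a write that happens before on('data', ...) is not lost (require path assumed):

const { EchoChannel } = require('./ersatz.js');
const channel = new EchoChannel();
channel.write(Buffer.from('hello')); // no 'data' handler yet, signal is deferred
channel.on('data', data => {
    // invoked immediately with the deferred buffer
});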

View File

@ -0,0 +1,82 @@
'use strict'; // eslint-disable-line strict
/* eslint new-cap: "off" */
const KMIP = require('../../../lib/network/kmip');
module.exports = [
{
operation: 'Query',
payload: () => [
KMIP.Enumeration('Query Function', 'Query Operations'),
KMIP.Enumeration('Query Function', 'Query Objects'),
],
},
{
operation: 'Query',
payload: () => [
KMIP.Enumeration('Query Function', 'Query Operations'),
KMIP.Enumeration('Query Function', 'Query Objects'),
KMIP.Enumeration('Query Function',
'Query Server Information'),
KMIP.Enumeration('Query Function', 'Query Profiles'),
KMIP.Enumeration('Query Function', 'Query Capabilities'),
KMIP.Enumeration('Query Function',
'Query Application Namespaces'),
KMIP.Enumeration('Query Function', 'Query Extension List'),
KMIP.Enumeration('Query Function', 'Query Extension Map'),
KMIP.Enumeration('Query Function',
'Query Attestation Types'),
KMIP.Enumeration('Query Function', 'Query RNGs'),
KMIP.Enumeration('Query Function', 'Query Validations'),
KMIP.Enumeration('Query Function',
'Query Client Registration Methods'),
],
},
{
operation: 'Discover Versions',
payload: () => [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 2),
KMIP.Integer('Protocol Version Minor', 0),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 4),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 3),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 2),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 1),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 0),
]),
],
},
{
operation: 'Create',
payload: kmip => [
KMIP.Enumeration('Object Type', 'Symmetric Key'),
KMIP.Structure('Template-Attribute', [
KMIP.Attribute('TextString', 'x-Name', 's3-thekey'),
KMIP.Attribute('Enumeration', 'Cryptographic Algorithm',
'AES'),
KMIP.Attribute('Integer', 'Cryptographic Length', 256),
KMIP.Attribute('Integer', 'Cryptographic Usage Mask',
kmip.encodeMask(
'Cryptographic Usage Mask',
['Encrypt', 'Decrypt'])),
KMIP.Attribute('Date-Time', 'Activation Date',
new Date),
]),
],
},
];

View File

@ -0,0 +1,120 @@
'use strict'; // eslint-disable-line strict
/* eslint new-cap: "off" */
const TTLVCodec = require('../../../lib/network/kmip/codec/ttlv.js');
const KMIP = require('../../../lib/network/kmip');
const kmip = new KMIP(TTLVCodec,
class DummyTransport {},
{ kmip: {} }, () => {});
module.exports = [
KMIP.Message([
KMIP.Structure('Request Message', [
KMIP.Structure('Request Header', [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 3),
]),
KMIP.Integer('Maximum Response Size', 256),
KMIP.Integer('Batch Count', 1),
]),
KMIP.Structure('Batch Item', [
KMIP.Enumeration('Operation', 'Query'),
KMIP.Structure('Request Payload', [
KMIP.Enumeration('Query Function', 'Query Operations'),
KMIP.Enumeration('Query Function', 'Query Objects'),
]),
]),
]),
]),
KMIP.Message([
KMIP.Structure('Request Message', [
KMIP.Structure('Request Header', [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 2),
]),
KMIP.Integer('Maximum Response Size', 2048),
KMIP.Boolean('Asynchronous Indicator', false),
KMIP.Integer('Batch Count', 3),
KMIP.ByteString('Asynchronous Correlation Value',
Buffer.from('Arrggg...', 'utf8')),
]),
KMIP.Structure('Batch Item', [
KMIP.Enumeration('Operation', 'Query'),
KMIP.ByteString('Unique Batch Item ID',
Buffer.from('foo', 'utf8')),
KMIP.Structure('Request Payload', [
KMIP.Enumeration('Query Function', 'Query Operations'),
KMIP.Enumeration('Query Function', 'Query Objects'),
KMIP.Enumeration('Query Function',
'Query Server Information'),
KMIP.Enumeration('Query Function', 'Query Profiles'),
KMIP.Enumeration('Query Function', 'Query Capabilities'),
KMIP.Enumeration('Query Function',
'Query Application Namespaces'),
KMIP.Enumeration('Query Function', 'Query Extension List'),
KMIP.Enumeration('Query Function', 'Query Extension Map'),
KMIP.Enumeration('Query Function',
'Query Attestation Types'),
KMIP.Enumeration('Query Function', 'Query RNGs'),
KMIP.Enumeration('Query Function', 'Query Validations'),
KMIP.Enumeration('Query Function',
'Query Client Registration Methods'),
]),
]),
KMIP.Structure('Batch Item', [
KMIP.Enumeration('Operation', 'Discover Versions'),
KMIP.ByteString('Unique Batch Item ID',
Buffer.from('bar', 'utf8')),
KMIP.Structure('Request Payload', [
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 2),
KMIP.Integer('Protocol Version Minor', 0),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 4),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 3),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 2),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 1),
]),
KMIP.Structure('Protocol Version', [
KMIP.Integer('Protocol Version Major', 1),
KMIP.Integer('Protocol Version Minor', 0),
]),
]),
]),
KMIP.Structure('Batch Item', [
KMIP.Enumeration('Operation', 'Create'),
KMIP.ByteString('Unique Batch Item ID',
Buffer.from('baz', 'utf8')),
KMIP.Structure('Request Payload', [
KMIP.Enumeration('Object Type', 'Symmetric Key'),
KMIP.Structure('Template-Attribute', [
KMIP.Attribute('TextString', 'x-Name', 's3-thekey'),
KMIP.Attribute('Enumeration', 'Cryptographic Algorithm',
'AES'),
KMIP.Attribute('Integer', 'Cryptographic Length', 256),
KMIP.Attribute('Integer', 'Cryptographic Usage Mask',
kmip.encodeMask(
'Cryptographic Usage Mask',
['Encrypt', 'Decrypt'])),
KMIP.Attribute('Date-Time', 'Activation Date',
new Date),
]),
]),
]),
]),
]),
];
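A hedged round-trip sketch of how such fixture messages can be pushed through the TTLV codec (fixture filename is an assumption; the encode/decode calls mirror the MirrorChannel in ersatz.js above):

const TTLVCodec = require('../../../lib/network/kmip/codec/ttlv.js');
const { logger } = require('./ersatz.js');
const fixtures = require('./messageFixtures'); // filename assumed
const codec = new TTLVCodec({});
const encoded = codec.encode(fixtures[0]);
const decoded = codec.decode(logger, encoded);
// decoded.lookup('Request Message/Batch Item/Operation')[0] === 'Query'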

View File

@ -0,0 +1,247 @@
'use strict'; // eslint-disable-line strict
module.exports = [
{
request: Buffer.from('42007801000000904200770100000048' +
'420069010000002042006a0200000004' +
'000000010000000042006b0200000004' +
'00000003000000004200500200000004' +
'000001000000000042000d0200000004' +
'000000010000000042000f0100000038' +
'42005c05000000040000001800000000' +
'42007901000000204200740500000004' +
'00000001000000004200740500000004' +
'0000000200000000', 'hex'),
response: Buffer.from('42007b01000000a042007a0100000048' +
'420069010000002042006a0200000004' +
'000000010000000042006b0200000004' +
'00000003000000004200920900000008' +
'00000000568a5be242000d0200000004' +
'000000010000000042000f0100000048' +
'42005c05000000040000001800000000' +
'42007f05000000040000000100000000' +
'42007e05000000040000000200000000' +
'42007d0700000009544f4f5f4c415247' +
'4500000000000000', 'hex'),
},
{
request: Buffer.from('42007801000000904200770100000048' +
'420069010000002042006a0200000004' +
'000000010000000042006b0200000004' +
'00000003000000004200500200000004' +
'000008000000000042000d0200000004' +
'000000010000000042000f0100000038' +
'42005c05000000040000001800000000' +
'42007901000000204200740500000004' +
'00000001000000004200740500000004' +
'0000000200000000', 'hex'),
response: Buffer.from('42007b010000038042007a0100000048' +
'420069010000002042006a0200000004' +
'000000010000000042006b0200000004' +
'00000003000000004200920900000008' +
'00000000568a5be242000d0200000004' +
'000000010000000042000f0100000328' +
'42005c05000000040000001800000000' +
'42007f05000000040000000000000000' +
'42007c010000030042005c0500000004' +
'000000180000000042005c0500000004' +
'000000080000000042005c0500000004' +
'000000140000000042005c0500000004' +
'0000000a0000000042005c0500000004' +
'000000010000000042005c0500000004' +
'000000030000000042005c0500000004' +
'0000000b0000000042005c0500000004' +
'0000000c0000000042005c0500000004' +
'0000000d0000000042005c0500000004' +
'0000000e0000000042005c0500000004' +
'0000000f0000000042005c0500000004' +
'000000120000000042005c0500000004' +
'000000130000000042005c0500000004' +
'0000001a0000000042005c0500000004' +
'000000190000000042005c0500000004' +
'000000090000000042005c0500000004' +
'000000110000000042005c0500000004' +
'000000020000000042005c0500000004' +
'000000040000000042005c0500000004' +
'000000150000000042005c0500000004' +
'000000160000000042005c0500000004' +
'000000100000000042005c0500000004' +
'0000001d0000000042005c0500000004' +
'000000060000000042005c0500000004' +
'000000070000000042005c0500000004' +
'0000001e0000000042005c0500000004' +
'0000001b0000000042005c0500000004' +
'0000001c0000000042005c0500000004' +
'000000250000000042005c0500000004' +
'000000260000000042005c0500000004' +
'0000001f0000000042005c0500000004' +
'000000200000000042005c0500000004' +
'000000210000000042005c0500000004' +
'000000220000000042005c0500000004' +
'000000230000000042005c0500000004' +
'000000240000000042005c0500000004' +
'000000270000000042005c0500000004' +
'000000280000000042005c0500000004' +
'00000029000000004200570500000004' +
'00000001000000004200570500000004' +
'00000002000000004200570500000004' +
'00000007000000004200570500000004' +
'00000003000000004200570500000004' +
'00000004000000004200570500000004' +
'00000006000000004200570500000004' +
'00000008000000004200570500000004' +
'00000005000000004200570500000004' +
'0000000900000000', 'hex'),
},
{
request: Buffer.from('42007801000000d04200770100000048' +
'420069010000002042006a0200000004' +
'000000010000000042006b0200000004' +
'00000003000000004200500200000004' +
'000008000000000042000d0200000004' +
'000000020000000042000f0100000048' +
'4200930800000003666f6f0000000000' +
'42005c05000000040000001800000000' +
'42007901000000204200740500000004' +
'00000001000000004200740500000004' +
'000000020000000042000f0100000028' +
'42005c05000000040000001800000000' +
'42007901000000104200740500000004' +
'0000000300000000', 'hex'),
response: Buffer.from('42007b010000028042007a0100000048' +
'420069010000002042006a0200000004' +
'000000010000000042006b0200000004' +
'00000003000000004200920900000008' +
'000000005c2d3df242000d0200000004' +
'000000020000000042000f0100000188' +
'42005c05000000040000001800000000' +
'4200930800000003666f6f0000000000' +
'42007f05000000040000000000000000' +
'42007c010000015042005c0500000004' +
'000000010000000042005c0500000004' +
'000000020000000042005c0500000004' +
'000000030000000042005c0500000004' +
'000000080000000042005c0500000004' +
'0000000a0000000042005c0500000004' +
'0000000b0000000042005c0500000004' +
'0000000c0000000042005c0500000004' +
'0000000d0000000042005c0500000004' +
'0000000e0000000042005c0500000004' +
'0000000f0000000042005c0500000004' +
'000000120000000042005c0500000004' +
'000000130000000042005c0500000004' +
'000000140000000042005c0500000004' +
'000000180000000042005c0500000004' +
'0000001e000000004200570500000004' +
'00000001000000004200570500000004' +
'00000002000000004200570500000004' +
'00000003000000004200570500000004' +
'00000004000000004200570500000004' +
'00000006000000004200570500000004' +
'000000070000000042000f0100000098' +
'42005c05000000040000001800000000' +
'42009308000000036261720000000000' +
'42007f05000000040000000000000000' +
'42007c010000006042009d070000000c' +
'4b4b4b4b4b4b4b4b4b4b4b4b00000000' +
'4200880100000040420088070000000d' +
'4b4b4b4b4b4b4b4b4b4b4b4b4b000000' +
'420088070000000c4b4b4b4b4b4b4b4b' +
'4b4b4b4b000000004200880700000005' +
'4b4b4b4b4b000000', 'hex'),
},
{
request: Buffer.from('', 'hex'),
response: Buffer.from('', 'hex'),
},
{
request: Buffer.from('4200780100000000', 'hex'),
response: Buffer.from('4200780100000000', 'hex'),
},
{
/* Valid message with unknown tag */
request: Buffer.from('56000002000000040000000100000000', 'hex'),
response: Buffer.from('56000006000000080000000000000001', 'hex'),
degenerated: true,
},
{
/* Valid message with unknown enum */
/* on a non-enumeration tag */
request: Buffer.from('42007805000000040000000100000000', 'hex'),
/* on an enumeration tag */
response: Buffer.from('42007405000000040000000000000000', 'hex'),
degenerated: true,
},
{
/* padding is missing at the end of the message */
request: Buffer.from('42000f080000000400000001', 'hex'),
response: Buffer.from('42000f0a0000000400000001', 'hex'),
degenerated: true,
},
{
request: Buffer.from('42000f08000000040000000100000000', 'hex'),
response: Buffer.from('42000f0a000000040000000100000000', 'hex'),
degenerated: false,
},
{
/* chained message; shouldn't happen, but validates the structure loop */
request: Buffer.from('42000f08000000040000000100000000' +
'42000f08000000040000000100000000' +
'42000f08000000040000000100000000' +
'42000f08000000040000000100000000'
, 'hex'),
response: Buffer.from('42000f0a000000040000000100000000' +
'42000f0a000000040000000100000000' +
'42000f0a000000040000000100000000' +
'42000f0a000000040000000100000000'
, 'hex'),
degenerated: false,
},
{
/* zero-length payload */
request: Buffer.from('4200780400000000', 'hex'),
response: Buffer.from('4200780400000000', 'hex'),
degenerated: false,
},
{
/* no padding */
request: Buffer.from('420078040000000100', 'hex'),
response: Buffer.from('420078040000000100', 'hex'),
degenerated: true,
},
{
request: Buffer.from('42007406000000080000000000000001', 'hex'),
response: Buffer.from('42007406000000080000000000000000', 'hex'),
degenerated: false,
},
{
request: Buffer.from('42007406000000081111111111111111', 'hex'),
response: Buffer.from('42007406000000080101010101010101', 'hex'),
degenerated: true,
},
{
request: Buffer.from('4200740700000000', 'hex'),
response: Buffer.from('42007407000000010100000000000000', 'hex'),
degenerated: false,
},
{
request: Buffer.from('42007407000000010000000000000000', 'hex'),
response: Buffer.from('42007407000000020100000000000000', 'hex'),
degenerated: false,
},
{
request: Buffer.from('4200740800000000', 'hex'),
response: Buffer.from('42007408000000010100000000000000', 'hex'),
degenerated: false,
},
{
request: Buffer.from('42007408000000010000000000000000', 'hex'),
response: Buffer.from('42007408000000020100000000000000', 'hex'),
degenerated: false,
},
{
request: Buffer.from('42007409000000080000000000000001', 'hex'),
response: Buffer.from('42007409000000080000000000000000', 'hex'),
degenerated: false,
},
];

185
tests/utils/mdProxyUtils.js Normal file
View File

@ -0,0 +1,185 @@
'use strict'; // eslint-disable-line strict
class DummyProxyResponse {
/**
* Create a new instance of this dummy class
*
* This dummy class implements the minimum feature set
* of the class http.OutgoingMessage suitable for the
* arsenal.storage.metadata.proxy.BucketdRoutes test
* without using an actual http server.
*
* @param {function} doneCB - function called once the response is
* ready to be consumed: (err, response, body)
*/
constructor(doneCB) {
this.headers = {};
this.body = null;
this.endCalled = false;
this.responseHead = null;
this.doneCB = doneCB;
}
writeHead(statusCode, statusMessage, header) {
this.responseHead = {
statusCode,
statusMessage,
header,
};
}
write(data) {
this.body = data;
}
end(cb) {
if (this.endCalled) {
return;
}
this.endCalled = true;
process.nextTick(() => {
cb(null);
this.doneCB(null, this, JSON.parse(this.body));
});
}
}
class DummyProxyRequest {
/**
* Create a new instance of this dummy class
*
* This dummy class implements the minimum feature set
* of the class http.IncomingMessage suitable for the
* arsenal.storage.metadata.proxy.BucketdRoutes test
* without using an actual http server.
*
* @param {object} params - parameter set describing the intended request
* @param {string} params.method - http method to fake
* @param {string} params.url - url to fake
* @param {string} params.body - request body to fake
* @param {boolean} params.json - if set, assume the body to be a JSON
* value to be serialized
* @param {object} params.headers - request headers to fake
*/
constructor(params) {
this.method = params.method;
this.url = params.url;
this.json = params.json;
this.body = Buffer.from(
this.json ? JSON.stringify(params.body) : (params.body || ''));
this.headers = params.headers;
this.socket = {
remoteAddress: '127.0.0.1',
remotePort: 32769,
};
this.dataConsummed = false;
this.endCB = null;
}
/**
* bind a callback to a particular event on the request processing
*
* @param {string} event - one of 'data', 'end' or 'error'
* @param {function} callback - a function suitable for the associated event
* @returns {object} this
*/
on(event, callback) {
switch (event) {
case 'data':
process.nextTick(() => {
callback(this.body);
this.dataConsummed = true;
if (this.endCB) {
this.endCB();
}
});
break;
case 'end':
if (!this.dataConsummed) {
this.endCB = callback;
} else {
process.nextTick(() => {
callback();
});
}
break;
case 'error':
// never happens with this mock class
break;
default:
process.nextTick(() => callback(new Error(
`Unsupported DummyProxyRequest.on event '${event}'`)));
}
return this;
}
}
class RequestDispatcher {
/**
* Construct a new RequestDispatcher object.
*
* This class connects the provided Routes class to a dummy interface
* that enables tests to perform requests without using an actual http
* server.
*
* @param {object} routes - an instance of a Routes dispatcher class
*/
constructor(routes) {
this.routes = routes;
}
/**
* fake a POST request on the associated Routes dispatcher
*
* @param {string} path - the path of the object to be posted
* @param {object} objectMD - the metadata to post for this object
* @param {function} callback - called once the request has been processed
* with these parameters (err)
* @returns {undefined}
*/
post(path, objectMD, callback) {
this.routes.dispatch(new DummyProxyRequest({
method: 'POST',
url: path,
json: true,
body: objectMD,
headers: {},
}), new DummyProxyResponse(callback));
}
/**
* fake a GET request on the associated Routes dispatcher
*
* @param {string} path - the path of the object to be retrieved
* @param {function} callback - called once the request has been processed
* with these parameters (err, response, body)
* @returns {undefined}
*/
get(path, callback) {
this.routes.dispatch(new DummyProxyRequest({
method: 'GET',
url: path,
json: true,
body: '',
headers: {},
}), new DummyProxyResponse(callback));
}
/**
* fake a DELETE request on the associated Routes dispatcher
*
* @param {string} path - the path of the object to be deleted
* @param {function} callback - called once the request has been processed
* with these parameters (err)
* @returns {undefined}
*/
delete(path, callback) {
this.routes.dispatch(new DummyProxyRequest({
method: 'DELETE',
url: path,
json: true,
body: '',
headers: {},
}), new DummyProxyResponse(callback));
}
}
module.exports = { RequestDispatcher };
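Finally, a hedged usage sketch of the dispatcher; `routes` stands for a BucketdRoutes instance whose construction is not shown here, and the bucketd-style path is an assumption:

const { RequestDispatcher } = require('./mdProxyUtils');
const dispatcher = new RequestDispatcher(routes); // routes: a Routes instance
dispatcher.post('/default/bucket/mybucket/mykey', { 'content-length': 0 }, err => {
    dispatcher.get('/default/bucket/mybucket/mykey', (err, response, body) => {
        // body is the JSON-parsed document written by the routes dispatcher
    });
});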